input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
parents
if 'parents' in metadata:
for parent in metadata['parents']:
if parent not in self.items:
raise HTTPNotFound()
if parent in self.lostPermission:
return Response(status=403, content_type="application/json", text='{"error": {"errors": [{"reason": "forbidden"}]}}')
self.upload_info['size'] = size
self.upload_info['mime'] = mimeType
self.upload_info['item'] = self.formatItem(metadata, id)
self.upload_info['id'] = id
self.upload_info['next_start'] = 0
metadata['bytes'] = bytearray()
metadata['size'] = size
resp = Response()
resp.headers['Location'] = "http://localhost:" + \
str(self._port) + "/upload/drive/v3/files/progress/" + id
return resp
async def driveContinueUpload(self, request: Request):
    """Simulated Google Drive resumable-upload chunk endpoint (PUT).

    Validates the Content-Range header against the upload session stored
    in self.upload_info, accumulates the chunk bytes, and returns 308
    while incomplete; on the final chunk, registers the completed item
    and returns its id as JSON.
    """
    # Optional test hook: pause on a specific chunk so tests can observe
    # an in-flight upload.
    if self.waitOnChunk > 0:
        if self.current_chunk == self.waitOnChunk:
            self._upload_chunk_trigger.set()
            await self._upload_chunk_wait.wait()
        else:
            self.current_chunk += 1
    id = request.match_info.get('id')
    # Optional simulated latency per chunk.
    if (self.getSetting('drive_upload_sleep') > 0):
        await self._time.sleepAsync(self.getSetting('drive_upload_sleep'))
    await self._checkDriveHeaders(request)
    # The URL id must match the session created by the start-upload call.
    if self.upload_info.get('id', "") != id:
        raise HTTPBadRequest()
    chunk_size = int(request.headers['Content-Length'])
    info = request.headers['Content-Range']
    # A "bytes */N" range means the client is querying upload status:
    # reply 308 with the range received so far.
    if resumeBytesPattern.match(info):
        resp = Response(status=308)
        if self.upload_info['next_start'] != 0:
            resp.headers['Range'] = "bytes=0-{0}".format(self.upload_info['next_start'] - 1)
        return resp
    if not bytesPattern.match(info):
        raise HTTPBadRequest()
    numbers = intPattern.findall(info)
    start = int(numbers[0])
    end = int(numbers[1])
    total = int(numbers[2])
    # The advertised total must match the size declared at session start.
    if total != self.upload_info['size']:
        raise HTTPBadRequest()
    # Chunks must arrive in order with no gaps.
    if start != self.upload_info['next_start']:
        raise HTTPBadRequest()
    # Every chunk except the last must be a multiple of 256 KiB,
    # mirroring the Drive resumable-upload contract.
    if not (end == total - 1 or chunk_size % (256 * 1024) == 0):
        raise HTTPBadRequest()
    if end > total - 1:
        raise HTTPBadRequest()
    # get the chunk
    received_bytes = await self.readAll(request)
    # See if we should fail the request (simulated error injection).
    if self.getSetting("drive_upload_error") is not None:
        if self.getSetting("drive_upload_error_attempts") <= 0:
            raise HttpMultiException(self.getSetting("drive_upload_error"))
        else:
            self.update({"drive_upload_error_attempts": self.getSetting("drive_upload_error_attempts") - 1})
    # validate the chunk: declared length, range arithmetic, and the
    # accumulated byte count must all agree.
    if len(received_bytes) != chunk_size:
        raise HTTPBadRequest()
    if len(received_bytes) != end - start + 1:
        raise HTTPBadRequest()
    self.upload_info['item']['bytes'].extend(received_bytes)
    if len(self.upload_info['item']['bytes']) != end + 1:
        raise HTTPBadRequest()
    self.chunks.append(len(received_bytes))
    if end == total - 1:
        # upload is complete, so create the item
        self.items[self.upload_info['id']] = self.upload_info['item']
        return json_response({"id": self.upload_info['id']})
    else:
        # Return an incomplete response
        # For some reason, the tests like to stop right here
        resp = Response(status=308)
        self.upload_info['next_start'] = end + 1
        resp.headers['Range'] = "bytes=0-{0}".format(end)
        return resp
# HASSIO METHODS BELOW
async def _verifyHassioHeader(self, request) -> bool:
if self.supervisor_sleep > 0:
await asyncio.sleep(self.supervisor_sleep)
if self.getSetting("hassio_error") is not None:
raise HttpMultiException(self.getSetting("hassio_error"))
self._verifyHeader(request, "Authorization", "Bearer " + self.getSetting('ha_header'))
def _verifyHaHeader(self, request) -> bool:
if self._ha_error is not None:
raise HttpMultiException(self._ha_error)
self._verifyHeader(request, "Authorization", "Bearer " + self.getSetting('ha_header'))
def _verifyHeader(self, request, key: str, value: str) -> bool:
if request.headers.get(key, None) != value:
raise HTTPUnauthorized()
def formatDataResponse(self, data: Any) -> Response:
    """Wrap *data* in the supervisor-API success envelope.

    Returns an aiohttp Response (via json_response); the previous
    ``-> str`` annotation was wrong.
    """
    return json_response({'result': 'ok', 'data': data})
def checkForSupervisorError(self):
    """Return a Response carrying the configured simulated supervisor
    error status, or None when no error is configured."""
    if self.supervisor_error is None:
        return None
    return Response(status=self.supervisor_error)
def formatErrorResponse(self, error: str) -> Response:
    """Wrap *error* in the supervisor-API failure envelope.

    Returns an aiohttp Response (via json_response); the previous
    ``-> str`` annotation was wrong.
    """
    return json_response({'result': error})
async def hassioSnapshots(self, request: Request):
    """List all simulated snapshots (GET /snapshots)."""
    # Evaluate the simulated-error check once rather than constructing
    # the error Response twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    return self.formatDataResponse({'snapshots': list(self.snapshots.values())})
async def hassioSupervisorInfo(self, request: Request):
    """Return simulated supervisor info (GET /supervisor/info)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    return self.formatDataResponse(
        {
            "addons": list(all_addons).copy()
        }
    )
async def supervisorLogs(self, request: Request):
    """Return canned supervisor log lines (GET /supervisor/logs)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    return Response(body="Supervisor Log line 1\nSupervisor Log Line 2")
async def coreLogs(self, request: Request):
    """Return canned Home Assistant core log lines (GET /core/logs)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    return Response(body="Core Log line 1\nCore Log Line 2")
async def haInfo(self, request: Request):
    """Return simulated Home Assistant core info (GET /homeassistant/info)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    return self.formatDataResponse(
        {
            "version": self.getSetting('ha_version'),
            "last_version": self.getSetting('ha_last_version'),
            "machine": self.getSetting('machine'),
            "ip_address": self.getSetting('ip_address'),
            "arch": self.getSetting('arch'),
            "image": self.getSetting('image'),
            "custom": self.getSetting('custom'),
            "boot": self.getSetting('boot'),
            "port": self.getSetting('ha_port'),
            "ssl": self.getSetting('ssl'),
            "watchdog": self.getSetting('watchdog'),
            "wait_boot": self.getSetting('wait_boot')
        }
    )
async def hassioNewFullSnapshot(self, request: Request):
    """Create a simulated full snapshot (POST|GET /snapshots/new/full).

    Honors the simulated failure/delay settings and serializes snapshot
    creation through self._snapshot_lock.  Returns the new slug.
    """
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    # Mimic the real supervisor's refusal to start overlapping snapshots.
    if (self.block_snapshots or self.snapshot_in_progress) and not self.getSetting('always_hard_lock'):
        raise HTTPBadRequest()
    input_json = {}
    try:
        input_json = await request.json()
    except:  # noqa: E722
        # The request body is optional for full snapshots.
        pass
    try:
        await self._snapshot_lock.acquire()
        self.snapshot_in_progress = True
        await self._verifyHassioHeader(request)
        error = self.getSetting("hassio_snapshot_error")
        if error is not None:
            raise HttpMultiException(error)
        # Simulated creation delay, overridable per-request via ?seconds=.
        seconds = int(request.query.get(
            'seconds', self.getSetting('snapshot_wait_time')))
        date = self._time.now()
        size = int(random.uniform(float(self.getSetting('snapshot_min_size')), float(
            self.getSetting('snapshot_max_size'))))
        slug = self.generateId(8)
        name = input_json.get('name', "Default name")
        password = input_json.get('password', None)
        if seconds > 0:
            await asyncio.sleep(seconds)
        data = createSnapshotTar(slug, name, date, size, password=password)
        snapshot_info = parseSnapshotInfo(data)
        self.snapshots[slug] = snapshot_info
        self.snapshot_data[slug] = bytearray(data.getbuffer())
        return self.formatDataResponse({"slug": slug})
    finally:
        # Always release the lock and clear the in-progress flag, even
        # when creation fails part-way.
        self.snapshot_in_progress = False
        self._snapshot_lock.release()
async def hassioNewPartialSnapshot(self, request: Request):
    """Create a simulated partial snapshot (POST /snapshots/new/partial).

    Unlike the full-snapshot endpoint, the JSON body is mandatory: it
    must supply 'name', 'folders' and 'addons'.  Returns the new slug.
    """
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    # Mimic the real supervisor's refusal to start overlapping snapshots.
    if (self.block_snapshots or self.snapshot_in_progress) and not self.getSetting('always_hard_lock'):
        raise HTTPBadRequest()
    input_json = await request.json()
    try:
        await self._snapshot_lock.acquire()
        self.snapshot_in_progress = True
        await self._verifyHassioHeader(request)
        # Simulated creation delay, overridable per-request via ?seconds=.
        seconds = int(request.query.get(
            'seconds', self.getSetting('snapshot_wait_time')))
        date = self._time.now()
        size = int(random.uniform(float(self.getSetting('snapshot_min_size')), float(
            self.getSetting('snapshot_max_size'))))
        slug = self.generateId(8)
        name = input_json['name']
        password = input_json.get('password', None)
        if seconds > 0:
            await asyncio.sleep(seconds)
        data = createSnapshotTar(
            slug,
            name,
            date,
            size,
            included_folders=input_json['folders'],
            included_addons=input_json['addons'],
            password=password)
        snapshot_info = parseSnapshotInfo(data)
        self.snapshots[slug] = snapshot_info
        self.snapshot_data[slug] = bytearray(data.getbuffer())
        return self.formatDataResponse({"slug": slug})
    finally:
        # Always release the lock and clear the in-progress flag.
        self.snapshot_in_progress = False
        self._snapshot_lock.release()
async def uploadNewSnapshot(self, request: Request):
    """Accept an uploaded snapshot tar (POST /snapshots/new/upload)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    try:
        received_bytes = await self.readAll(request)
        info = parseSnapshotInfo(BytesIO(received_bytes))
        self.snapshots[info['slug']] = info
        self.snapshot_data[info['slug']] = received_bytes
        return self.formatDataResponse({"slug": info['slug']})
    except Exception as e:
        # Best-effort: an unparsable tar is reported as a bad snapshot
        # rather than crashing the simulated server.
        print(str(e))
        return self.formatErrorResponse("Bad snapshot")
async def hassioDelete(self, request: Request):
    """Delete a snapshot by slug (POST /snapshots/{slug}/remove)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    slug = request.match_info.get('slug')
    await self._verifyHassioHeader(request)
    if slug not in self.snapshots:
        raise HTTPNotFound()
    del self.snapshots[slug]
    del self.snapshot_data[slug]
    return self.formatDataResponse("deleted")
async def hassioSnapshotInfo(self, request: Request):
    """Return a snapshot's metadata (GET /snapshots/{slug}/info)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    slug = request.match_info.get('slug')
    await self._verifyHassioHeader(request)
    if slug not in self.snapshots:
        raise HTTPNotFound()
    return self.formatDataResponse(self.snapshots[slug])
async def hassioSnapshotDownload(self, request: Request):
    """Stream a snapshot's raw bytes (GET /snapshots/{slug}/download)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    slug = request.match_info.get('slug')
    await self._verifyHassioHeader(request)
    if slug not in self.snapshot_data:
        raise HTTPNotFound()
    return self.serve_bytes(request, self.snapshot_data[slug])
async def hassioSelfInfo(self, request: Request):
    """Return the addon's own info (GET /addons/self/info)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    return self.formatDataResponse({
        "webui": self.getSetting('web_ui'),
        'ingress_url': self.getSetting('ingress_url'),
        "slug": self.getSetting('addon_slug'),
        "options": self._options
    })
async def hassioInfo(self, request: Request):
    """Return simulated supervisor host info (GET /info)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    return self.formatDataResponse({
        "supervisor": self.getSetting('supervisor'),
        "homeassistant": self.getSetting('homeassistant'),
        "hassos": self.getSetting('hassos'),
        "hostname": self.getSetting('hostname'),
        "machine": self.getSetting('machine'),
        "arch": self.getSetting('arch'),
        "supported_arch": self.getSetting('supported_arch'),
        "channel": self.getSetting('channel')
    })
async def hassioAuthenticate(self, request: Request):
    """Validate the posted username/password pair (POST|GET /auth)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    input_json = await request.json()
    if input_json.get("username") != self._username or input_json.get("password") != self._password:
        raise HTTPBadRequest()
    return self.formatDataResponse({})
async def haStateUpdate(self, request: Request):
    """Record a posted entity state and its attributes
    (POST /homeassistant/api/states/{entity})."""
    entity = request.match_info.get('entity')
    self._verifyHaHeader(request)
    body = await request.json()
    self._entities[entity] = body['state']
    self._attributes[entity] = body['attributes']
    return Response()
async def haEventUpdate(self, request: Request):
    """Append a posted (event name, payload) pair to the recorded events
    (POST /homeassistant/api/events/{name})."""
    event_name = request.match_info.get('name')
    self._verifyHaHeader(request)
    payload = await request.json()
    self._events.append((event_name, payload))
    return Response()
async def createNotification(self, request: Request):
    """Store a copy of the posted persistent-notification payload."""
    self._verifyHaHeader(request)
    notification = await request.json()
    print("Created notification with: {}".format(notification))
    self._notification = notification.copy()
    return Response()
async def dismissNotification(self, request: Request):
    """Clear the stored notification when the addon dismisses it."""
    self._verifyHaHeader(request)
    payload = await request.json()
    print("Dismissed notification with: {}".format(payload))
    self._notification = None
    return Response()
async def hassioUpdateOptions(self, request: Request):
    """Replace the stored addon options (POST /addons/self/options)."""
    # Evaluate the simulated-error check once rather than twice.
    error = self.checkForSupervisorError()
    if error is not None:
        return error
    await self._verifyHassioHeader(request)
    self._options = (await request.json())['options'].copy()
    return self.formatDataResponse({})
async def slugRedirect(self, request: Request):
    """Redirect ingress slug requests to the configured ingress port."""
    port = self.config.get(Setting.INGRESS_PORT)
    raise HTTPSeeOther("https://localhost:" + str(port))
@middleware
async def error_middleware(self, request: Request, handler):
    """aiohttp middleware for the simulated server.

    Records every request URL, injects configured per-URL error
    responses, and translates handler exceptions into HTTP responses.
    """
    self.urls.append(str(request.url))
    # Simulated failures: a matcher keeps returning its status once its
    # remaining 'attempts' budget hits zero; until then each hit just
    # decrements the budget and lets the request through.
    for error in self.match_errors:
        if re.match(error['url'], str(request.url)):
            if error['attempts'] <= 0:
                # Drain the request body before replying with the error.
                await self.readAll(request)
                return Response(status=error['status'])
            else:
                error['attempts'] = error['attempts'] - 1
    try:
        resp = await handler(request)
        return resp
    except Exception as ex:
        await self.readAll(request)
        if isinstance(ex, HttpMultiException):
            return Response(status=ex.status_code)
        elif isinstance(ex, HTTPException):
            # aiohttp's own HTTP exceptions already render as responses.
            raise
        else:
            # Unexpected error: log it and surface a 502 with the message.
            logger.printException(ex)
            return json_response(str(ex), status=502)
def createApp(self):
    """Assemble the simulated-server aiohttp Application: routes,
    error middleware, and the embedded auth server's endpoints."""
    application = Application(middlewares=[self.error_middleware])
    application.add_routes(self.routes())
    # Let the embedded auth server attach its own routes.
    self._authserver.buildApp(application)
    return application
async def start(self, port):
    """Start serving the simulated server on all interfaces at *port*."""
    self.runner = aiohttp.web.AppRunner(self.createApp())
    await self.runner.setup()
    await aiohttp.web.TCPSite(self.runner, "0.0.0.0", port=port).start()
async def stop(self):
    """Shut down the app runner and release its resources."""
    runner = self.runner
    await runner.shutdown()
    await runner.cleanup()
def toggleBlockSnapshot(self, request: Request):
    """Flip the simulated snapshot-in-progress flag (debug endpoint)."""
    self.snapshot_in_progress = not self.snapshot_in_progress
    if self.snapshot_in_progress:
        return Response(text="Blocking")
    return Response(text="Not Blocking")
def routes(self):
    """Route table for the simulated supervisor / HA / Drive server.

    NOTE: the registration order below is preserved deliberately —
    aiohttp matches routes in registration order, so reordering could
    change which handler serves overlapping paths.
    """
    return [
        post('/addons/self/options', self.hassioUpdateOptions),
        post("/homeassistant/api/services/persistent_notification/dismiss", self.dismissNotification),
        post("/homeassistant/api/services/persistent_notification/create", self.createNotification),
        post("/homeassistant/api/events/{name}", self.haEventUpdate),
        post("/homeassistant/api/states/{entity}", self.haStateUpdate),
        post('/auth', self.hassioAuthenticate),
        get('/auth', self.hassioAuthenticate),
        get('/info', self.hassioInfo),
        get('/addons/self/info', self.hassioSelfInfo),
        get('/snapshots/{slug}/download', self.hassioSnapshotDownload),
        get('/snapshots/{slug}/info', self.hassioSnapshotInfo),
        post('/snapshots/{slug}/remove', self.hassioDelete),
        post('/snapshots/new/upload', self.uploadNewSnapshot),
        get('/snapshots/new/upload', self.uploadNewSnapshot),
        get('/debug/toggleblock', self.toggleBlockSnapshot),
        post('/snapshots/new/partial', self.hassioNewPartialSnapshot),
        post('/snapshots/new/full', self.hassioNewFullSnapshot),
        get('/snapshots/new/full', self.hassioNewFullSnapshot),
        get('/homeassistant/info', self.haInfo),
        get('/supervisor/info', self.hassioSupervisorInfo),
        get('/supervisor/logs', self.supervisorLogs),
        get('/core/logs', self.coreLogs),
        get('/snapshots', self.hassioSnapshots),
        # Simulated Google Drive endpoints.
        put('/upload/drive/v3/files/progress/{id}', self.driveContinueUpload),
        post('/upload/drive/v3/files/', self.driveStartUpload),
        post('/drive/v3/files/', self.driveCreate),
        get('/drive/v3/files/', self.driveQuery),
        delete('/drive/v3/files/{id}/', self.driveDelete),
        patch('/drive/v3/files/{id}/', self.driveUpdate),
        get('/drive/v3/files/{id}/', self.driveGetItem),
        # Test-control endpoints.
        post('/updatesettings', self.updateSettings),
        get('/readfile', self.readFile),
        post('/uploadfile', self.uploadfile),
        post('/doareset', self.reset),
        # Simulated OAuth endpoints.
        post('/oauth2/v4/token', self.driveRefreshToken),
        get('/o/oauth2/v2/auth', self.driveAuthorize),
        post('/token', self.driveToken),
        get('/hassio/ingress/self_slug', self.slugRedirect)
    ]
def generateId(self, length: int = 30) -> str:
    """Return a deterministic pseudo-unique id string.

    A monotonically increasing counter forms the prefix; the remainder
    is padded with the digits 0, 1, 2, ... so ids are reproducible
    across runs (unlike a random id).  The previous ``-> Any``
    annotation was wrong — a str is always returned.
    """
    self.id_counter += 1
    prefix = str(self.id_counter)
    return prefix + ''.join(str(x) for x in range(0, length - len(prefix)))
def timeToRfc3339String(self, time) -> str:
    """Format *time* (a datetime) as an RFC 3339 style UTC timestamp.

    The previous ``-> Any`` annotation was wrong — strftime returns str.
    NOTE(review): the 'Z' suffix is appended regardless of the input's
    timezone; callers appear to pass UTC times — confirm.
    """
    return time.strftime("%Y-%m-%dT%H:%M:%SZ")
def formatItem(self, base, id):
    """Decorate a Drive metadata dict in place with the simulated
    server's standard fields, then return it."""
    modified = self.timeToRfc3339String(self._time.now())
    base.update({
        'capabilities': {
            'canAddChildren': True,
            'canListChildren': True,
            'canDeleteChildren': True,
        },
        'trashed': False,
        'id': id,
        'modifiedTime': modified,
    })
    return base
def parseFields(self, source: str):
fields = []
for field in source.split(","):
if field.startswith("files("):
fields.append(field[6:])
elif field.endswith(")"):
| |
7.73686],
[691200, 10, 3, 2, 0, 0.945754],
[691488, 5, 2, 0, 4, 1.13066],
[694575, 0, 4, 2, 3, 8.40395],
[699840, 6, 7, 1, 0, 1.01206],
[700000, 5, 0, 5, 1, 1.07746],
[702464, 11, 0, 0, 3, 1.0266],
[703125, 0, 2, 7, 0, 8.57421],
[705600, 6, 2, 2, 2, 1.06551],
[705894, 1, 1, 0, 6, 8.57376],
[708588, 2, 11, 0, 0, 1.1192],
[708750, 1, 4, 4, 1, 8.74618],
[714420, 2, 6, 1, 2, 1.38663],
[716800, 12, 0, 2, 1, 0.943901],
[720000, 7, 2, 4, 0, 0.981341],
[720300, 2, 1, 2, 4, 1.45001],
[725760, 8, 4, 1, 1, 1.06077],
[729000, 3, 6, 3, 0, 1.19393],
[734832, 4, 8, 0, 1, 1.07592],
[735000, 3, 1, 4, 2, 1.3305],
[737280, 14, 2, 1, 0, 0.988863],
[740880, 4, 3, 1, 3, 1.3471],
[746496, 10, 6, 0, 0, 1.04045],
[750000, 4, 1, 6, 0, 1.2979],
[750141, 0, 7, 0, 3, 10.2384],
[752640, 10, 1, 1, 2, 1.29185],
[756000, 5, 3, 3, 1, 1.18769],
[756315, 0, 2, 1, 5, 10.2347],
[759375, 0, 5, 5, 0, 10.2369],
[762048, 6, 5, 0, 2, 1.12027],
[765450, 1, 7, 2, 1, 10.2575],
[765625, 0, 0, 6, 2, 10.2604],
[768000, 11, 1, 3, 0, 1.1529],
[768320, 6, 0, 1, 4, 1.27811],
[771750, 1, 2, 3, 3, 9.29377],
[774144, 12, 3, 0, 1, 1.05108],
[777600, 7, 5, 2, 0, 1.03642],
[777924, 2, 4, 0, 4, 1.40127],
[781250, 1, 0, 8, 0, 11.4192],
[784000, 7, 0, 3, 2, 1.13771],
[786432, 18, 1, 0, 0, 1.02046],
[787320, 3, 9, 1, 0, 1.26873],
[787500, 2, 2, 5, 1, 1.51342],
[790272, 8, 2, 0, 3, 1.22655],
[793800, 3, 4, 2, 2, 1.2832],
[800000, 8, 0, 5, 0, 1.08271],
[802816, 14, 0, 0, 2, 0.989918],
[806400, 9, 2, 2, 1, 1.26767],
[806736, 4, 1, 0, 5, 1.54242],
[810000, 4, 4, 4, 0, 1.14015],
[816480, 5, 6, 1, 1, 1.35146],
[819200, 15, 0, 2, 0, 0.945334],
[820125, 0, 8, 3, 0, 9.35975],
[823200, 5, 1, 2, 3, 1.42608],
[823543, 0, 0, 0, 7, 9.36698],
[826686, 1, 10, 0, 1, 9.36753],
[826875, 0, 3, 4, 2, 9.36921],
[829440, 11, 4, 1, 0, 1.11529],
[833490, 1, 5, 1, 3, 8.96444],
[839808, 7, 8, 0, 0, 0.994558],
[840000, 6, 1, 4, 1, 1.34283],
[840350, 1, 0, 2, 5, 12.6683],
[843750, 1, 3, 6, 0, 12.6571],
[846720, 7, 3, 1, 2, 1.31059],
[850500, 2, 5, 3, 1, 1.5805],
[857304, 3, 7, 0, 2, 1.39002],
[857500, 2, 0, 4, 3, 1.60425],
[860160, 13, 1, 1, 1, 1.33685],
[864000, 8, 3, 3, 0, 1.17741],
[864360, 3, 2, 1, 4, 1.67552],
[870912, 9, 5, 0, 1, 1.33997],
[874800, 4, 7, 2, 0, 1.2518],
[875000, 3, 0, 6, 1, 1.48286],
[878080, 9, 0, 1, 3, 1.51649],
[882000, 4, 2, 3, 2, 1.49382],
[884736, 15, 3, 0, 0, 1.01832],
[885735, 0, 11, 1, 0, 11.5199],
[889056, 5, 4, 0, 3, 1.34835],
[893025, 0, 6, 2, 2, 11.5251],
[896000, 10, 0, 3, 1, 1.34287],
[900000, 5, 2, 5, 0, 1.36657],
[900375, 0, 1, 3, 4, 13.0397],
[903168, 11, 2, 0, 2, 1.30505],
[907200, 6, 4, 2, 1, 1.25521],
[907578, 1, 3, 0, 5, 13.0504],
[911250, 1, 6, 4, 0, 13.0631],
[917504, 17, 0, 0, 1, 1.13197],
[918540, 2, 8, 1, 1, 1.66839],
[918750, 1, 1, 5, 2, 10.2675],
[921600, 12, 2, 2, 0, 1.19066],
[921984, 7, 1, 0, 4, 1.52107],
[926100, 2, 3, 2, 3, 1.73207],
[933120, 8, 6, 1, 0, 1.36334],
[937500, 2, 1, 7, 0, 1.73874],
[940800, 8, 1, 2, 2, 1.44079],
[941192, 3, 0, 0, 6, 1.71056],
[944784, 4, 10, 0, 0, 1.41927],
[945000, 3, 3, 4, 1, 1.58493],
[952560, 4, 5, 1, 2, 1.64818],
[960000, 9, 1, 4, 0, 1.4977],
[960400, 4, 0, 2, 4, 1.58899],
[964467, 0, 9, 0, 2, 12.2111],
[967680, 10, 3, 1, 1, 1.53882],
[972000, 5, 5, 3, 0, 1.4306],
[972405, 0, 4, 1, 4, 12.587],
[979776, 6, 7, 0, 1, 1.39165],
[980000, 5, 0, 4, 2, 1.44229],
[983040, 16, 1, 1, 0, 1.41934],
[984150, 1, 9, 2, 0, 13.1642],
[984375, 0, 2, 6, 1, 11.9186],
[987840, 6, 2, 1, 3, 1.71323],
[992250, 1, 4, 3, 2, 11.9304],
[995328, 12, 5, 0, 0, 1.29154],
[1000000, 6, 0, 6, 0, 1.46274],
[1000188, 2, 6, 0, 3, 1.87933],
[1003520, 12, 0, 1, 2, 1.46194],
[1008000, 7, 2, 3, 1, 1.55008],
[1008420, 2, 1, 1, 5, 2.20065],
[1012500, 2, 4, 5, 0, 1.71657],
[1016064, 8, 4, 0, 2, 1.34909],
[1020600, 3, 6, 2, 1, 1.7267],
[1024000, 13, 0, 3, 0, 1.37769],
[1029000, 3, 1, 3, 3, 1.98084],
[1032192, 14, 2, 0, 1, 1.36153],
[1036800, 9, 4, 2, 0, 1.40431],
[1037232, 4, 3, 0, 4, 1.74616],
[1048576, 20, 0, 0, 0, 1.11557],
[1049760, 5, 8, 1, 0, 1.46197],
[1050000, 4, 1, 5, 1, 1.86887],
[1053696, 10, 1, 0, 3, 1.78489],
[1058400, 5, 3, 2, 2, 1.58631],
[1058841, 0, 2, 0, 6, 13.6501],
[1062882, 1, 12, 0, 0, 13.6651],
[1063125, 0, 5, 4, 1, 12.448],
[1071630, 1, 7, 1, 2, 12.4605],
[1071875, 0, 0, 5, 3, 12.4608],
[1075200, 11, 1, 2, 1, 1.66027],
[1075648, 6, 0, 0, 5, 1.75578],
[1080000, 6, 3, 4, 0, 1.47633],
[1080450, 1, 2, 2, 4, 14.5627],
[1088640, 7, 5, 1, 1, 1.69578],
[1093500, 2, 7, 3, 0, 1.88143],
[1093750, 1, 0, 7, 1, 12.7563],
[1097600, 7, 0, 2, 3, 1.62891],
[1102248, 3, 9, 0, 1, 1.79196],
[1102500, 2, 2, 4, 2, 2.02785],
[1105920, 13, 3, 1, 0, 1.47313],
[1111320, 3, 4, 1, 3, 2.02882],
[1119744, 9, 7, 0, 0, 1.54478],
[1120000, 8, 0, 4, 1, 1.53198],
[1125000, 3, 2, 6, 0, 1.94283],
[1128960, 9, 2, 1, 2, 1.90468],
[1134000, 4, 4, 3, 1, 1.76804],
[1143072, 5, 6, 0, 2, 1.74368],
[1146880, 15, 0, 1, 1, 1.56005],
[1148175, 0, 8, 2, 1, 14.7745],
[1152000, 10, 2, 3, 0, 1.70406],
[1152480, 5, 1, 1, 4, 2.1319],
[1157625, 0, 3, 3, 3, 13.7687],
[1161216, 11, 4, 0, 1, 1.54028],
[1166400, 6, 6, 2, 0, 1.63296],
[1166886, 1, 5, 0, 4, 12.8202],
[1171875, 0, 1, 8, 0, 12.8344],
[1176000, 6, 1, 3, 2, 1.9464],
[1176490, 1, 0, 1, 6, 12.8296],
[1179648, 17, 2, 0, 0, 1.43288],
[1180980, 2, 10, 1, 0, 2.2157],
[1181250, 1, 3, 5, 1, 16.4587],
[1185408, 7, 3, 0, 3, 1.79828],
[1190700, 2, 5, 2, 2, 2.11406],
[1200000, 7, 1, 5, 0, 1.82176],
[1200500, 2, 0, 3, 4, 2.31215],
[1204224, 13, 1, 0, 2, 1.71383],
[1209600, 8, 3, 2, 1, 1.68907],
[1210104, 3, 2, 0, 5, 2.31703],
[1215000, 3, 5, 4, 0, 1.87086],
[1224720, 4, 7, 1, 1, 2.0312],
[1225000, 3, 0, 5, 2, 2.05677],
[1228800, 14, 1, 2, 0, 1.60738],
[1229312, 9, 0, 0, 4, 1.93747],
[1234800, 4, 2, 2, 3, 2.13566],
[1240029, 0, 11, 0, 1, 16.3677],
[1244160, 10, 5, 1, 0, 1.89065],
[1250000, 4, 0, 7, 0, 1.90179],
[1250235, 0, 6, 1, 3, 14.3063],
[1254400, 10, 0, 2, 2, 1.81372],
[1259712, 6, 9, 0, 0, 1.6593],
[1260000, 5, 2, 4, 1, 1.95432],
[1260525, 0, 1, 2, 5, 14.1273],
[1265625, 0, 4, 6, 0, 14.1254],
[1270080, 6, 4, 1, 2, 1.92554],
[1275750, 1, 6, 3, 1, 14.1498],
[1280000, 11, 0, 4, 0, 1.64822],
[1285956, 2, 8, 0, 2, 2.14696],
[1286250, 1, 1, 4, 3, 15.5362],
[1290240, 12, 2, 1, 1, 1.9661],
[1296000, 7, 4, 3, 0, 1.68765],
[1296540, 2, 3, 1, 4, 2.60112],
[1306368, 8, 6, 0, 1, 1.85203],
[1310720, 18, 0, 1, 0, 1.68662],
[1312200, 3, 8, 2, 0, 1.93679],
[1312500, 2, 1, 6, 1, 2.67937],
[1317120, 8, 1, 1, 3, 2.31936],
[1323000, 3, 3, 3, 2, 2.28746],
[1327104, 14, 4, 0, 0, 1.46944],
[1333584, 4, 5, 0, 3, 2.23958],
[1344000, 9, 1, 3, 1, 2.31766],
[1344560, 4, 0, 1, 5, 2.53729],
[1350000, 4, 3, 5, 0, 2.10648],
[1354752, 10, 3, 0, 2, 1.99937],
[1360800, 5, 5, 2, 1, 2.06379],
[1361367, 0, 4, 0, 5, 15.4373],
[1366875, 0, 7, 4, 0, 15.4423],
[1372000, 5, 0, 3, 3, 2.24875],
[1376256, 16, 1, 0, 1, 1.93554],
[1377810, 1, 9, 1, 1, 15.4507],
[1378125, 0, 2, 5, 2, 15.4527],
[1382400, 11, 3, 2, 0, 1.82454],
[1382976, 6, 2, 0, 4, 2.2204],
[1389150, 1, 4, 2, 3, 17.6507],
[1399680, 7, 7, 1, 0, 1.94971],
[1400000, 6, 0, 5, 1, 2.11083],
[1404928, 12, 0, 0, 3, 2.00728],
[1406250, 1, 2, 7, 0, 21.7574],
[1411200, 7, 2, 2, 2, 2.07099],
[1411788, 2, 1, 0, 6, 2.93129],
[1417176, 3, 11, 0, 0, 2.10439],
[1417500, 2, 4, 4, 1, 2.44115],
[1428840, 3, 6, 1, 2, 2.6002],
[1433600, 13, 0, 2, 1, 1.84133],
[1440000, 8, 2, 4, 0, 1.94004],
[1440600, 3, 1, 2, 4, 2.75695],
[1451520, 9, 4, 1, 1, 2.28743],
[1458000, 4, 6, 3, 0, 2.30429],
[1469664, 5, 8, 0, 1, 2.01967],
[1470000, 4, 1, 4, 2, 2.53095],
[1474560, 15, 2, 1, 0, 1.96183],
[1476225, 0, 10, 2, 0, 16.869],
[1481760, 5, 3, 1, 3, 2.57461],
[1488375, 0, 5, 3, 2, 16.8685],
[1492992, 11, 6, 0, 0, 2.00965],
[1500000, 5, 1, 6, 0, 2.50573],
[1500282, 1, 7, 0, 3, 21.7826],
[1500625, 0, 0, 4, 4, 21.8241],
[1505280, 11, 1, 1, 2, 2.51821],
[1512000, 6, 3, 3, 1, 2.33902],
[1512630, 1, 2, 1, 5, 21.8594],
[1518750, 1, 5, 5, 0, 21.8374],
[1524096, 7, 5, 0, 2, 2.16827],
[1530900, 2, 7, 2, 1, 2.66897],
[1531250, 1, 0, 6, 2, 20.6219],
[1536000, 12, 1, 3, 0, 2.29054],
[1536640, 7, 0, 1, 4, 2.47425],
[1543500, 2, 2, 3, 3, 3.11905],
[1548288, 13, 3, 0, 1, 2.02683],
[1555200, 8, 5, 2, 0, 2.02745],
[1555848, 3, 4, 0, 4, | |
False,
# }
# # Check if a record for the licence_activity_id already exists, if
# # not, add.
# if not merged_activities.get(activity.licence_activity_id):
# issued_list = [
# p for p in activity.proposed_purposes.all() if p.is_issued]
# if not len(issued_list):
# continue
# merged_activities[activity.licence_activity_id] = {
# 'licence_activity_id': activity.licence_activity_id,
# 'activity_name_str': activity.licence_activity.name,
# 'issue_date': activity.get_issue_date(),
# 'start_date': activity.get_start_date(),
# 'expiry_date': '\n'.join(['{}'.format(
# p.expiry_date.strftime('%d/%m/%Y') if p.expiry_date else '')
# for p in activity.proposed_purposes.all() if p.is_issued]),
# 'activity_purpose_names_and_status': '\n'.join(['{} ({})'.format(
# p.purpose.name, activity.get_activity_status_display())
# for p in activity.proposed_purposes.all() if p.is_issued]),
# 'can_action':
# {
# 'licence_activity_id': activity.licence_activity_id,
# 'can_renew': activity_can_action['can_renew'],
# 'can_amend': activity_can_action['can_amend'],
# 'can_surrender': activity_can_action['can_surrender'],
# 'can_cancel': activity_can_action['can_cancel'],
# 'can_suspend': activity_can_action['can_suspend'],
# 'can_reissue': activity_can_action['can_reissue'],
# 'can_reinstate': activity_can_action['can_reinstate'],
# }
# }
# else:
# activity_key = merged_activities[activity.licence_activity_id]
# activity_key['activity_purpose_names_and_status'] += \
# '\n' + '\n'.join(['{} ({})'.format(
# p.purpose.name, activity.get_activity_status_display())
# for p in activity.proposed_purposes.all() if p.is_issued and p.purpose in activity.purposes])
# activity_key['expiry_date'] += \
# '\n' + '\n'.join(['{}'.format(
# p.expiry_date.strftime('%d/%m/%Y') if p.expiry_date else None)
# for p in activity.proposed_purposes.all() if p.is_issued and p.purpose in activity.purposes])
# activity_key['can_action']['can_renew'] =\
# activity_key['can_action']['can_renew'] or activity_can_action['can_renew']
# activity_key['can_action']['can_amend'] =\
# activity_key['can_action']['can_amend'] or activity_can_action['can_amend']
# activity_key['can_action']['can_surrender'] =\
# activity_key['can_action']['can_surrender'] or activity_can_action['can_surrender']
# activity_key['can_action']['can_cancel'] =\
# activity_key['can_action']['can_cancel'] or activity_can_action['can_cancel']
# activity_key['can_action']['can_suspend'] =\
# activity_key['can_action']['can_suspend'] or activity_can_action['can_suspend']
# activity_key['can_action']['can_reissue'] =\
# activity_key['can_action']['can_reissue'] or activity_can_action['can_reissue']
# activity_key['can_action']['can_reinstate'] =\
# activity_key['can_action']['can_reinstate'] or activity_can_action['can_reinstate']
# merged_activities_list = merged_activities.values()
# return merged_activities_list
@property
def latest_activities(self):
    '''
    Returns the most recently issued activities.
    '''
    from wildlifecompliance.components.applications.models import (
        ApplicationSelectedActivity)
    accepted = self.get_activities_by_processing_status(
        ApplicationSelectedActivity.PROCESSING_STATUS_ACCEPTED)
    # Activities superseded by a later issue are excluded.
    return accepted.exclude(
        activity_status=ApplicationSelectedActivity.ACTIVITY_STATUS_REPLACED)
@property
def current_activities(self):
    '''Activities on this licence whose activity status is current.'''
    from wildlifecompliance.components.applications.models import (
        ApplicationSelectedActivity
    )
    return self.get_activities_by_activity_status(
        ApplicationSelectedActivity.ACTIVITY_STATUS_CURRENT)
@property
def next_licence_number_id(self):
    '''Next numeric licence-number id, derived from the current maximum.

    Falls back to this record's pk when no licence number exists yet.
    '''
    # Highest licence_number across all licences; the split below assumes
    # numbers look like "L<digits>" — confirm the format convention.
    licence_number_max = WildlifeLicence.objects.all().aggregate(
        Max('licence_number'))['licence_number__max']
    if licence_number_max is None:
        return self.pk
    else:
        # NOTE(review): Max() on a character field is a string max, not a
        # numeric max — "L9" sorts above "L10", so this can repeat or
        # skip ids once numbers exceed one digit unless licence numbers
        # are zero-padded.  Confirm the padding convention.
        return int(licence_number_max.split('L')[1]) + 1
@property
def reference(self):
    '''Full licence reference string: "<licence_number>-<sequence>".'''
    return '{}-{}'.format(self.licence_number, self.licence_sequence)
@property
def is_issued(self):
    '''True when a non-empty licence number has been assigned.'''
    number = self.licence_number
    return number is not None and len(number) > 0
@property
def is_latest_in_category(self):
    '''
    Returns True if the licence is the most recent one of it's category,
    filtered by matching org_applicant, proxy_applicant and submitter.
    '''
    logger.debug('WildlifeLicence.is_latest_in_category() - start')
    # NOTE(review): despite the _id suffixes in the names below, these
    # hold the related objects (not pks) — confirm Django coerces them
    # in the __org_applicant_id lookup.
    organisation_id = self.current_application.org_applicant
    proxy_id = self.current_application.proxy_applicant
    submitter = self.current_application.submitter
    # Applicant filter chosen by precedence: organisation first, then
    # proxy (matching either submitter or proxy_applicant), otherwise a
    # direct submitter with no org/proxy set.
    is_latest = WildlifeLicence.objects.filter(
        Q(current_application__org_applicant_id=organisation_id)
        if organisation_id else
        (
            Q(current_application__submitter=proxy_id) |
            Q(current_application__proxy_applicant=proxy_id)
        ) if proxy_id else Q(
            current_application__submitter=submitter,
            current_application__proxy_applicant=None,
            current_application__org_applicant=None
        )
    ).filter(
        licence_category_id=self.licence_category.id
    ).latest('id') == self
    logger.debug('WildlifeLicence.is_latest_in_category() - end')
    return is_latest
@property
def has_inspection_open(self):
    """
    An attribute indicating a licence inspection is created and opened
    for this License.
    """
    logger.debug('WildlifeLicence.has_inspection_open() - start')
    inspections = LicenceInspection.objects.filter(
        licence=self
    )
    # any() short-circuits on the first active inspection and always
    # yields a bool, replacing the previous list-comprehension +
    # first-element dance (which also evaluated is_active twice).
    inspection_exists = any(i.is_active for i in inspections)
    logger.debug('WildlifeLicence.has_inspection_open() - end')
    return inspection_exists
@property
def status(self):
    '''
    Property defining this Wildlife Licence status. Can be either Current,
    Cancelled, Suspended or Surrendered.
    '''
    return self.get_property_cache_status()
def get_property_cache_status(self):
    '''
    Getter for status on the property cache; None when not cached.
    '''
    try:
        return self.property_cache['status']
    except KeyError:
        return None
def set_property_cache_status(self, status):
    '''
    Setter for status on the property cache; no-op for unsaved records.
    '''
    if not self.id:
        return
    self.property_cache['status'] = status
def get_activities_by_activity_status_ordered(self, status):
    '''
    Get all activities available on this licence by status using the last
    issued application to chain all previous application activities. The
    list is ordered by issue date.
    NOTE: Issue Date is by Activity Purpose not Licence Activity therefore
    getter may not perform correctly.
    '''
    chain = self.current_application.get_activity_chain(
        activity_status=status)
    return chain.order_by('licence_activity_id', '-issue_date')
def get_activities_by_activity_status(self, status):
    '''
    Get all current activities available on this licence by status using
    the last issued application to chain all previous application
    activities.
    '''
    application = self.current_application
    return application.get_current_activity_chain(activity_status=status)
def get_activities_by_processing_status_ordered(self, status):
    '''
    Get all activities available on this licence by processing status using
    the last issued application to chain all previous application
    activities. The list is ordered by issue date.
    NOTE: Issue Date is by Activity Purpose not Licence Activity therefore
    getter may not perform correctly.
    '''
    chain = self.current_application.get_activity_chain(
        processing_status=status)
    return chain.order_by('licence_activity_id', '-issue_date')
def get_activities_by_processing_status(self, status):
    '''
    Get all current activities available on this licence by the processing
    status using the last issued application to chain all previous
    application activities.
    '''
    chain = self.current_application.get_current_activity_chain(
        processing_status=status)
    return chain.order_by('licence_activity_id',)
def get_application_activities_by(
        self, activity_id=None, action=None, purpose_ids=None):
    '''
    Returns the latest list of ApplicationSelectedActivity records for a
    single application.

    Supports actioning by allowing single applications with multiple
    activities and purposes to be actioned by an officer in one process.
    NOTE: Supporting only Reissue.

    :param activity_id: optional LicenceActivity pk used for filtering.
    :param action: optional WildlifeLicence ACTIVITY_PURPOSE_ACTION_* value.
    :param purpose_ids: LicencePurpose pks (consulted for reissue only).
    :return: ApplicationSelectedActivity records for a single application
        (may be empty).
    '''
    acts = self.get_latest_activities_for_licence_activity_and_action(
        activity_id,
        action
    )
    if action == WildlifeLicence.ACTIVITY_PURPOSE_ACTION_REISSUE:
        activities = self.latest_activities
        acts = activities.filter(
            proposed_purposes__purpose_id__in=purpose_ids)
        # Guard: indexing acts[0] on an empty queryset raises IndexError;
        # with no matching activities there is nothing to action.
        if not acts:
            return acts
        # Restrict the result to a single application (the first match)
        # so only one application is actioned at a time.
        first = acts[0]
        acts = acts.filter(application_id=first.application_id)
    return acts
def get_latest_activities_for_licence_activity_and_action(
        self, licence_activity_id=None, action=None):
    '''
    Return a list of ApplicationSelectedActivity records for the licence
    Filter by licence_activity_id (optional) and/or specified action
    (optional).
    '''
    # Only the latest licence in its category for the applicant may be
    # actioned; anything else yields no candidates.
    if not self.is_latest_in_category:
        return []

    # Maps each supported action to the can_action() flag permitting it.
    action_flag = {
        WildlifeLicence.ACTIVITY_PURPOSE_ACTION_CANCEL: 'can_cancel',
        WildlifeLicence.ACTIVITY_PURPOSE_ACTION_SUSPEND: 'can_suspend',
        WildlifeLicence.ACTIVITY_PURPOSE_ACTION_SURRENDER: 'can_surrender',
        WildlifeLicence.ACTIVITY_PURPOSE_ACTION_REACTIVATE_RENEW:
            'can_reactivate_renew',
        WildlifeLicence.ACTIVITY_PURPOSE_ACTION_REINSTATE: 'can_reinstate',
        WildlifeLicence.ACTIVITY_PURPOSE_ACTION_REISSUE: 'can_reissue',
    }

    latest_activities = self.latest_activities
    if licence_activity_id:
        latest_activities = latest_activities.filter(
            licence_activity_id=licence_activity_id)

    if action:
        flag = action_flag.get(action)
        purposes_in_open_applications = \
            self.get_purposes_in_open_applications()
        actionable_ids = []
        for activity in latest_activities:
            permissions = activity.can_action(
                purposes_in_open_applications)
            if flag and permissions[flag]:
                actionable_ids.append(activity.id)
        latest_activities = latest_activities.filter(
            id__in=actionable_ids)

    return latest_activities
def get_latest_purposes_for_licence_activity_and_action(
        self, licence_activity_id=None, action=None):
    '''
    Return a list of LicencePurpose records for the licence Filter by
    licence_activity_id (optional) and/or specified action (optional)
    Exclude purposes that are currently in an application being processed.
    TODO:AYN REDUNDANT replace with LicenceActioner.
    '''
    open_purpose_ids = self.get_purposes_in_open_applications()
    activities = \
        self.get_latest_activities_for_licence_activity_and_action(
            licence_activity_id, action
        )
    # Purposes not already tied up in an open application.
    actionable_ids = [
        purpose.id
        for activity in activities
        for purpose in activity.purposes
        if purpose.id not in open_purpose_ids
    ]
    return LicencePurpose.objects.filter(
        id__in=actionable_ids
    ).distinct()
def get_latest_purposes_for_licence(self, licence_activity_id):
    '''
    Return a list of LicencePurpose records for the licence. Exclude
    purposes that are currently in an application being processed.
    TODO:AYN REDUNDANT replace with LicenceActioner.
    '''
    from wildlifecompliance.components.applications.models import (
        ApplicationSelectedActivity,
        ApplicationSelectedActivityPurpose,
    )
    status = {
        ApplicationSelectedActivity.ACTIVITY_STATUS_CURRENT,
        ApplicationSelectedActivity.ACTIVITY_STATUS_REPLACED,
        ApplicationSelectedActivity.ACTIVITY_STATUS_SUSPENDED,
    }
    open_purpose_ids = self.get_purposes_in_open_applications()
    latest = self.current_application.get_current_activity_chain(
        activity_status__in=status
    ).filter(licence_activity_id=licence_activity_id)
    # Issued proposed-purposes that are not part of an open application.
    selected_ids = [
        proposed.id
        for activity in latest
        for proposed in activity.proposed_purposes.all()
        if proposed.purpose.id not in open_purpose_ids
        and proposed.is_issued
    ]
    ISSUED = ApplicationSelectedActivityPurpose.PROCESSING_STATUS_ISSUED
    return ApplicationSelectedActivityPurpose.objects.filter(
        id__in=selected_ids,
        processing_status=ISSUED,
    ).distinct()
def get_activities_in_open_applications(self):
    '''
    Get selected activities which are currently in an application being
    processed.
    :return list of ApplicationSelectedActivity records.
    '''
    from wildlifecompliance.components.applications.models import (
        Application, ApplicationSelectedActivity)
    logger.debug('WildlifeLicence.get_activities_open_apps() - start')
    open_activities = []
    # Applicant filter built from a chained conditional expression:
    # prefer the organisation applicant, then the proxy applicant,
    # otherwise match the submitter with no proxy/organisation set.
    open_applications = Application.objects.filter(
        Q(org_applicant=self.current_application.org_applicant)
        if self.current_application.org_applicant
        else Q(proxy_applicant=self.current_application.proxy_applicant)
        if self.current_application.proxy_applicant
        else Q(
            submitter=self.current_application.submitter,
            proxy_applicant=None,
            org_applicant=None)
    ).computed_filter(
        licence_category_id=self.licence_category.id
    ).exclude(
        # Exclude applications that have a selected activity in a
        # finalised state (accepted, declined or discarded).
        selected_activities__processing_status__in=[
            ApplicationSelectedActivity.PROCESSING_STATUS_ACCEPTED,
            ApplicationSelectedActivity.PROCESSING_STATUS_DECLINED,
            ApplicationSelectedActivity.PROCESSING_STATUS_DISCARDED
        ]
    )
    # Flatten the activities of all remaining (open) applications.
    for application in open_applications:
        activities = application.activities
        if activities:
            open_activities += activities
    logger.debug('WildlifeLicence.get_activities_open_apps() - end')
    return open_activities
def get_purposes_in_open_applications(self, as_objects=False):
    """
    Get list of LicencePurpose for applications on this licence currently
    being processed. The list can be either LicencePurpose objects or
    LicencePurpose identifiers.
    :param: as_objects flag to determine list as objects or identifiers.
    :return: list of LicencePurpose.
    """
    from wildlifecompliance.components.applications.models import (
        Application, ApplicationSelectedActivity)
    logger.debug('WildlifeLicence.get_purposes_in_open_apps() - start')
    # Applicant filter built from a chained conditional expression:
    # prefer the organisation applicant, then the proxy applicant,
    # otherwise match the submitter with no proxy/organisation set.
    open_applications = Application.objects.filter(
        Q(org_applicant=self.current_application.org_applicant)
        if self.current_application.org_applicant
        else Q(proxy_applicant=self.current_application.proxy_applicant)
        if self.current_application.proxy_applicant
        else Q(
            submitter=self.current_application.submitter,
            proxy_applicant=None,
            org_applicant=None)
    ).computed_filter(
        licence_category_id=self.licence_category.id
    ).exclude(
        # Exclude applications that have a selected activity in a
        # finalised state (accepted, declined or discarded).
        selected_activities__processing_status__in=[
            ApplicationSelectedActivity.PROCESSING_STATUS_ACCEPTED,
            ApplicationSelectedActivity.PROCESSING_STATUS_DECLINED,
            ApplicationSelectedActivity.PROCESSING_STATUS_DISCARDED
        ]
    )
    # Default result: purpose identifiers. Querysets are lazy, so this
    # values_list is never evaluated when as_objects replaces it below.
    open_purposes = open_applications.values_list(
        'licence_purposes',
        flat=True)
    if as_objects:
        open_purposes = []
        for a in open_applications:
            open_purposes += a.licence_purposes.all()
    logger.debug('WildlifeLicence.get_purposes_in_open_apps() - end')
    return open_purposes
def get_proposed_purposes_in_open_applications(self):
    '''
    Get proposed purposes which are currently in an application being
    processed.
    :return list of ApplicationSelectedActivityPurpose records.
    '''
    from wildlifecompliance.components.applications.models import (
        Application, ApplicationSelectedActivity)
    logger.debug('WildlifeLicence.get_proposed_purposes_in_open() - start')
    open_purposes = []
    # Applicant filter built from a chained conditional expression:
    # prefer the organisation applicant, then the proxy applicant,
    # otherwise match the submitter with no proxy/organisation set.
    open_applications = Application.objects.filter(
        Q(org_applicant=self.current_application.org_applicant)
        if self.current_application.org_applicant
        else Q(proxy_applicant=self.current_application.proxy_applicant)
        if self.current_application.proxy_applicant
        else Q(
            submitter=self.current_application.submitter,
            proxy_applicant=None,
            org_applicant=None)
    ).computed_filter(
        licence_category_id=self.licence_category.id
    ).exclude(
        # Exclude applications that have a selected activity in a
        # finalised state (accepted, declined or discarded).
        selected_activities__processing_status__in=[
            ApplicationSelectedActivity.PROCESSING_STATUS_ACCEPTED,
            ApplicationSelectedActivity.PROCESSING_STATUS_DECLINED,
            ApplicationSelectedActivity.PROCESSING_STATUS_DISCARDED
        ]
    )
    # Flatten the proposed purposes of all remaining (open) applications.
    for application in open_applications:
        purposes = application.get_proposed_purposes()
        if purposes:
            open_purposes += purposes
    logger.debug('WildlifeLicence.get_proposed_purposes_in_open() - end')
    return open_purposes
def get_proposed_purposes_in_applications(self):
'''
Return a | |
# Repository: okcashpro/okshop
from django.test import TestCase, Client
from django.contrib.auth.models import User
from .models import *
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from io import BytesIO
import pyotp
import json
# Create your tests here.
class RegisterTestCase(TestCase):
    """Exercise the shop registration view for valid and invalid input.

    Uses ``assertEqual`` throughout: ``assertEquals`` is a deprecated
    alias removed in Python 3.12.
    """

    def setUp(self):
        # Pre-existing user so duplicate username/email cases can be hit.
        self.u1 = User.objects.create_user('u1', '<EMAIL>', '')
        ue1 = UserExtra(user=self.u1)
        ue1.save()
        self.u1.save()

    def test_user_register_all_valid(self):
        # A fully valid registration should produce only success messages.
        response = self.client.post(reverse('shop:register'), {
            'username': 'u3',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'passwordconfirm': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertEqual(m.tags, 'success')

    def test_user_register_invalid_email(self):
        # An invalid email must not yield a success message.
        response = self.client.post(reverse('shop:register'), {
            'username': 'u4',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'passwordconfirm':
            '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_register_password_too_short(self):
        response = self.client.post(reverse('shop:register'), {
            'username': 'u5',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'passwordconfirm': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_register_password_mismatch(self):
        # Mismatching password/confirmation must not succeed.
        response = self.client.post(reverse('shop:register'), {
            'username': 'u6',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'passwordconfirm': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_register_username_in_use(self):
        # 'u1' was created in setUp, so this username is taken.
        response = self.client.post(reverse('shop:register'), {
            'username': 'u1',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'passwordconfirm':
            '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_email_in_use(self):
        # Registering with an email already on file must not succeed.
        response = self.client.post(reverse('shop:register'), {
            'username': 'u7',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'passwordconfirm': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_invalid_username(self):
        response = self.client.post(reverse('shop:register'), {
            'username': 'u3',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'passwordconfirm': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')
class LoginTestCase(TestCase):
    """Exercise the shop login view with and without two-factor auth.

    Uses ``assertEqual`` throughout: ``assertEquals`` is a deprecated
    alias removed in Python 3.12.
    """

    def setUp(self):
        # Verified user without 2FA.
        self.u1 = User.objects.create_user('_u1', '<EMAIL>',
                                           'p4ssw<PASSWORD>')
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        self.u1.save()
        # Unverified user (login must be refused).
        self.u2 = User.objects.create_user('_u2', '<EMAIL>',
                                           '<PASSWORD>')
        ue2 = UserExtra(user=self.u2, verified=False)
        ue2.save()
        self.u2.save()
        # Verified user with a TOTP authenticator (secret 'test').
        self.u3 = User.objects.create_user('_u3', '<EMAIL>',
                                           '<PASSWORD>')
        ue3 = UserExtra(user=self.u3, verified=True, authenticator_id='test',
                        authenticator_verified=True)
        ue3.save()
        # Fix: previously saved self.u1 a second time; u3 is the user
        # configured in this block.
        self.u3.save()

    def test_login_all_valid_no_2fa(self):
        response = self.client.post(reverse('shop:login'), {
            'username': '_u1',
            'password': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(list(response.context['messages'])[0]),
                         'Welcome back, _u1!')

    def test_login_all_invalid_no_2fa(self):
        # Unknown username must not produce a success message.
        response = self.client.post(reverse('shop:login'), {
            'username': 'invalidname',
            'password': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_login_invalid_pass_no_2fa(self):
        response = self.client.post(reverse('shop:login'), {
            'username': '_u1',
            'password': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_login_not_verified(self):
        # u2 is not verified; login must be refused.
        response = self.client.post(reverse('shop:login'), {
            'username': '_u2',
            'password': '<PASSWORD>'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_login_all_valid_2fa(self):
        # Generate the current TOTP code for u3's secret.
        totp = pyotp.TOTP('test')
        response = self.client.post(reverse('shop:login'), {
            'username': '_u3',
            'password': '<PASSWORD>',
            '2facode': totp.now()
        }, follow=True)
        self.assertEqual(str(list(response.context['messages'])[0]),
                         'Welcome back, _u3!')

    def test_login_invalid_2fa(self):
        # Missing/empty 2FA code must not produce a success message.
        response = self.client.post(reverse('shop:login'), {
            'username': '_u3',
            'password': '<PASSWORD>',
            '2facode': ''
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')
class TestUploadFiles(TestCase):
    """Exercise the digital-file upload endpoint (shop:uploadfile)."""

    def setUp(self):
        # Seller who owns the product under test.
        self.u1 = User.objects.create_user('__u1', '', '<PASSWORD>')
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        self.u1.save()
        # Digital (non-physical) product owned by u1.
        self.p1 = Product(
            product_name='T',
            product_description='d',
            price=0,
            physical=False,
            seller=self.u1
        )
        self.p1.save()
        # Second user with no permission over p1.
        self.u2 = User.objects.create_user('__u2', '', '<PASSWORD>')
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        self.u2.save()

    def test_upload_product_not_found(self):
        # Uploading to a non-existent product id must 404.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': '291827346271725623'}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 'n'
            }
        )
        self.assertEqual(r.status_code, 404)

    def test_upload_product_not_logged_in(self):
        # Anonymous upload must not return 200 (e.g. redirect to login).
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 'n'
            }
        )
        self.assertNotEqual(r.status_code, 200)

    def test_upload_product_no_permission(self):
        # u2 does not own p1, so the upload must be forbidden.
        self.client.login(username=self.u2.username, password='<PASSWORD>')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 'n'
            }
        )
        self.assertEqual(r.status_code, 403)

    def test_upload_incomplete_request(self):
        # Missing both file and name must be a bad request.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {}
        )
        self.assertEqual(r.status_code, 400)

    def test_upload_name_too_big(self):
        # 201-character name exceeds the allowed length.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 'a'*201
            }
        )
        self.assertEqual(r.status_code, 400)

    def test_upload_no_name(self):
        # Empty name must be rejected.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(reverse(
            'shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': ''
            }
        )
        self.assertEqual(r.status_code, 400)
    # Can't seem to fake file size... I'll have to rely on my intuition
    """def test_upload_file_too_large(self):
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': InMemoryUploadedFile(
                    BytesIO(b"d"),
                    None,
                    'file.txt',
                    "text/txt",
                    10**10,
                    None,
                    None
                ),
                'name': 's'
            }
        )
        self.assertEqual(r.status_code, 400)"""

    def test_upload_all_fine(self):
        # Valid upload by the product's owner must succeed.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 's'
            }
        )
        # TODO: Get this to work on py3.5
        """rjson = json.loads(str(r.content))
        file = DigitalFile.objects.get(id=rjson['file'])
        self.assertEqual(file.file.read(), b't')"""
        self.assertEqual(r.status_code, 200)
class TestDeleteFile(TestCase):
    """Exercise the digital-file deletion endpoint (shop:deletefile)."""

    def setUp(self):
        # Seller who owns the product and its files.
        self.u1 = User.objects.create_user('___u1', '', '<PASSWORD>')
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        self.u1.save()
        # Second user with no permission over u1's files.
        self.u2 = User.objects.create_user('___u2', '', '<PASSWORD>')
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        # Fix: previously saved self.u1 a second time; u2 is the user
        # configured in this block.
        self.u2.save()
        self.p1 = Product(product_name='T', product_description='d', price=0,
                          physical=False, seller=self.u1)
        self.p1.save()
        # Two files: file1 is kept, file2 is deleted by the happy-path test.
        self.file1 = DigitalFile(
            file=SimpleUploadedFile("file.txt", b"t", content_type="text/txt"),
            name='test',
            product=self.p1
        )
        self.file1.save()
        self.file2 = DigitalFile(
            file=SimpleUploadedFile("file.txt", b"t", content_type="text/txt"),
            name='test',
            product=self.p1
        )
        self.file2.save()

    def test_file_not_logged_in(self):
        # Anonymous deletion must not return 200.
        r = self.client.get(reverse('shop:deletefile',
                                    kwargs={'id': self.file1.id}))
        self.assertNotEqual(r.status_code, 200)

    def test_file_no_permission(self):
        # u2 does not own file1, so deletion must be forbidden.
        self.client.login(username=self.u2.username, password='<PASSWORD>')
        r = self.client.get(reverse('shop:deletefile',
                                    kwargs={'id': self.file1.id}))
        self.assertEqual(r.status_code, 403)

    def test_file_not_exists(self):
        # Deleting a non-existent file id must 404.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.get(reverse('shop:deletefile',
                                    kwargs={'id': 2912787347128272}))
        self.assertEqual(r.status_code, 404)

    def test_file_all_fine(self):
        # Owner deletes their own file; follow the redirect to a 200 page.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.get(reverse('shop:deletefile',
                                    kwargs={'id': self.file2.id}), follow=True)
        self.assertEqual(r.status_code, 200)
class CheckoutTestCase(TestCase):
    """Exercise the multi-step checkout flow (shop:checkout)."""

    def setUp(self):
        # Three buyers with different wallet configurations.
        self.u1 = User.objects.create_user('____u1', '', '<PASSWORD>')
        self.u1.save()
        self.u2 = User.objects.create_user('____u2', '', '<PASSWORD>')
        self.u2.save()
        self.u3 = User.objects.create_user('____u3', '', '<PASSWORD>')
        self.u3.save()
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        ue3 = UserExtra(user=self.u3, verified=True)
        ue3.save()
        # u1: a single empty wallet.
        w = Wallet(user=self.u1)
        w.save()
        # u2: two empty wallets; u3: two funded wallets.
        # NOTE(review): negative `redeemed` appears to credit the wallet
        # with spendable balance -- confirm against the Wallet model.
        w1 = Wallet(user=self.u2)
        w2 = Wallet(user=self.u2, label='2')
        w3 = Wallet(user=self.u3, label='3', redeemed=Decimal(-10000))
        w4 = Wallet(user=self.u3, label='3', redeemed=Decimal(-500))
        w1.save()
        w2.save()
        w3.save()
        w4.save()
        # Free digital product.
        self.p1 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=False,
            stock=10
        )
        self.p1.save()
        # Free physical product with free worldwide shipping.
        self.p2 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=True,
            stock=10,
            worldwide_shipping=True,
            free_shipping=True
        )
        self.p2.save()
        # Product nobody here can afford.
        self.expensiveproduct = Product(
            product_name='t',
            seller=self.u1,
            price=2**32,
            stock=10
        )
        self.expensiveproduct.save()
        # Product u3's wallets can cover.
        self.reasonableproduct = Product(
            product_name='t',
            seller=self.u1,
            price=10,
            stock=10
        )
        self.reasonableproduct.save()
        # Product with zero stock.
        self.outofstock = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            stock=0
        )
        self.outofstock.save()

    def test_checkout_not_logged_in(self):
        r = self.client.get(reverse('shop:checkout'))
        self.assertNotEqual(r.status_code, 200)

    def test_checkout_cart_empty(self):
        # Checkout with an empty cart must be refused.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        self.u1.userextra.clear_cart()
        r = self.client.get(reverse('shop:checkout'))
        self.assertNotEqual(r.status_code, 200)

    def test_checkout_no_money(self):
        # Checkout must fail when the cart total exceeds wallet funds.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.expensiveproduct)
        r = self.client.get(reverse('shop:checkout'))
        self.assertNotEqual(r.status_code, 200)

    def test_checkout_outofstock(self):
        # Checkout must fail for an out-of-stock product.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.outofstock)
        r = self.client.get(reverse('shop:checkout'))
        self.assertNotEqual(r.status_code, 200)

    def test_physical_one_wallet_free(self):
        # Physical product -> step 1 (address form) is shown.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.p2)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout1.html')

    def test_physical_one_wallet_free_incomplete_data(self):
        # Posting the checkout token without address data must warn.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.p2)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout1.html')
        c = r.context['checkout']
        r = self.client.post(reverse('shop:checkout'),
                             {'checkout': str(c.uuid)})
        self.assertGreater(len(r.context['messages']), 0)

    def test_physical_one_wallet_free_new_address(self):
        # Full happy path: address form -> confirmation -> redirect.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.p2)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout1.html')
        c = r.context['checkout']
        r = self.client.post(reverse('shop:checkout'), {
            'checkout': str(c.uuid),
            'name': "Mr. Testing",
            'address1': "Somewhere, Norcross",
            'state': "GA",
            'country': "US",
            'zip': "30092",
            'use_custom_address': ""
        })
        self.assertTemplateUsed(r, 'shop/checkout3.html')
        r = self.client.post(reverse('shop:checkout'),
                             {'checkout': str(c.uuid), 'confirm': ''})
        self.assertEqual(r.status_code, 302)

    def test_digital_one_wallet_free(self):
        # Free digital product skips straight to the confirmation step.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.p1)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout3.html')

    def test_digital_multiple_wallets_free(self):
        # Free product: no wallet-selection step even with several wallets.
        self.client.login(username=self.u2.username, password='<PASSWORD>')
        self.u2.userextra.clear_cart()
        self.u2.userextra.add_to_cart(self.p1)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout3.html')

    def test_digital_multiple_wallets_enough_money(self):
        # Paid product with several funded wallets -> wallet-selection step.
        self.client.login(username=self.u3.username, password='<PASSWORD>')
        self.u3.userextra.clear_cart()
        self.u3.userextra.add_to_cart(self.reasonableproduct)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout2.html')
class ReviewTestCase(TestCase):
    """Exercise posting reviews on a product page (shop:viewproduct)."""

    def setUp(self):
        # These names are getting ridiculous
        self.u1 = User.objects.create_user('______u1', '', '<PASSWORD>')
        self.u1.save()
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        c = Cart(user=self.u1)
        c.save()
        self.u2 = User.objects.create_user('______u2', '', '<PASSWORD>')
        self.u2.save()
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        c2 = Cart(user=self.u2)
        c2.save()
        # p1 is purchased by both users; p2 by neither.
        self.p1 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=False,
            stock=10
        )
        self.p1.save()
        self.p2 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=False,
            stock=10
        )
        self.p2.save()
        # u1 purchased p1, making them eligible to review it.
        self.pur = Purchase(by=self.u1)
        self.pur.save()
        pi = PurchaseItem(purchase=self.pur, price=Decimal(0), product=self.p1)
        pi.save()
        # u2 also purchased p1 (for the edit-review test).
        self.pur2 = Purchase(by=self.u2)
        self.pur2.save()
        pi2 = PurchaseItem(purchase=self.pur2, price=Decimal(0),
                           product=self.p1)
        pi2.save()

    def test_post_not_logged_in(self):
        # Anonymous review must be redirected and not stored.
        self.client.logout()
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'post_not_logged_in',
            'rating': 3,
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(r.status_code, 302)
        self.assertEqual(0,
                         self.p1.review_set.filter(title='post_not_logged_in')
                         .count())

    def test_post_not_owned(self):
        # u1 never purchased p2, so the review must not be stored.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p2.id}), {
            'title': 'post_not_owned',
            'rating': 3,
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(0,
                         self.p2.review_set.filter(title='post_not_owned')
                         .count())

    def test_post_owned_title_too_long(self):
        # 200-character title exceeds the allowed length.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'a'*200,
            'rating': 3,
            'review': 'test_post_too_long'
        })
        self.assertEqual(0,
                         self.p1.review_set.filter(review='test_post_too_long')
                         .count())

    def test_post_owned_rate_too_high(self):
        # Ratings above 5 must be rejected.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'test_post_rate_high',
            'rating': 6,
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(0,
                         self.p1.review_set.filter(title='test_post_rate_high')
                         .count())

    def test_post_owned_rate_too_low(self):
        # Ratings below 1 must be rejected.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'test_post_rate_low',
            'rating': 0,
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(0,
                         self.p1.review_set.filter(title='test_post_rate_low')
                         .count())

    def test_post_owned_rate_invalid(self):
        # Non-numeric rating must be rejected.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'test_post_rate_bad',
            'rating': 'neat',
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(0,
                         self.p1.review_set.filter(title='test_post_rate_bad')
                         .count())

    def test_post_owned_all_fine(self):
        # Valid review by a purchaser must be stored.
        self.client.login(username=self.u1.username, password='<PASSWORD>')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'test_post_fine',
            'rating': 4,
            'review': 'This should have been posted'
        })
        self.assertEqual(1,
                         self.p1.review_set.filter(title='test_post_fine')
                         .count())

    def test_post_owned_edit(self):
        # Posting a second review must replace the first, not add one.
        self.client.login(username=self.u2.username, password='<PASSWORD>')
        self.client.post(reverse('shop:viewproduct',
                                 kwargs={'id': self.p1.id}), {
            'title': 't',
            'rating': 4,
            'review': 'This shouldn\'t have been posted'
        })
        self.client.post(reverse('shop:viewproduct',
                                 kwargs={'id': self.p1.id}), {
            'title': 'test_post_edit',
            'rating': 4,
            'review': 'This should have been posted'
        })
        self.assertEqual(0, self.p1.review_set.filter(title='t').count())
        self.assertEqual(1,
                         self.p1.review_set.filter(title='test_post_edit')
                         .count())
class DeleteReviewTestCase(TestCase):
def setUp(self):
self.u1 = User.objects.create_user('_______u1', '', '<PASSWORD>')
self.u1.save()
ue1 = UserExtra(user=self.u1, verified=True)
ue1.save()
c = Cart(user=self.u1)
c.save()
self.u2 = User.objects.create_user('_______u2', '', '<PASSWORD>')
self.u2.save()
ue2 = UserExtra(user=self.u2, verified=True)
ue2.save()
c2 = Cart(user=self.u2)
c2.save()
self.p1 = Product(
product_name='t',
seller=self.u1,
price=0,
physical=False,
stock=10
)
| |
# Repository: gatgui/excons -- file: tools/python.py
# MIT License
#
# Copyright (c) 2009 <NAME>
#
# This file is part of excons.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import SCons.Script # pylint: disable=import-error
import os
import re
import sys
import subprocess
import excons
import distutils
import distutils.sysconfig
# pylint: disable=bad-indentation,global-statement,unused-argument,deprecated-lambda
def GetOptionsString():
    """Return the help text describing the python-related build options."""
    return """PYTHON OPTIONS
  with-python=<str> : Python version or prefix [current interpreter]"""
def _GetPythonVersionOSX(pythonPath):
    # On osx, pythonPath must be the path to the python framework
    # i.e. with-python=/System/Library/Frameworks/Python.framework
    #
    # Returns the version string pointed at by the framework's "Current"
    # symlink, or None if it cannot be determined.
    #
    # universal_newlines=True makes communicate() return str instead of
    # bytes, so the textual regex below also works on Python 3 (where
    # re.search with a str pattern raises TypeError on bytes).
    p = subprocess.Popen("ls -l %s/Versions | grep Current" % pythonPath,
                         shell=True, stdout=subprocess.PIPE,
                         universal_newlines=True)
    out, _ = p.communicate()
    m = re.search(r"Current\s+->\s+(%s/Versions/)?([0-9\.]+)" % pythonPath, out)
    if m is not None:
        return m.group(2)
    return None
def _GetPythonVersionWIN(pythonPath):
    # On windows, pythonPath must be the path to the python executable
    # i.e. with-python=C:/Python27/python.exe
    #
    # Returns "<major>.<minor>" derived from the python DLL found next to
    # the executable, or None if it cannot be determined.
    dn = os.path.dirname(pythonPath)
    fl = excons.glob(excons.joinpath(dn, "python*.dll"))
    if len(fl) == 1:
        # (\d+) for the minor version: the previous r"python(\d)(\d)\.dll"
        # failed to match multi-digit minors such as python310.dll.
        m = re.match(r"python(\d)(\d+)\.dll", os.path.basename(fl[0]),
                     re.IGNORECASE)
        if m is not None:
            return "%s.%s" % (m.group(1), m.group(2))
    return None
def _GetPythonVersionUNIX(pythonPath):
    # On unix, pythonPath must be the path to the python executable
    # i.e. with-python=/usr/local/bin/python
    #
    # Returns the version of the libpython the executable links against,
    # or None if it cannot be determined.
    #
    # universal_newlines=True makes communicate() return str instead of
    # bytes, so the textual regex below also works on Python 3 (where
    # re.search with a str pattern raises TypeError on bytes).
    p = subprocess.Popen("ldd %s | grep libpython" % pythonPath,
                         shell=True, stdout=subprocess.PIPE,
                         universal_newlines=True)
    out, _ = p.communicate()
    m = re.search(r"libpython([0-9\.]+)\.so", out)
    if m is not None:
        return m.group(1)
    return None
# Cache of previously resolved python specs, keyed by the raw spec string.
_specCache = {}
def _GetPythonSpec(specString):
global _specCache
if specString in _specCache:
return _specCache[specString]
spec = None
specErr = ""
plat = str(SCons.Script.Platform())
if re.match(r"\d+\.\d+", specString):
ver = specString
# Look in standard locations
if plat == "darwin":
searchPaths = ["/System/Library/Frameworks", "/Library/Frameworks"]
for searchPath in searchPaths:
pythonPath = excons.joinpath(searchPath, "Python.framework", "Versions", ver)
if os.path.isdir(pythonPath):
incdir = None
for isd in ("include/python%s" % ver, "Headers"):
_incdir = pythonPath + "/" + isd
if os.path.isdir(_incdir):
incdir = _incdir
break
if incdir is not None:
if ver == _GetPythonVersionOSX(excons.joinpath(searchPath, "Python.framework")):
spec = (ver, incdir, searchPath, "Python")
specErr = ""
break
else:
spec = (ver, incdir, None, "%s/Python" % (pythonPath))
specErr = ""
break
else:
specErr += "\n Cannot find python %s include directory in %s" % (ver, pythonPath)
else:
specErr += "\n Cannot find python %s in %s" % (ver, searchPath)
elif plat == "win32":
pythonPath = "C:\\Python%s" % ver.replace(".", "")
if os.path.isdir(pythonPath):
incdir = excons.joinpath(pythonPath, "include")
libdir = excons.joinpath(pythonPath, "libs")
lib = "python%s" % ver.replace(".", "")
spec = (ver, incdir, libdir, lib)
else:
searchPaths = ["/usr", "/usr/local"]
for searchPath in searchPaths:
pythonPath = excons.joinpath(searchPath, "bin", "python%s" % ver)
if not os.path.isfile(pythonPath):
pythonPath = excons.joinpath(searchPath, "python")
if os.path.isfile(pythonPath) and _GetPythonVersionUNIX(pythonPath) == ver:
spec = (ver, searchPath)
break
else:
spec = (ver, searchPath)
break
if spec:
ver, prefix = spec
incdir = excons.joinpath(prefix, "include", "python%s" % ver)
libdir = excons.joinpath(prefix, ("lib64" if excons.Build64() else "lib"))
lib = "python%s" % ver
spec = (ver, incdir, libdir, lib)
if spec is None:
curver = str(distutils.sysconfig.get_python_version())
specErr += "\n"
if curver != ver:
excons.PrintOnce("Couldn't find stock python %s.%sCurrent version doesn't match (%s), aborting build." % (ver, specErr, curver), tool="python")
sys.exit(1)
else:
excons.PrintOnce("Couldn't find stock python %s.%sUse currently running version instead." % (ver, specErr), tool="python")
else:
if plat == "darwin":
if specString[-1] == "/":
specString = specString[:-1]
m = re.search(r"/([^/]+)\.framework/Versions/([^/]+)/?$", specString)
if m:
fwn = m.group(1)
ver = m.group(2)
fw = "%s/%s" % (specString, fwn)
fwh = "%s/Headers" % specString
if not os.path.isdir(fwh):
fwh = "%s/include/python%s" % (specString, ver)
if os.path.isfile(fw) and os.path.isdir(fwh):
# if it is the current version, use framework directory
fwd = re.sub(r"/Versions/.*$", "", specString)
if ver == _GetPythonVersionOSX(fwd):
spec = (ver, fwh, os.path.dirname(fwd), fwn)
else:
spec = (ver, fwh, None, fw)
else:
if not os.path.isfile(fwh):
specErr += "\n Cannot find python %s include directory in %s" % (ver, specString)
if not os.path.isfile(fw):
specErr += "\n Cannot find python framework in %s" % specString
else:
ver = _GetPythonVersionOSX(specString)
if ver is not None:
d = os.path.dirname(specString)
n = os.path.splitext(os.path.basename(specString))[0]
incdir = None
for isd in ("include/python%s" % ver, "Headers"):
_incdir = "%s/Versions/%s/%s" % (specString, ver, isd)
if os.path.isdir(_incdir):
incdir = _incdir
break
if incdir is not None:
spec = (ver, incdir, d, n)
else:
specErr += "\n Cannot find python %s include directory in %s" % (ver, specString)
elif plat == "win32":
ver = _GetPythonVersionWIN(specString)
if ver is not None:
d = os.path.dirname(specString)
incdir = excons.joinpath(d, "include")
libdir = excons.joinpath(d, "libs")
lib = "python%s" % ver.replace(".", "")
spec = (ver, incdir, libdir, lib)
else:
ver = _GetPythonVersionUNIX(specString)
if ver is not None:
# not specString but 2 dirs up (as specString is the path to the python executable)
d = os.path.dirname(specString)
if os.path.basename(d) == "bin":
d = os.path.dirname(d)
incdir = excons.joinpath(d, "include", "python%s" % ver)
libdir = excons.joinpath(d, ("lib64" if excons.Build64() else "lib"))
lib = "python%s" % ver
spec = (ver, incdir, libdir, lib)
if spec is None:
specErr += "\n"
excons.PrintOnce("Invalid python specification \"%s\".%sAborting build." % (specErr, specString), tool="python")
sys.exit(1)
# check setup validity
if spec is not None:
if plat == "darwin":
_, incdir, fwdir, fw = spec
if fwdir is None:
# directly linking version specific framework
if not os.path.isdir(incdir) or not os.path.isfile(fw):
spec = None
else:
if not os.path.isdir(incdir) or not os.path.isdir(fwdir):
spec = None
else:
ver, incdir, libdir, lib = spec
if not os.path.isdir(incdir) or not os.path.isdir(libdir):
spec = None
else:
if plat == "win32":
if not os.path.isfile(excons.joinpath(libdir, "%s.lib" % lib)):
spec = None
else:
if not os.path.isfile(os.path.join(libdir, "lib%s.so" % lib)):
spec = None
if spec is None:
excons.PrintOnce("Invalid python specification \"%s\". Aborting build." % specString, tool="python")
sys.exit(1)
excons.PrintOnce("Resolved python for \"%s\": %s" % (specString, ('<current>' if spec is None else spec)), tool="python")
_specCache[specString] = spec
return spec
def Version():
    """Return the python version ("X.Y") that the build will target."""
    requested = excons.GetArgument("with-python")
    if requested is not None:
        resolved = _GetPythonSpec(requested)
        if resolved is not None:
            return resolved[0]
    # No (usable) 'with-python' given: use the running interpreter's version.
    return str(distutils.sysconfig.get_python_version())
def Require(e, ignoreLinkFlags=False):
    """Add python compile (and optionally link) settings to SCons env 'e'.

    If the 'with-python' argument resolves to a spec, that python's
    include/lib/framework settings are used; otherwise the interpreter
    running this script provides the defaults via distutils.sysconfig.
    When ignoreLinkFlags is True only compile flags are added (used by
    SoftRequire on OSX with '-undefined dynamic_lookup').
    """
    po = excons.GetArgument("with-python")
    if po is not None:
        rv = _GetPythonSpec(po)
        if rv is not None:
            ver, incdir, libdir, lib = rv
            plat = str(SCons.Script.Platform())
            e.Append(CCFLAGS=" -DPY_VER=%s" % ver)
            e.Append(CPPPATH=[incdir])
            if not ignoreLinkFlags:
                if plat == "darwin":
                    # libdir holds the framework directory on OSX; when it is
                    # None the spec points at a version-specific library file.
                    if libdir:
                        e.Append(LINKFLAGS=" -F%s -framework %s" % (libdir, lib))
                    else:
                        e.Append(LINKFLAGS=" %s" % lib)
                else:
                    e.Append(LIBPATH=[libdir])
                    e.Append(LIBS=[lib])
            return
    # Default settings: use the python that this script runs on
    pyver = distutils.sysconfig.get_python_version()
    e.Append(CCFLAGS=" -DPY_VER=%s" % pyver)
    e.Append(CPPPATH=[distutils.sysconfig.get_python_inc()])
    if distutils.sysconfig.get_config_var("PYTHONFRAMEWORK"):
        if not ignoreLinkFlags:
            fwdir = distutils.sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX")
            fwname = distutils.sysconfig.get_config_var("PYTHONFRAMEWORK")
            # Only '-framework' links the framework's *current* version; for any
            # other version link the versioned library path explicitly.
            if _GetPythonVersionOSX("%s/%s.framework" % (fwdir, fwname)) != pyver:
                e.Append(LINKFLAGS=" %s/%s.framework/Versions/%s/%s" % (fwdir, fwname, pyver, fwname))
            else:
                e.Append(LINKFLAGS=" -F%s -framework %s" % (fwdir, fwname))
    else:
        if str(SCons.Script.Platform()) == "win32":
            e.Append(LIBPATH=[distutils.sysconfig.PREFIX+'\\libs'])
            e.Append(LIBS=["python%s" % pyver.replace(".", "")])
        else:
            e.Append(CCFLAGS=" %s" % distutils.sysconfig.get_config_var("CFLAGS"))
            if not ignoreLinkFlags:
                e.Append(LINKFLAGS=" %s" % distutils.sysconfig.get_config_var("LINKFORSHARED"))
                e.Append(LIBS=["python%s" % pyver])

# Register the python options in the excons help output.
excons.AddHelpOptions(python=GetOptionsString())
def SoftRequire(e):
    """Like Require, but on OSX defer symbol resolution to load time."""
    if str(SCons.Script.Platform()) != "darwin":
        Require(e)
    else:
        # '-undefined dynamic_lookup' lets the module resolve python symbols
        # from the hosting interpreter, so no explicit link flags are needed.
        e.Append(LINKFLAGS=" -undefined dynamic_lookup")
        Require(e, ignoreLinkFlags=True)
def ModulePrefix():
    """Install prefix (relative) under which python modules are placed."""
    prefix = "/".join(("lib", "python")) + "/"
    return prefix
def ModuleExtension():
    """File extension used by compiled python extension modules."""
    ext = distutils.sysconfig.get_config_var("SO")
    return ext
_cython = ""
def RequireCython(e):
global _cython
cython = excons.GetArgument("with-cython", _cython)
if not os.path.isfile(cython):
excons.PrintOnce("Invalid 'cython' specification", tool="python")
cython = None
if not cython:
cython = "cython%s" % Version()
path = excons.Which(cython)
if path is None:
excons.PrintOnce("No \"%s\" found in PATH. Try with \"cython\" instead" % cython, tool="python")
cython = "cython"
path = excons.Which(cython)
if path is None:
excons.PrintOnce("Cannot find a valid cython in your PATH, use | |
# Source repository: So-dal/Keras_FasterRCNN_CustomDataset
from __future__ import division
import os
import cv2
import numpy as np
import pandas as pd
import sys
import pickle
from optparse import OptionParser
import time
import re
import tensorflow as tf
from keras_frcnn import config
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from keras.backend.tensorflow_backend import set_session
from keras_frcnn import roi_helpers
from keras_frcnn import data_generators
from sklearn.metrics import average_precision_score
sys.setrecursionlimit(40000)
# TF1-style session setup through the TF2 compat layer: grow GPU memory on
# demand and log op/device placement.
#config = tf.ConfigProto()
config=tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
#sess = tf.Session(config=config)
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)
#set_session(sess)

# Command-line options for this evaluation run.
parser = OptionParser()

parser.add_option("-p", "--path", dest="test_path", help="Path to test data.")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
                  help="Number of ROIs per iteration. Higher means more memory use.", default=32)
parser.add_option("--config_filename", dest="config_filename", help=
                  "Location to read the metadata related to the training (generated when training).",
                  default="config.pickle")
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of simple or pascal_voc",
                  default="pascal_voc"),
parser.add_option("--network", dest="network", help="Base network to use. Supports vgg or resnet50.", default='resnet50')
parser.add_option("-i", "--output_model_number", dest="model_iter", help="Models of Epoch step to use. Type this with leading spaces for the hdf5 files!"),
parser.add_option("--ovt", "--overlap_threshold", type="float", dest="overlap_threshold",
                  help="Value of overlap threshold for non-max-suppression.", default=0.7)
parser.add_option("--iou", "--iou_threshold", type="float", dest="iou_threshold",
                  help="Value of IoU to overpass for the box to ve counted as True Positive", default=0.5)

(options, args) = parser.parse_args()

if not options.test_path: # if filename is not given
    parser.error('Error: path to test data must be specified. Pass --path to command line')

# Select the annotation parser implementation.
if options.parser == 'pascal_voc':
    from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
    from keras_frcnn.simple_parser import get_data
else:
    raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")

# Load the training-time configuration (pickled by the training script).
config_output_filename = options.config_filename

with open(config_output_filename, 'rb') as f_in:
    C = pickle.load(f_in)

# Optionally pick a specific epoch's weight file: model.hdf5 -> model_<i>.hdf5.
if options.model_iter is not None:
    x = re.match("^(.+)(\.hdf5)$", C.model_path)
    C.model_path = x.group(1) + "_" + options.model_iter + x.group(2)

# Import the backbone matching the training configuration.
if C.network == 'resnet50':
    import keras_frcnn.resnet as nn
elif C.network == 'vgg':
    import keras_frcnn.vgg as nn

# turn off any data augmentation at test time
C.use_horizontal_flips = False
C.use_vertical_flips = False
C.rot_90 = False

img_path = options.test_path
# IoU threshold used by get_map() to count a detection as a true positive.
iou_thresh = options.iou_threshold
def get_map(pred, gt, f):
    """Match detections against ground-truth boxes for one image.

    pred: list of detection dicts (x1/y1/x2/y2/class/prob).
    gt:   list of ground-truth dicts in original-image coordinates.
    f:    (fx, fy) scale factors mapping gt coords to the resized image.
    Returns (T, P, iou_sum): per-class match flags, per-class confidences
    and the accumulated IoU over all candidate pairs considered.
    """
    T = {}
    P = {}
    iou_total = 0
    fx, fy = f

    # No ground-truth box has been claimed by a detection yet.
    for box in gt:
        box['bbox_matched'] = False

    confidences = np.array([d['prob'] for d in pred])
    # Greedy matching: most confident detections claim gt boxes first.
    for det_idx in confidences.argsort()[::-1]:
        det = pred[det_idx]
        cls = det['class']
        if cls not in P:
            P[cls] = []
            T[cls] = []
        P[cls].append(det['prob'])

        matched = False
        for gt_box in gt:
            if gt_box['class'] != cls:
                continue
            if gt_box['bbox_matched']:
                continue
            overlap = data_generators.iou(
                (det['x1'], det['y1'], det['x2'], det['y2']),
                (gt_box['x1'] / fx, gt_box['y1'] / fy,
                 gt_box['x2'] / fx, gt_box['y2'] / fy))
            iou_total += overlap
            if overlap >= iou_thresh:
                matched = True
                gt_box['bbox_matched'] = True
                break
        T[cls].append(int(matched))

    # Unclaimed ground-truth boxes count as missed positives (prob 0).
    for gt_box in gt:
        if not gt_box['bbox_matched']:  # and not gt_box['difficult']:
            cls = gt_box['class']
            if cls not in P:
                P[cls] = []
                T[cls] = []
            T[cls].append(1)
            P[cls].append(0)

    return T, P, iou_total
def format_img_size(img, C):
    """ formats the image size based on config """
    # Scale so the shorter side equals C.im_size, preserving aspect ratio.
    min_side = float(C.im_size)
    height, width = img.shape[0], img.shape[1]
    if width <= height:
        ratio = min_side / width
        new_width, new_height = int(min_side), int(ratio * height)
    else:
        ratio = min_side / height
        new_width, new_height = int(ratio * width), int(min_side)
    resized = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    return resized, ratio
def format_img_channels(img, C):
    """ formats the image channels based on config """
    # BGR -> RGB, float32, per-channel mean subtraction, scaling, then
    # reorder to channels-first with a leading batch axis: (1, 3, H, W).
    formatted = img[:, :, (2, 1, 0)].astype(np.float32)
    for channel in range(3):
        formatted[:, :, channel] -= C.img_channel_mean[channel]
    formatted /= C.img_scaling_factor
    formatted = np.expand_dims(np.transpose(formatted, (2, 0, 1)), axis=0)
    return formatted
def format_img_ratio(img, C):
    """ formats an image for model prediction based on config """
    # Resize first, then apply channel formatting; return the resize ratio
    # so predicted boxes can be mapped back to the original image.
    resized, ratio = format_img_size(img, C)
    return format_img_channels(resized, C), ratio
def format_img(img, C):
    """Resize and channel-format img; return (tensor, fx, fy).

    fx/fy map coordinates in the resized image back to the original.
    """
    min_side = float(C.im_size)
    height, width = img.shape[0], img.shape[1]
    if width <= height:
        scale = min_side / width
        new_width, new_height = int(min_side), int(scale * height)
    else:
        scale = min_side / height
        new_width, new_height = int(scale * width), int(min_side)
    fx = width / float(new_width)
    fy = height / float(new_height)
    out = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    # BGR -> RGB, mean subtraction, scaling, channels-first + batch axis.
    out = out[:, :, (2, 1, 0)].astype(np.float32)
    for channel in range(3):
        out[:, :, channel] -= C.img_channel_mean[channel]
    out /= C.img_scaling_factor
    out = np.expand_dims(np.transpose(out, (2, 0, 1)), axis=0)
    return out, fx, fy
# Method to transform the coordinates of the bounding box to its original size
def get_real_coordinates(ratio, x1, y1, x2, y2):
    """Map box coordinates from the resized image back to the original.

    BUGFIX: the original used floor division ('//') so round() operated
    on an already-truncated value, biasing coordinates low; true division
    gives the correctly rounded original-image coordinates.
    """
    real_x1 = int(round(x1 / ratio))
    real_y1 = int(round(y1 / ratio))
    real_x2 = int(round(x2 / ratio))
    real_y2 = int(round(y2 / ratio))
    return (real_x1, real_y1, real_x2, real_y2)
# Invert the class mapping to {index: name}; make sure 'bg' exists.
class_mapping = C.class_mapping

if 'bg' not in class_mapping:
    class_mapping['bg'] = len(class_mapping)

class_mapping = {v: k for k, v in class_mapping.items()}
print(class_mapping)
# Random color per class for drawing detection boxes.
class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
C.num_rois = int(options.num_rois)

# Feature-map depth produced by the shared CNN backbone.
if C.network == 'resnet50':
    num_features = 1024
elif C.network == 'vgg':
    num_features = 512

# Input shapes depend on the backend's image dimension ordering.
if K.common.image_dim_ordering() == 'th':
    input_shape_img = (3, None, None)
    input_shape_features = (num_features, None, None)
else:
    input_shape_img = (None, None, 3)
    input_shape_features = (None, None, num_features)

img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(C.num_rois, 4))
feature_map_input = Input(shape=input_shape_features)

# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)

# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn_layers = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)

model_rpn = Model(img_input, rpn_layers)
model_classifier_only = Model([feature_map_input, roi_input], classifier)
model_classifier = Model([feature_map_input, roi_input], classifier)

print('Loading weights from {}'.format(C.model_path))
model_rpn.load_weights(C.model_path, by_name=True)
model_classifier.load_weights(C.model_path, by_name=True)

# Compiling only satisfies the Keras API; no training happens at test time.
model_rpn.compile(optimizer='sgd', loss='mse')
model_classifier.compile(optimizer='sgd', loss='mse')

begin = time.time()

# Accumulators for mAP / IoU bookkeeping across the whole test set.
T = {}
P = {}
iou_result = 0
filepath_list=[]
iou_list=[]
all_map=[]
all_imgs = []

classes = {}
all_dets = []
filenames=[]
coord=[]
cl_prob=[]

overlap_threshold = options.overlap_threshold
test_imgs, _, _ = get_data(options.test_path)
for idx, img_data in enumerate(test_imgs):
print('{}/{}'.format(idx + 1,len(test_imgs)))
st = time.time()
filepath = img_data['filepath']
filepath_list.append(filepath)
img = cv2.imread(filepath)
X, ratio = format_img_ratio(img, C)
if K.common.image_dim_ordering() == 'tf':
X = np.transpose(X, (0, 2, 3, 1))
# get the feature maps and output from the RPN
[Y1, Y2, F] = model_rpn.predict(X)
R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.common.image_dim_ordering(), overlap_thresh=overlap_threshold)
# convert from (x1,y1,x2,y2) to (x,y,w,h)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
# apply the spatial pyramid pooling to the proposed regions
bboxes = {}
probs = {}
for jk in range(R.shape[0]//C.num_rois + 1):
ROIs = np.expand_dims(R[C.num_rois*jk:C.num_rois*(jk+1), :], axis=0)
if ROIs.shape[1] == 0:
break
if jk == R.shape[0]//C.num_rois:
#pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0],C.num_rois,curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
[P_cls, P_regr] = model_classifier_only.predict([F, ROIs])
for ii in range(P_cls.shape[1]):
#if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
if np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
continue
cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
if cls_name not in bboxes:
bboxes[cls_name] = []
probs[cls_name] = []
(x, y, w, h) = ROIs[0, ii, :]
cls_num = np.argmax(P_cls[0, ii, :])
try:
(tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except:
pass
bboxes[cls_name].append([C.rpn_stride*x, C.rpn_stride*y, C.rpn_stride*(x+w), C.rpn_stride*(y+h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))
det_iou=[]
for key in bboxes:
bbox = np.array(bboxes[key])
new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=overlap_threshold)
for jk in range(new_boxes.shape[0]):
(x1, y1, x2, y2) = new_boxes[jk,:]
det = {'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': key, 'prob': new_probs[jk]}
all_dets.append(det)
det_iou.append(det)
filenames.append(str(filepath).split('/')[1])
(real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)
cv2.rectangle(img,(real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),2)
textLabel = ("{}: {}".format(key,int(100*new_probs[jk])))
cl_prob.append((key,100*new_probs[jk]))
print(textLabel)
(retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_PLAIN,1,1)
textOrg = (real_x1, real_y1-0)
cv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (255, 255, 255), -1)
cv2.rectangle(img, (textOrg[0] - 5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (0, 0, 0), 1)
cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 1)
t=(real_x1, real_y1, real_x2, real_y2)
coord.append(t)
# if the model detects the targeted object, the image with the predicted bbox is saved in the results_imgs folder
if det_iou!=[]:
cv2.imwrite('./results_imgs/{}.png'.format(str((filepath).split('/')[1])[:-4]), img)
X, fx, fy = format_img(img, C)
t, p, iou = get_map(det_iou, img_data['bboxes'], (fx, fy))
iou_result += iou
iou_list.append(iou)
for key in t.keys():
print("t.keys:",t.keys())
if key not in T:
T[key] = []
P[key] = []
T[key].extend(t[key])
| |
# pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for MaskedArray.
Adapted from the original test_ma by <NAME>
:author: <NAME> & <NAME>
:contact: pierregm_at_uga_dot_edu & mattknox_ca_at_hotmail_dot_com
:version: $Id: test_timeseries.py 3836 2008-01-15 13:09:03Z <EMAIL> $
"""
__author__ = "<NAME> & <NAME> ($Author: <EMAIL> $)"
__revision__ = "$Revision: 3836 $"
__date__ = '$Date: 2008-01-15 08:09:03 -0500 (Tue, 15 Jan 2008) $'
import numpy as np
from numpy import bool_, complex_, float_, int_, object_
from numpy.testing import *
import numpy.ma as ma
from numpy.ma import MaskedArray, masked, nomask
from numpy.ma.testutils import *
import scikits.timeseries as ts
from scikits.timeseries import \
TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, \
tseries, Date, date_array, now, time_series, \
adjust_endpoints, align_series, align_with, \
concatenate, fill_missing_dates, find_duplicated_dates, \
remove_duplicated_dates, split, stack
get_varshape = tseries.get_varshape
_timeseriescompat_multiple = tseries._timeseriescompat_multiple
#------------------------------------------------------------------------------
class TestCreation(TestCase):
    """Creation tests for TimeSeries objects.

    FIX: the deprecated ``failUnless`` alias (removed from unittest in
    Python 3.12) has been replaced by ``assertTrue`` throughout; the
    assertions are otherwise unchanged.
    """

    def __init__(self, *args, **kwds):
        TestCase.__init__(self, *args, **kwds)
        dlist = ['2007-01-%02i' % i for i in range(1, 16)]
        dates = date_array(dlist, freq='D')
        data = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3)
        self.d = (dlist, dates, data)

    def test_fromlist(self):
        "Test the creation of a TimeSeries w/ a list of dates as input dates."
        (dlist, dates, data) = self.d
        series = time_series(data, dlist, freq='D')
        self.assertTrue(isinstance(series, TimeSeries))
        assert_equal(series.mask, [1, 0, 0, 0, 0] * 3)
        assert_equal(series.series, data)
        assert_equal(series.dates, dates)
        assert_equal(series.freqstr, 'D')

    def test_fromrange(self):
        "Test the creation of a TimeSeries w/ a starting date."
        (dlist, dates, data) = self.d
        series = time_series(data, start_date=dates[0])
        self.assertTrue(isinstance(series, TimeSeries))
        assert_equal(series.mask, [1, 0, 0, 0, 0] * 3)
        assert_equal(series.series, data)
        assert_equal(series.dates, dates)
        assert_equal(series.freqstr, 'D')

    def test_fromseries(self):
        "Test the creation of a TimeSeries w/ a time series as input data."
        (dlist, dates, data) = self.d
        series = time_series(data, dlist, freq='D')
        dates = dates + 15
        series = time_series(series, dates)
        self.assertTrue(isinstance(series, TimeSeries))
        assert_equal(series.mask, [1, 0, 0, 0, 0] * 3)
        assert_equal(series.series, data)
        assert_equal(series.dates, dates)
        assert_equal(series.freqstr, 'D')

    def test_fromdatearray(self):
        "Tests the creation of a series with a DateArray as input data."
        (_, dates, _) = self.d
        data = dates
        #
        series = time_series(data, dates)
        self.assertTrue(isinstance(series, TimeSeries))
        assert_equal(series.dates, dates)
        assert_equal(series.data, data)
        assert_equal(series.freqstr, 'D')
        #
        series[5] = masked
        # ensure that series can be represented by a string after masking a value
        # (there was a bug before that prevented this from working when using a
        # DateArray for the data)
        strrep = str(series)

    def test_datafromlist(self):
        "Test the creation of a series w/ a list as input data."
        (_, dates, _) = self.d
        data = list(range(15))
        series = time_series(data, dates)
        assert_equal(series._data.size, 15)

    def test_unsorted(self):
        "Tests that the data are properly sorted along the dates."
        dlist = ['2007-01-%02i' % i for i in (3, 2, 1)]
        data = [10, 20, 30]
        series = time_series(data, dlist, freq='D')
        assert_equal(series.data, [30, 20, 10])
        #
        dates = date_array(dlist, freq='D')
        series = TimeSeries(data, dates)
        assert_equal(series.data, [30, 20, 10])
        #
        series = time_series(data, dlist, freq='D', mask=[1, 0, 0])
        assert_equal(series.mask, [0, 0, 1])
        #
        data = ma.array([10, 20, 30], mask=[1, 0, 0])
        series = time_series(data, dlist, freq='D')
        assert_equal(series._mask, [0, 0, 1])

    def test_unsorted_w_datearray(self):
        "Tests that the data are properly sorted along the dates."
        dlist = ['2007-01-%02i' % i for i in (3, 2, 1)]
        data = [10, 20, 30]
        dates = date_array(dlist, freq='D')
        self.assertTrue(dates._unsorted is not None)
        #
        series = time_series(data, dates=dates)
        assert_equal(series.data, [30, 20, 10])
        self.assertTrue(dates._unsorted is not None)
        self.assertTrue(series.dates._unsorted is None)
        #
        series = time_series(data, dates=dates)
        assert_equal(series.data, [30, 20, 10])
        self.assertTrue(series.dates._unsorted is None)

    def test_setdates(self):
        "Tests setting the dates of a series."
        (dlist, dates, data) = self.d
        reference = time_series(data, dates=dates)
        # Set with a DateArray: that should work
        test_series = data.view(TimeSeries)
        test_series.dates = dates
        assert_equal(test_series.dates, reference.dates)

    def test_setdates_asndarray(self):
        "Tests setting the dates as a ndarray."
        (dlist, dates, data) = self.d
        test_series = data.view(TimeSeries)
        # Set with a ndarray: that shouldn't work
        test_dates = np.array(dates, copy=False, subok=False)
        try:
            test_series._dates = test_dates
        except TypeError:
            pass
        else:
            err_msg = "Dates shouldn't be set as basic ndarrays."
            raise TimeSeriesError(err_msg)

    def test_setdates_asdate(self):
        "Tests setting the dates as a Date"
        (dlist, dates, data) = self.d
        series = data.view(TimeSeries)
        try:
            series.dates = ts.now('D')
        except TypeError:
            pass
        else:
            err_msg = "Dates shouldn't be set as a Date objects."
            raise TimeSeriesError(err_msg)

    def test_setdates_with_incompatible_size(self):
        "Tests setting the dates w/ a DateArray of incompatible size"
        (dlist, dates, data) = self.d
        series = data.view(TimeSeries)
        try:
            series.dates = dates[:len(dates) // 2]
        except ts.TimeSeriesCompatibilityError:
            pass
        else:
            err_msg = "Dates size should match the input."
            raise TimeSeriesError(err_msg)

    def test_setdates_with_autoreshape(self):
        "Tests the automatic reshaping of dates."
        (dlist, dates, data) = self.d
        reference = time_series(data, dates=dates)
        test_series = data.view(TimeSeries)
        # Set with a datearray w/ a different size than expected: should fail
        test_dates = dates[:-1]
        try:
            test_series.dates = test_dates
        except TimeSeriesCompatibilityError:
            pass
        else:
            err_msg = "Dates should have a size compatible with data"
            raise TimeSeriesError(err_msg)
        # Set w/ a date of a different shape: should work, but the shape changes
        test_dates = dates.reshape(-1, 1)
        test_series._dates = test_dates
        assert_equal(test_series.dates, reference.dates)
        assert_equal(test_series.dates.shape, test_series.shape)

        test_dates = np.array(dates, copy=False, subok=True, ndmin=2)
        test_series._dates = test_dates
        assert_equal(test_series.dates, reference.dates)
        assert_equal(test_series.dates.shape, test_series.shape)

    def test_setdates_unsorted_basic(self):
        "Test automatic sorting when setting dates - 1D case."
        dates = date_array([ts.Date('D',
                                    '2001-01-%02i' % _) for _ in (4, 3, 2, 1)])
        a = np.array((4, 3, 2, 1), dtype=float)
        series = a.view(ts.TimeSeries)
        assert_equal(series.dates, [])
        assert_equal(series, (4, 3, 2, 1))
        #
        series._dates = dates
        series.sort_chronologically()
        assert_equal(series, (1, 2, 3, 4))

    def test_setdates_unsorted_reshaped(self):
        "Test automatic sorting when setting dates - 1D case reshaped to nD."
        dates = date_array([ts.Date('D',
                                    '2001-01-%02i' % _) for _ in (4, 3, 2, 1)])
        a = np.array([[4., 3.], [2., 1.]], dtype=float)
        series = a.view(TimeSeries)
        series._dates = dates
        series.sort_chronologically()
        assert_equal(series, [[1., 2.], [3., 4.]])

    def test_setdates_unsorted_2D(self):
        "Test automatic sorting when setting dates - 1D case reshaped to nD."
        dates = date_array([ts.Date('D',
                                    '2001-01-%02i' % _) for _ in (4, 3, 2, 1)])
        a = np.arange(12).reshape(4, 3)
        series = a.view(TimeSeries)
        series._dates = dates
        series.sort_chronologically()
        assert_equal(series, [[ 9., 10., 11.],
                              [ 6., 7., 8.],
                              [ 3., 4., 5.],
                              [ 0., 1., 2.]])

    def test_copy(self):
        "Tests the creation of a timeseries with copy=True"
        dlist = ['2007-01-%02i' % i for i in range(1, 16)]
        dates = date_array(dlist, freq='D')
        data = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3)

        series = time_series(data, dates)
        assert_equal(series.dates.ctypes.data, dates.ctypes.data)
        assert_equal(series.data.ctypes.data, data.data.ctypes.data)
        assert_equal(series.mask.ctypes.data, data.mask.ctypes.data)
        #
        series = time_series(data, dates, copy=True)
        assert_not_equal(series.dates.ctypes.data, dates.ctypes.data)
        assert_not_equal(series.data.ctypes.data, data.data.ctypes.data)
        assert_not_equal(series.mask.ctypes.data, data.mask.ctypes.data)

    def test_using_length(self):
        "Test using the `length` parameter of time_series."
        start = ts.Date('M', '1955-01')
        data = np.random.uniform(0, 1, 50 * 12).reshape(50, 12)
        # Default : the dates should be (50,)
        series = ts.time_series(data, start_date=start)
        assert_equal(series.shape, (50, 12))
        assert_equal(series.dates.shape, (50,))
        assert_equal(series.varshape, (12,))
        # Forcing dates to be 2D
        series = ts.time_series(data, start_date=start, length=600)
        assert_equal(series.shape, (50, 12))
        assert_equal(series.dates.shape, (50, 12))
        assert_equal(series.varshape, ())
        # Forcing dates to 1D
        series = ts.time_series(data, start_date=start, length=50)
        assert_equal(series.shape, (50, 12))
        assert_equal(series.dates.shape, (50,))
        assert_equal(series.varshape, (12,))
        # Make sure we raise an exception if something goes wrong....
        try:
            series = ts.time_series(data, start_date=start, length=100)
        except ts.TimeSeriesCompatibilityError:
            pass
        else:
            errmsg = "The should not be dates/data compatibility in this case."
            raise TimeSeriesCompatibilityError(errmsg)

    def test_varshape(self):
        "Test some corner case of varshape"
        test = ts.time_series(np.ones((10, 2)), start_date=ts.now('d'))
        assert_equal(test.varshape, (2,))
        #
        test = ts.time_series(np.ones((10, 1)), start_date=ts.now('d'))
        assert_equal(test.varshape, (1,))
        #
        test = ts.time_series(np.ones((10,)), start_date=ts.now('d'))
        assert_equal(test.varshape, ())
#------------------------------------------------------------------------------
class TestArithmetics(TestCase):
"Some basic arithmetic tests"
    def __init__(self, *args, **kwds):
        # Build a 15-day daily series with every fifth element masked and
        # keep both the series and the raw masked data for comparisons.
        TestCase.__init__(self, *args, **kwds)
        dlist = ['2007-01-%02i' % i for i in range(1, 16)]
        dates = date_array(dlist, freq='D')
        data = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3)
        self.d = (time_series(data, dlist, freq='D'), data)
    def test_intfloat(self):
        "Test arithmetic timeseries/integers"
        # Each scalar op must return a TimeSeries that keeps the mask and
        # dates and matches the same op applied to the raw masked data.
        (series, data) = self.d
        #
        nseries = series + 1
        self.failUnless(isinstance(nseries, TimeSeries))
        assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3)
        assert_equal(nseries.series, data + 1)
        assert_equal(nseries.dates, series.dates)
        #
        nseries = series - 1
        self.failUnless(isinstance(nseries, TimeSeries))
        assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3)
        assert_equal(nseries.series, data - 1)
        assert_equal(nseries.dates, series.dates)
        #
        nseries = series * 1
        self.failUnless(isinstance(nseries, TimeSeries))
        assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3)
        assert_equal(nseries.series, data * 1)
        assert_equal(nseries.dates, series.dates)
        #
        nseries = series / 1.
        self.failUnless(isinstance(nseries, TimeSeries))
        assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3)
        assert_equal(nseries.series, data / 1.)
        assert_equal(nseries.dates, series.dates)
def test_intfloat_inplace(self):
"Test int/float arithmetics in place."
(series, data) = self.d
nseries = | |
Image.open(_fp)
_rgb_im = _img.convert('RGB')
_rgb_im.save(os.path.join(_path, _file_no_ext + '.jpg'))
_fp.close()
# 删除原文件,修改xml中的文件名
FileTool.remove_file(_img_file)
_xml_file = os.path.join(_path, _file_no_ext + '.xml')
if os.path.exists(_xml_file):
_tree = ET.parse(os.path.join(_path, _xml_file))
_root = _tree.getroot()
_root.find('filename').text = _file_no_ext + '.jpg'
_root.find('path').text = os.path.join(_path, _file_no_ext + '.jpg')
_tree.write(
os.path.join(_path, _xml_file),
encoding='utf-8', method="xml",
xml_declaration=None
)
# 修改文件名变量
_img_file = os.path.join(_path, _file_no_ext + '.jpg')
# 打开图片判断位深
_fp = open(_img_file, 'rb')
_img = Image.open(_fp)
if _img.mode != 'RGB':
# 需要删除掉
_fp.close()
_xml_file = os.path.join(_path, FileTool.get_file_name_no_ext(_file) + '.xml')
print('delete %s' % _img_file)
FileTool.remove_file(_img_file)
if os.path.exists(_xml_file):
FileTool.remove_file(_xml_file)
else:
_fp.close()
# 检查xml文件
_xml_file = os.path.join(_path, _file_no_ext + '.xml')
if os.path.exists(_xml_file):
_tree = ET.parse(os.path.join(_path, _xml_file))
_root = _tree.getroot()
if _root.find('filename').text != _file_no_ext + '.jpg' or os.path.split(
_root.find('path').text
)[0] != _path:
_root.find('filename').text = _file_no_ext + '.jpg'
_root.find('path').text = os.path.join(_path, _file_no_ext + '.jpg')
_tree.write(
os.path.join(_path, _xml_file),
encoding='utf-8', method="xml",
xml_declaration=None
)
@classmethod
def labelimg_crop_pic_by_flags(cls, path: str, dest_path: str, copy_no_flag_pic: bool = True,
with_sub_dir: bool = True, fix_len: int = 10):
"""
根据标注进行图片截图处理
@param {str} path - 需要处理的目录
@param {str} dest_path - 截图图片存放目录
@param {bool} copy_no_flag_pic=True - 直接复制没有标注的图片
@param {bool} with_sub_dir=True - 是否按原目录结构存储图片
@param {int} fix_len=10 - 图片重命名的文件名长度
@returns {iter_list} - 通过yield返回的处理进度信息清单
[总文件数int, 当前已处理文件数int, 是否成功]
"""
try:
# 获取所有要处理的图片清单
_file_list = cls._get_pic_file_list(path)
_total = len(_file_list)
_deal_num = 0
# 先返回进度情况
if _total == 0:
yield [_deal_num, _total, True]
return
# 创建复制文件夹
FileTool.create_dir(dest_path, exist_ok=True)
# 遍历处理
_rename_index = 1
_src_path = os.path.realpath(path)
_dest_path = os.path.realpath(dest_path)
for _file in _file_list:
# 当前进展
yield [_deal_num, _total, True]
# 路径处理
_file_path, _filename = os.path.split(_file)
if with_sub_dir:
# 创建子目录
_dest_path = os.path.join(
os.path.realpath(dest_path),
os.path.realpath(_file_path)[len(_src_path):].strip('/\\')
)
FileTool.create_dir(_dest_path, exist_ok=True)
# 获取标注文件
_ext = FileTool.get_file_ext(_filename)
_xml_file = os.path.join(
_file_path,
_filename[0: -len(_ext)] + 'xml'
)
if not os.path.exists(_xml_file):
# 标注文件不存在
if copy_no_flag_pic:
# 直接复制文件
shutil.copy(
_file, os.path.join(
_dest_path,
StringTool.fill_fix_string(
str(_rename_index), fix_len, '0') + '.' + _ext
)
)
_rename_index += 1
# 下一个
_deal_num += 1
continue
# 将图片放入内存
with open(_file, 'rb') as _fid:
_file_bytes = _fid.read()
_image = Image.open(BytesIO(_file_bytes))
# 处理标注
_tree = ET.parse(_xml_file)
_root = _tree.getroot()
for _member in _root.findall('object'):
# 逐个标注进行处理
_crop_image = _image.crop((
int(_member[4][0].text),
int(_member[4][1].text),
int(_member[4][2].text),
int(_member[4][3].text)
))
_crop_image.save(
os.path.join(
_dest_path,
StringTool.fill_fix_string(
str(_rename_index), fix_len, '0') + '.' + _ext
),
format='JPEG'
)
_rename_index += 1
# 下一个
_deal_num += 1
# 返回结果
yield [_total, _total, True]
except:
print('labelimg_crop_pic_by_flags error: %s\r\n%s' % (path, traceback.format_exc()))
yield [-1, -1, False]
    #############################
    # Internal functions
    #############################
@classmethod
def _get_keys_by_value(cls, d: dict, value):
"""
根据字典的值获取key
@param {dict} d - 字典
@param {str} value - 值
"""
for _key in d.keys():
if d[_key] == value:
return _key
# 找不到
return None
@classmethod
def _get_labelimg_annotation_file_list(cls, input_path: str) -> list:
"""
获取要处理的LabelImg标注文件清单
@param {str} input_path - 起始目录
@returns {list} - 返回文件清单
"""
_list = []
# 先获取当前目录下的所有xml文件
for _file in FileTool.get_filelist(input_path, regex_str=r'.*\.xml$'):
_pic_file = _file[0:-3] + 'jpg'
if os.path.exists(_pic_file):
_list.append(_file)
# 获取子目录
for _dir in FileTool.get_dirlist(input_path):
_temp_list = cls._get_labelimg_annotation_file_list(_dir)
_list.extend(_temp_list)
return _list
@classmethod
def _create_labelimg_tf_example(cls, annotation_file: str, class_to_int_fun=None,
use_mapping: bool = False,
copy_img_path: str = None,
mapping: dict = None,
flags_count: dict = {}) -> tf.train.Example:
"""
生成指定标注的Example对象
@param {str} annotation_file - 标注xml文件
@param {function} class_to_int_fun=None - 将分类名转换为int的函数
如果传None代表类名为数字,可以直接将类名转换为数字
@param {bool} use_mapping=False - 是否使用mapping.json数据处理转换
@param {str} copy_img_path=None - 如果传值了则复制对应的图片到对应目录
@param {dict} mapping=None - mapping.json字典
@param {dict} flags_count={} - 标签统计信息
@returns {tf.train.Example} - Example对象
"""
# 获取未知类对应的int值
_other_class_int = -1
for _key in mapping['class'].keys():
if mapping['class'][_key] == 'other':
_other_class_int = mapping['class_int'][_key]
break
# 获取标注文件信息
_tree = ET.parse(annotation_file)
_root = _tree.getroot()
_annotations = dict()
_annotations['filename'] = _root.find('filename').text
_annotations['file_path'] = os.path.join(
os.path.split(annotation_file)[0], _annotations['filename']
)
_annotations['width'] = int(_root.find('size')[0].text)
_annotations['height'] = int(_root.find('size')[1].text)
# 图片文件二进制处理
with tf.io.gfile.GFile(_annotations['file_path'], 'rb') as fid:
_encoded_jpg = fid.read()
_encoded_jpg_io = io.BytesIO(_encoded_jpg)
_image = Image.open(_encoded_jpg_io)
_width, _height = _image.size
# 处理信息要素
_filename = _annotations['filename'].encode('utf8')
_image_format = b'jpg'
_xmins = []
_xmaxs = []
_ymins = []
_ymaxs = []
_classes_text = []
_classes = []
# 获取信息字典
_info_dict = ExtendLib.get_info_dict(
_annotations['file_path'], mapping['info_key_dict'])
# 遍历字典信息获取要处理的标注
_tag_list = list()
for _member in _root.findall('object'):
_member_class = _member[0].text
_class_int = 0
if use_mapping:
# 使用mapping.json类型转换
if _member_class == mapping['set_by_info']['class_name']:
# 需要获取真实的信息
if mapping['set_by_info']['info_tag'] in _info_dict.keys():
_member_class = _info_dict[mapping['set_by_info']['info_tag']]
if _member_class in mapping['class_int'].keys():
_class_int = mapping['class_int'][_member_class]
_member_class = mapping['class'][_member_class]
else:
# 不在处理清单的标签
if mapping['unknow_to_other'] and _other_class_int != -1:
# 将类型转为未知
_member_class = 'other'
_class_int = _other_class_int
else:
# 不进行处理
if _member_class in flags_count.keys():
flags_count[_member_class] -= 1
else:
flags_count[_member_class] = -1
continue
else:
if class_to_int_fun is None:
_class_int = int(_member_class)
else:
_class_int = class_to_int_fun(_member_class)
_tag_info = {
'class': _member_class,
'class_int': _class_int,
'xmin': int(_member[4][0].text),
'ymin': int(_member[4][1].text),
'xmax': int(_member[4][2].text),
'ymax': int(_member[4][3].text)
}
_tag_info['size'] = (_tag_info['xmax'] - _tag_info['xmin']) * \
(_tag_info['ymax'] - _tag_info['ymin'])
_tag_list.append(_tag_info)
# 按标注的size反向排序
_tag_list.sort(key=lambda x: x['size'], reverse=True)
# 从后往前遍历看是否要删除标注
_end_index = len(_tag_list) - 1 # 从后往前的遍历索引
while _end_index > 0:
_start_index = 0 # 从前往后的遍历索引
while _start_index < _end_index:
_large = _tag_list[_start_index] # 大面积标注
_small = _tag_list[_end_index] # 小面积标注
if _large['class'] not in mapping['ignore_inner'].keys():
# 外部标注无需忽略内部标注
_start_index += 1
continue
if mapping['ignore_inner'][_large['class']] == 'other' and _small['class'] != 'other':
# 内部标注不是other标注
_start_index += 1
continue
if _large['xmin'] <= _small['xmin'] and _large['xmax'] >= _small['xmax'] and _large['ymin'] <= _small['ymin'] and _large['ymax'] >= _small['ymax']:
if _large['size'] != _small['size']:
# 确保两个框不是完全一样, 是包含关系,前面已排除不能忽略的情况,直接删除
_tag_list.pop(_end_index)
break
# 从上往下找下一个
_start_index += 1
# 从下网上继续进行判断
_end_index -= 1
# 留下来的逐个标签处理
for _tag in _tag_list:
_xmins.append(_tag['xmin'] / _width)
_xmaxs.append(_tag['xmax'] / _width)
_ymins.append(_tag['ymin'] / _height)
_ymaxs.append(_tag['ymax'] / _height)
_classes_text.append(_tag['class'].encode('utf8'))
_classes.append(_tag['class_int'])
if _tag['class'] in flags_count.keys():
flags_count[_tag['class']] += 1
else:
flags_count[_tag['class']] = 1
if len(_classes_text) == 0:
# 没有找到适用的内容,返回None
return None
else:
# 复制文件
# print(_annotations['file_path'])
if copy_img_path is not None:
shutil.copyfile(
annotation_file,
os.path.join(copy_img_path, os.path.split(annotation_file)[1])
)
shutil.copyfile(
_annotations['file_path'],
os.path.join(copy_img_path, _annotations['filename'])
)
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(_height),
'image/width': dataset_util.int64_feature(_width),
'image/filename': dataset_util.bytes_feature(_filename),
'image/source_id': dataset_util.bytes_feature(_filename),
'image/encoded': dataset_util.bytes_feature(_encoded_jpg),
'image/format': dataset_util.bytes_feature(_image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(_xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(_xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(_ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(_ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(_classes_text),
'image/object/class/label': dataset_util.int64_list_feature(_classes),
}))
return tf_example
@classmethod
def _get_pic_file_list(cls, input_path: str) -> list:
"""
获取制定目录下的所有图片文件清单
@param {str} input_path - 要处理的目录
@returns {list} - 文件清单列表
"""
_list = []
# 先获取当前目录下的所有xml文件
for _file in FileTool.get_filelist(input_path, is_fullname=True):
_ext = FileTool.get_file_ext(_file)
if _ext.lower() in ('jpg', 'jpeg'):
_list.append(_file)
# 获取子目录
for _dir in FileTool.get_dirlist(input_path):
_temp_list = cls._get_pic_file_list(_dir)
_list.extend(_temp_list)
return _list
class TFObjectDetect(object):
"""
物体识别处理类
"""
    def __init__(self, auto_label: dict, mapping: dict, base_path: str):
        """
        Object-detection constructor.

        Loads one frozen TF1 inference graph (and creates one tf.Session)
        per enabled auto_label entry, and caches the input/output tensors
        needed for detection.

        @param {dict} auto_label - auto-labelling configuration; each entry is
            expected to carry at least 'enable' and 'frozen_graph' keys
        @param {dict} mapping - mapping.json dict
        @param {str} base_path - path the program was started from (frozen_graph
            paths are resolved relative to it)
        """
        self.auto_label = auto_label
        self.mapping = mapping
        self.graphs = list()
        for _key in self.auto_label.keys():
            if not self.auto_label[_key]['enable']:
                # This model is disabled in the configuration
                continue

            # Load the frozen object-detection model
            _graph = {
                'key': _key,
            }
            _pb_file = os.path.join(base_path, self.auto_label[_key]['frozen_graph'])
            _detection_graph = tf.Graph()
            with _detection_graph.as_default():
                _od_graph_def = tf.GraphDef()
                with tf.gfile.GFile(_pb_file, 'rb') as _fid:
                    _serialized_graph = _fid.read()
                    _od_graph_def.ParseFromString(_serialized_graph)
                    tf.import_graph_def(_od_graph_def, name='')
                _graph['session'] = tf.Session(graph=_detection_graph)

            # Input tensor is the image
            _graph['image_tensor'] = _detection_graph.get_tensor_by_name('image_tensor:0')
            # Output tensors are the detection boxes, scores, and classes
            # Each box represents a part of the image where a particular object was detected
            _graph['detection_boxes'] = _detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            _graph['detection_scores'] = _detection_graph.get_tensor_by_name('detection_scores:0')
            _graph['detection_classes'] = _detection_graph.get_tensor_by_name('detection_classes:0')
            # Number of objects detected
            _graph['num_detections'] = _detection_graph.get_tensor_by_name('num_detections:0')
            # Register the loaded model
            self.graphs.append(_graph)
def detect_object(self, image_file: str, shapes: list):
"""
对指定图片进行物体识别
@param {str} image_file - 要识别的图片
@param {list} shapes - 已有的形状
@returns {list} - 返回匹配上的shape列表
"""
_object_list = list()
if len(self.graphs) == 0:
return _object_list
_image = Image.open(image_file)
_image_np = self._load_image_into_numpy_array(_image)
_image_np_expanded = np.expand_dims(_image_np, axis=0)
for _graph in self.graphs:
# 遍历每个识别模型图执行处理
# Perform the actual detection by running the model with the image as input
(_boxes, _scores, _classes, _num) = _graph['session'].run(
[_graph['detection_boxes'], _graph['detection_scores'],
_graph['detection_classes'], _graph['num_detections']],
feed_dict={_graph['image_tensor']: _image_np_expanded})
_np_scores = np.squeeze(_scores)
_np_boxes = np.squeeze(_boxes)
_np_classes = np.squeeze(_classes)
_index = 0
_min_distance = self.auto_label[_graph['key']]['min_distance']
_min_score = self.auto_label[_graph['key']]['min_score']
_class_int = self.mapping[_graph['key']]['class_int']
_x_min_distance = int(_image.size[0] * _min_distance)
_y_min_distance = int(_image.size[1] * _min_distance)
for _score in _np_scores:
if _score >= _min_score:
# 折算为像素的框
_ymin = int(_np_boxes[_index][0] * _image.size[1])
_xmin = int(_np_boxes[_index][1] * _image.size[0])
_ymax = int(_np_boxes[_index][2] * _image.size[1])
_xmax = int(_np_boxes[_index][3] * _image.size[0])
_points = [(_xmin, _ymin), (_xmax, _ymin), (_xmax, _ymax), (_xmin, _ymax)]
# 标签
_label = TFRecordCreater._get_keys_by_value(
_class_int, | |
""" pylabnet measurement and service classes for Swabian Instruments TimeTagger
which implements qudi's SlowCounter interface.
This file contains pylabnet wrapper and service classes to allow qudi to
access Swabian Instruments TT through pylabnet network as SlowCounter.
Steps:
- instantiate TimeTagger
- instantiate pylabnet-SlowCtrWrap (pass ref to TimeTagger as tagger)
- instantiate pylabnet-SlowCtrService and assign module to the created wrapper
- start pylabnet-server for SlowCtrService
- in qudi, instantiate SlowCtrClient as one of the hardware modules
"""
from pylabnet.network.core.service_base import ServiceBase
import TimeTagger as TT
import time
import copy
import pickle
class Wrap:
    """ Measurement instance which implements qudi's SlowCounter interface.

    Hardware calls are guarded throughout: TT functions report failure via
    NotImplementedError, and a missing device/measurement reference shows up
    as AttributeError, so those are the exceptions handled below.
    """

    def __init__(self, tagger, channel_list, clock_frequency, buffer_size):
        # References to the device and to TT.Counter measurement
        self._tagger = tagger
        self._counter = None

        # Counter parameters
        self._channel_list = channel_list
        self._clock_frequency = clock_frequency
        self._buffer_size = buffer_size

        self._bin_width = 0
        self._bin_width_sec = 0

    def set_up_clock(self, clock_frequency=None, clock_channel=None):
        """
        Sets sample clock frequency for the Counter measurement.

        :param clock_frequency: (float) sample clock frequency. If not given,
                                configuration value is used
        :param clock_channel: ignored (internal timebase is used to generate
                              sample clock signal)
        :return: (int) operation status code: 0 - OK
                                             -1 - Error
        """
        # Fall back to the configured value
        if clock_frequency is None:
            clock_frequency = self._clock_frequency

        # Derive the bin width from the requested sample rate
        bin_width = int(1e12 / clock_frequency)  # in picoseconds, for the device
        bin_width_sec = bin_width * 1e-12        # in seconds, for software timing

        self._bin_width = bin_width
        self._bin_width_sec = bin_width_sec

        return 0

    def set_up_counter(self,
                       counter_channels=None,
                       sources=None,
                       clock_channel=None,
                       counter_buffer=None):
        """
        Configures the actual counter with a given clock.

        :param counter_channels: (list of int) [optional] list of channels
            to count clicks on. If not given, config value is used.
            Note: the Counter GUI builds its plot curves from
            get_counter_channels() during activation, before set_up_counter()
            is ever called, so the channel set must stay consistent with it.
        :param counter_buffer: (int) [optional] size of the memory buffer.
            If not given, config value is used.
        :param sources: ignored
        :param clock_channel: ignored
        :return: (int) operation status code: 0 - OK
                                             -1 - Error
        """
        # Determine and apply the click channel list
        if counter_channels is not None:
            channel_list = counter_channels
        else:
            channel_list = self._channel_list
        self.set_counter_channels(channel_list=channel_list)

        # Determine the buffer size to use
        if counter_buffer is not None:
            buffer_size = counter_buffer
        else:
            buffer_size = self._buffer_size
        # Sanity check: buffer size must be a positive integer
        if not isinstance(buffer_size, int) or buffer_size <= 0:
            return -1
        self._buffer_size = buffer_size

        # Create instance of Counter measurement
        try:
            self._counter = TT.Counter(
                tagger=self._tagger,
                channels=self._channel_list,
                binwidth=self._bin_width,
                n_values=self._buffer_size
            )
        # TT functions signal failure through NotImplementedError
        except NotImplementedError:
            self._counter = None
            return -1

        # TT.Counter starts running immediately after instantiation,
        # so restart it to erase all counts collected since then
        self._counter.stop()
        self._counter.clear()
        self._counter.start()

        return 0

    def close_clock(self):
        """
        Closes the clock.

        :return: (int) error code: 0 - OK
                                  -1 - Error
        """
        # Nothing to release: the internal timebase is used
        return 0

    def close_counter(self):
        """
        Closes the counter and cleans up afterwards.

        :return: (int) error code: 0 - OK
                                  -1 - Error
        """
        # Stop and clear the TT.Counter measurement, if any.
        # NotImplementedError covers TT call failures; AttributeError covers
        # the case where the counter was never created (self._counter is None).
        try:
            self._counter.stop()
            self._counter.clear()
        except (NotImplementedError, AttributeError):
            pass

        # Deliberately keep _channel_list untouched: the Counter GUI builds
        # its curve set from get_counter_channels() once, at activation time,
        # and clearing the list here would break the next "Start" click.
        return 0

    def get_counter(self, samples=1):
        """
        Returns the current counts per second of the counter.

        :param samples: (int) [optional] number of samples to read in one go
                        (default is one sample)
        :return: numpy.array((samples, uint32), dtype=np.uint32)
                 array of count rate [counts/second] arrays of length samples for each click channel
                 Empty array [] is returned in the case of error.
        """
        # Sanity check: samples must be a positive integer
        if samples != 1:
            if not isinstance(samples, int) or samples <= 0:
                return []

        # Naive wait for the requested number of bins to fill.
        # This is only accurate for clock frequencies below ~50 Hz: above
        # that, software delays (~1 ms) dominate the actual sampling interval
        # and the counter overflows (samples are overwritten before being
        # read). The measurement does not fail then - only the time axis of
        # the count-trace graph becomes inaccurate.
        time.sleep(samples * self._bin_width_sec)

        # Read in the most recent 'samples' samples
        try:
            count_array = self._counter.getData()[:, -samples:]
        except NotImplementedError:
            # TT read operation failed
            return []
        except AttributeError:
            # Counter was not initialized
            return []

        # Convert counts per bin into counts per second
        count_rate_array = count_array / self._bin_width_sec
        return count_rate_array

    def get_counter_channels(self):
        """
        Returns the list of click channel numbers.

        :return: (list of int) list of click channel numbers
        """
        return copy.deepcopy(self._channel_list)

    def set_counter_channels(self, channel_list=None):
        """
        Set click channel list.

        Notice that this method only modifies internal variable _channel_list.
        To apply the change to the counter, one has to call set_up_counter() again.

        :param channel_list: (list of int) list of channels to count clicks on
        :return: (list of int) actual list of click channels
        """
        if channel_list is None:
            return self.get_counter_channels()

        # Sanity check: every requested channel must exist on the device
        all_channels = self._get_all_channels()
        if not set(channel_list).issubset(set(all_channels)):
            return self.get_counter_channels()

        # Store a sorted copy so that channel order does not depend on the
        # order of numbers in the config file. Using sorted() instead of
        # aliasing + in-place sort() also avoids mutating the caller's list.
        self._channel_list = sorted(channel_list)

        return self.get_counter_channels()

    def _get_all_channels(self):
        """
        Return list of all channels available on the device.

        Positive/negative values correspond to rising/falling edge detection.
        For example:
            1 means 'rising edge on connector 1'
            -1 means 'falling edge on connector 1'

        :return: (list of int) list of all available channel numbers,
                 including edge sign.
        """
        try:
            available_channel_tuple = list(
                self._tagger.getChannelList(TT.TT_CHANNEL_RISING_AND_FALLING_EDGES)
            )
        # NotImplementedError - communication with the device failed
        except NotImplementedError:
            return []
        # AttributeError - self._tagger is None (device not initialized)
        except AttributeError:
            return []

        return list(available_channel_tuple)
class Service(ServiceBase):
def exposed_set_up_clock(self, clock_frequency=None, clock_channel=None):
"""
Sets sample | |
%s != %s' % (ds.GetGeoTransform(), gt))
ds = None
gdaltest.tiff_drv.Delete('tmp/tiff_write_60.tif')
assert not os.path.exists(options_tuple[1])
###############################################################################
# Test BigTIFF=IF_NEEDED creation option
def test_tiff_write_61():
    """BIGTIFF=IF_NEEDED on a sparse 50k x 50k file must produce a classical TIFF."""
    filename = 'tmp/bigtiff.tif'
    ds = gdaltest.tiff_drv.Create(filename, 50000, 50000, 1,
                                  options=['BIGTIFF=IF_NEEDED', 'SPARSE_OK=TRUE'])
    ds = None

    ds = gdal.Open(filename)
    assert ds is not None
    ds = None

    with open(filename, mode='rb') as fileobj:
        binvalues = struct.unpack('B' * 4, fileobj.read(4))

    gdaltest.tiff_drv.Delete(filename)

    # Classical TIFF magic number is 42 (0x2A), in either byte order
    assert binvalues[2:4] in ((0x2A, 0), (0, 0x2A))
###############################################################################
# Test BigTIFF=IF_SAFER creation option
def test_tiff_write_62():
    """BIGTIFF=IF_SAFER on a sparse 50k x 50k file must produce a BigTIFF."""
    filename = 'tmp/bigtiff.tif'
    ds = gdaltest.tiff_drv.Create(filename, 50000, 50000, 1,
                                  options=['BIGTIFF=IF_SAFER', 'SPARSE_OK=TRUE'])
    ds = None

    ds = gdal.Open(filename)
    assert ds is not None
    ds = None

    with open(filename, mode='rb') as fileobj:
        binvalues = struct.unpack('B' * 4, fileobj.read(4))

    gdaltest.tiff_drv.Delete(filename)

    # BigTIFF magic number is 43 (0x2B), in either byte order
    assert binvalues[2:4] in ((0x2B, 0), (0, 0x2B))
###############################################################################
# Test BigTIFF=NO creation option when creating a BigTIFF file would be required
def test_tiff_write_63():
    """BIGTIFF=NO must make creation fail when a BigTIFF would be required."""
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdaltest.tiff_drv.Create('tmp/bigtiff.tif', 150000, 150000, 1,
                                  options=['BIGTIFF=NO'])
    gdal.PopErrorHandler()

    if ds is not None:
        pytest.fail()
###############################################################################
# Test returned projection in WKT format for a WGS84 GeoTIFF (#2787)
def test_tiff_write_64():
    """Returned projection in WKT format for a WGS84 GeoTIFF (#2787)."""
    filename = 'tmp/tiff_write_64.tif'
    ds = gdaltest.tiff_drv.Create(filename, 1, 1, 1)
    srs = osr.SpatialReference()
    srs.SetFromUserInput('WGS84')
    ds.SetProjection(srs.ExportToWkt())
    ds = None

    ds = gdal.Open(filename)
    got_wkt = ds.GetProjection()
    ds = None

    expected_wkt = """GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]"""
    assert got_wkt == expected_wkt, 'coordinate system does not exactly match.'

    gdaltest.tiff_drv.Delete(filename)
###############################################################################
# Verify that we can write XML metadata.
def test_tiff_write_65():
    """Verify that we can write XML metadata."""
    filename = 'tmp/tiff_write_65.tif'
    xml_doc = '<doc><test xml:attr="abc"/></doc>'

    ds = gdaltest.tiff_drv.Create(filename, 10, 10)
    ds.SetMetadata([xml_doc], 'xml:test')
    ds = None

    ds = gdal.Open(filename)
    md = ds.GetMetadata('xml:test')
    ds = None

    assert len(md) == 1 and md[0] == xml_doc, 'did not get xml back clean'

    gdaltest.tiff_drv.Delete(filename)
###############################################################################
# Verify that we can write and read a band-interleaved GeoTIFF with 65535 bands (#2838)
def test_tiff_write_66():
    """Write and read a band-interleaved GeoTIFF with 65535 bands (#2838)."""
    if gdal.GetConfigOption('SKIP_MEM_INTENSIVE_TEST') is not None:
        pytest.skip()

    filename = 'tmp/tiff_write_66.tif'
    ds = gdaltest.tiff_drv.Create(filename, 1, 1, 65535, options=['INTERLEAVE=BAND'])
    ds = None

    ds = gdal.Open(filename)
    assert ds.RasterCount == 65535
    # Spot-check the first and last band
    for band_idx in (1, 65535):
        assert ds.GetRasterBand(band_idx).Checksum() == 0
    ds = None

    gdaltest.tiff_drv.Delete(filename)
###############################################################################
# Verify that we can write and read a pixel-interleaved GeoTIFF with 65535 bands (#2838)
def test_tiff_write_67():
    """Write and read a pixel-interleaved GeoTIFF with 65535 bands (#2838)."""
    if gdal.GetConfigOption('SKIP_MEM_INTENSIVE_TEST') is not None:
        pytest.skip()

    filename = 'tmp/tiff_write_67.tif'
    ds = gdaltest.tiff_drv.Create(filename, 1, 1, 65535, options=['INTERLEAVE=PIXEL'])
    ds = None

    ds = gdal.Open(filename)
    assert ds.RasterCount == 65535
    # Spot-check the first and last band
    for band_idx in (1, 65535):
        assert ds.GetRasterBand(band_idx).Checksum() == 0
    ds = None

    gdaltest.tiff_drv.Delete(filename)
###############################################################################
# Verify that we can set the color table after a Create() (scenario hit by map.tif in #2820)
def test_tiff_write_68():
    """Set the color table after a Create() (scenario hit by map.tif in #2820)."""
    filename = 'tmp/tiff_write_68.tif'
    ds = gdaltest.tiff_drv.Create(filename, 151, 161, options=['COMPRESS=LZW'])

    ct = gdal.ColorTable()
    for idx, rgba in enumerate([(255, 255, 255, 255),
                                (255, 255, 0, 255),
                                (255, 0, 255, 255),
                                (0, 255, 255, 255)]):
        ct.SetColorEntry(idx, rgba)
    ds.GetRasterBand(1).SetRasterColorTable(ct)
    ds.GetRasterBand(1).Fill(255)
    ds = None

    ds = gdal.Open(filename)
    assert ds.GetRasterBand(1).Checksum() != 0
    ds = None

    gdaltest.tiff_drv.Delete(filename)
###############################################################################
# Verify GTiffRasterBand::NullBlock() when reading empty block without any nodata value set
def test_tiff_write_69():
    """GTiffRasterBand::NullBlock() when reading an empty block without nodata set."""
    filename = 'tmp/tiff_write_69.tif'
    ds = gdaltest.tiff_drv.Create(filename, 32, 32, 1, gdal.GDT_Int16, options=['SPARSE_OK=YES'])
    ds = None

    # An unwritten sparse block must read back as zeros
    ds = gdal.Open(filename)
    assert ds.GetRasterBand(1).Checksum() == 0
    ds = None

    gdaltest.tiff_drv.Delete(filename)
###############################################################################
# Verify GTiffRasterBand::NullBlock() when reading empty block with nodata value set
def test_tiff_write_70():
    """Verify GTiffRasterBand::NullBlock() when reading empty block with nodata value set."""
    # Build a reference raster fully filled with -32768 and remember its checksum
    ref_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_70_ref.tif', 32, 32, 1, gdal.GDT_Int16)
    ref_ds.GetRasterBand(1).Fill(-32768)
    ref_ds = None

    ref_ds = gdal.Open('tmp/tiff_write_70_ref.tif')
    expected_cs = ref_ds.GetRasterBand(1).Checksum()
    ref_ds = None

    ds = gdaltest.tiff_drv.Create('tmp/tiff_write_70.tif', 32, 32, 1, gdal.GDT_Int16, options=['SPARSE_OK=YES'])
    ds.GetRasterBand(1).SetNoDataValue(0)
    # Setting nodata alone must not force the directory to be written out yet
    assert os.stat('tmp/tiff_write_70.tif').st_size <= 8, \
        'directory should not be crystallized'
    ds = None

    # Change the nodata value on the sparse (never written) raster
    ds = gdal.Open('tmp/tiff_write_70.tif', gdal.GA_Update)
    ds.GetRasterBand(1).SetNoDataValue(-32768)
    ds = None

    # Empty blocks must now read back as the nodata value -32768
    ds = gdal.Open('tmp/tiff_write_70.tif')
    assert ds.GetRasterBand(1).Checksum() == expected_cs, 'wrong checksum'
    ds = None

    ds = gdal.Open('tmp/tiff_write_70.tif', gdal.GA_Update)
    assert ds.GetRasterBand(1).DeleteNoDataValue() == 0
    assert ds.GetRasterBand(1).GetNoDataValue() is None
    ds = None

    # Deleting the nodata value must not have created a PAM sidecar file
    with pytest.raises(OSError):
        os.stat('tmp/tiff_write_70.tif.aux.xml')

    ds = gdal.Open('tmp/tiff_write_70.tif')
    assert ds.GetRasterBand(1).GetNoDataValue() is None
    ds = None

    gdaltest.tiff_drv.Delete('tmp/tiff_write_70.tif')
    gdaltest.tiff_drv.Delete('tmp/tiff_write_70_ref.tif')
###############################################################################
# Test reading in a real BigTIFF file (on filesystems supporting sparse files)
def test_tiff_write_71():
    """Read a hand-built ~10 GB BigTIFF stored as a sparse file."""
    # Determine if the filesystem supports sparse files (we don't want to create a real 10 GB
    # file !
    if not gdaltest.filesystem_supports_sparse_files('tmp'):
        pytest.skip()

    # Start from a pre-extracted BigTIFF header, then append the tag arrays
    header = open('data/bigtiff_header_extract.tif', 'rb').read()

    f = open('tmp/tiff_write_71.tif', 'wb')
    f.write(header)

    # Write StripByteCounts tag
    # 100,000 in little endian
    for _ in range(100000):
        f.write(b'\xa0\x86\x01\x00\x00\x00\x00\x00')

    # Write StripOffsets tag: strips are contiguous, 100000 bytes apart
    offset = 1600252
    for _ in range(100000):
        f.write(struct.pack('<Q', offset))
        offset = offset + 100000

    # Write 0x78 as value of pixel (99999, 99999) — seek leaves a sparse hole
    f.seek(10001600252 - 1, 0)
    f.write(b'\x78')
    f.close()

    ds = gdal.Open('tmp/tiff_write_71.tif')
    data = ds.GetRasterBand(1).ReadRaster(99999, 99999, 1, 1)
    assert struct.unpack('b', data)[0] == 0x78
    ds = None

    gdaltest.tiff_drv.Delete('tmp/tiff_write_71.tif')
###############################################################################
# With CreateCopy(), check that TIFF directory is in the first bytes of the file
# and has not been rewritten later (#3021)
def test_tiff_write_72():
    """With CreateCopy(), the TIFF directory must stay in the first bytes of the file (#3021)."""
    shutil.copyfile('data/byte.tif', 'tmp/byte.tif')
    ds = gdal.Open('tmp/byte.tif', gdal.GA_Update)
    ds.SetMetadata({'TEST_KEY': 'TestValue'})
    ds = None

    for profile in ('GDALGeotiff', 'GEOTIFF', 'BASELINE'):
        src_ds = gdal.Open('tmp/byte.tif')
        out_ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_72.tif', src_ds, options=['ENDIANNESS=LITTLE', 'PROFILE=' + profile])
        del out_ds
        src_ds = None

        with open('tmp/tiff_write_72.tif', mode='rb') as fileobj:
            fileobj.seek(4)
            binvalues = struct.unpack('B' * 4, fileobj.read(4))

        # Directory should be at offset 8 of the file
        assert binvalues == (0x08, 0x00, 0x00, 0x00), \
            ('Failed with profile %s' % profile)

    gdaltest.tiff_drv.Delete('tmp/byte.tif')
    gdaltest.tiff_drv.Delete('tmp/tiff_write_72.tif')
###############################################################################
# With Create(), check that TIFF directory is in the first bytes of the file
# and has not been rewritten later (#3021)
def test_tiff_write_73():
    """With Create(), the TIFF directory must stay in the first bytes of the file (#3021)."""
    filename = 'tmp/tiff_write_73.tif'

    def _ifd_offset_bytes():
        # Bytes 4..7 of a classical TIFF hold the offset of the first directory
        with open(filename, mode='rb') as fileobj:
            fileobj.seek(4)
            return struct.unpack('B' * 4, fileobj.read(4))

    out_ds = gdaltest.tiff_drv.Create(filename, 10, 10, options=['ENDIANNESS=LITTLE'])
    out_ds.SetGeoTransform([1, 0.01, 0, 1, 0, -0.01])
    srs = osr.SpatialReference()
    srs.SetFromUserInput('EPSG:32601')
    out_ds.SetProjection(srs.ExportToWkt())
    out_ds.SetMetadata({'TEST_KEY': 'TestValue'})
    out_ds.BuildOverviews('NONE', [2])
    out_ds.GetRasterBand(1).Fill(255)
    out_ds = None

    # Directory should be at offset 8 of the file
    assert _ifd_offset_bytes() == (0x08, 0x00, 0x00, 0x00)

    # Re-open the file and modify the pixel content
    out_ds = gdal.Open(filename, gdal.GA_Update)
    out_ds.GetRasterBand(1).Fill(0)
    out_ds = None

    # Directory must not have been rewritten at the end of the file
    assert _ifd_offset_bytes() == (0x08, 0x00, 0x00, 0x00)

    gdaltest.tiff_drv.Delete(filename)
###############################################################################
# Verify we can write 12bit jpeg encoded tiff.
def test_tiff_write_74():
    """Verify we can write 12bit jpeg encoded tiff."""
    md = gdaltest.tiff_drv.GetMetadata()
    if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:
        pytest.skip()

    # Probe whether this libjpeg build supports 12 bit precision by trying
    # to read a 12 bit sample file with error-message accumulation on.
    old_accum = gdal.GetConfigOption('CPL_ACCUM_ERROR_MSG', 'OFF')
    gdal.SetConfigOption('CPL_ACCUM_ERROR_MSG', 'ON')
    gdal.ErrorReset()
    gdal.PushErrorHandler('CPLQuietErrorHandler')

    try:
        ds = gdal.Open('data/mandrilmini_12bitjpeg.tif')
        ds.GetRasterBand(1).ReadRaster(0, 0, 1, 1)
    except Exception:  # was a bare except:; only open/read failures expected
        ds = None

    gdal.PopErrorHandler()
    gdal.SetConfigOption('CPL_ACCUM_ERROR_MSG', old_accum)

    if gdal.GetLastErrorMsg().find(
            'Unsupported JPEG data precision 12') != -1:
        sys.stdout.write('(12bit jpeg not available) ... ')
        pytest.skip()

    for photometric in ('YCBCR', 'RGB'):
        drv = gdal.GetDriverByName('GTiff')
        dst_ds = drv.CreateCopy('tmp/test_74.tif', ds,
                                options=['COMPRESS=JPEG', 'NBITS=12',
                                         'JPEG_QUALITY=95',
                                         'PHOTOMETRIC=' + photometric])
        dst_ds = None  # flush/close before reopening

        dst_ds = gdal.Open('tmp/test_74.tif')
        stats = dst_ds.GetRasterBand(1).GetStatistics(0, 1)
        # stats[2] is the mean; expected window for the 12 bit mandrill
        if stats[2] < 2150 or stats[2] > 2180:
            print(photometric)
            pytest.fail('did not get expected mean for band1.')

        # Fall back to the full metadata dict when GetMetadataItem is
        # unavailable (presumably older bindings — confirm).
        try:
            compression = dst_ds.GetMetadataItem('COMPRESSION', 'IMAGE_STRUCTURE')
        except Exception:  # was a bare except:
            md = dst_ds.GetMetadata('IMAGE_STRUCTURE')
            compression = md['COMPRESSION']
        if (photometric == 'YCBCR' and compression != 'YCbCr JPEG') or \
           (photometric == 'RGB' and compression != 'JPEG'):
            print(('COMPRESSION="%s"' % compression))
            pytest.fail('did not get expected COMPRESSION value')

        try:
            nbits = dst_ds.GetRasterBand(3).GetMetadataItem('NBITS', 'IMAGE_STRUCTURE')
        except Exception:  # was a bare except:
            md = dst_ds.GetRasterBand(3).GetMetadata('IMAGE_STRUCTURE')
            nbits = md['NBITS']
        if nbits != '12':
            print(photometric)
            pytest.fail('did not get expected NBITS value')

        dst_ds = None

    gdaltest.tiff_drv.Delete('tmp/test_74.tif')
###############################################################################
# Verify that FlushCache() alone doesn't cause crash (#3067 )
def test_tiff_write_75():
    """Calling FlushCache() alone must not cause a crash (#3067)."""
    dataset = gdaltest.tiff_drv.Create('tmp/tiff_write_75.tif', 1, 1, 1)
    dataset.FlushCache()
    dataset = None
    gdaltest.tiff_drv.Delete('tmp/tiff_write_75.tif')
###############################################################################
# Test generating a G4 band to use the TIFFWriteScanline()
def test_tiff_write_76():
    """Round-trip a G4 file through CreateCopy with one all-image strip,
    exercising the TIFFWriteScanline() path."""
    src_ds = gdal.Open('data/slim_g4.tif')
    compression = src_ds.GetMetadata('IMAGE_STRUCTURE')['COMPRESSION']
    copy_options = [
        'BLOCKYSIZE=%d' % src_ds.RasterYSize,
        'COMPRESS=' + compression,
    ]
    new_ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_76.tif', src_ds, options=copy_options)
    new_ds = None

    # Reopen and make sure the pixel data survived the copy.
    new_ds = gdal.Open('tmp/tiff_write_76.tif')
    assert new_ds.GetRasterBand(1).Checksum() == 3322, 'Got wrong checksum'
    src_ds = None
    new_ds = None
    gdaltest.tiff_drv.Delete('tmp/tiff_write_76.tif')
###############################################################################
# Test generating & reading a 8bit all-in-one-strip multiband TIFF (#3904)
def test_tiff_write_77():
src_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_77_src.tif', 1, 5000, 3)
src_ds.GetRasterBand(2).Fill(255)
for interleaving in ('PIXEL', 'BAND'):
new_ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_77.tif', src_ds,
options=['BLOCKYSIZE=%d' % src_ds.RasterYSize,
'COMPRESS=LZW',
'INTERLEAVE=' + interleaving])
for attempt in range(2):
# Test reading a few samples to check that random reading works
band_lines = [(1, 0), (1, 5), (1, 3), (2, 10), (1, 100), (2, 1000), (2, 500),
(1, 500), (2, 500), (2, 4999), (2, 4999), (3, 4999), (1, 4999)]
for band_line in band_lines:
cs = new_ds.GetRasterBand(band_line[0]).Checksum(0, band_line[1], 1, 1)
if band_line[0] == 2:
expected_cs = 255 % 7
else:
expected_cs = 0 % 7
assert cs == expected_cs, 'Got wrong checksum'
# Test whole bands
for i in range(3):
cs = new_ds.GetRasterBand(i + 1).Checksum()
expected_cs = src_ds.GetRasterBand(i + 1).Checksum()
assert cs == | |
<filename>chrono_figure/host/usb2snes.py
# USB2SNES interface library and command line tool
# Copyright (c) 2020, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file's home is: "https://github.com/tpwrules/tasha_and_friends/
# blob/master/chrono_figure/host/usb2snes.py"
import pathlib
import struct
import time
from collections import namedtuple

import serial
import serial.tools.list_ports
# SOME WORDS ON FILE PATHS
# A USB2SNES path describes the path to a file or directory on the SD card.
# USB2SNES paths operate essentially like Unix paths: forward slashes separate
# directories, '.' and '..' represent current and one-level-up directories
# respectively. However, note that paths are ASCII-encoded, cannot contain 0x00
# characters, and are limited to 255 bytes. Paths are always relative to the
# root of the SD card: paths "/cool.file" and "cool.file" always refer to
# "cool.file" in the root directory.
# Errors during certain file operations can crash the USB2SNES and require that
# the console be power-cycled. This includes reading nonexistent files, and
# (inexplicably) attempting to boot a ROM without a period (.) in its name.
# These situations are checked for an an exception is thrown if they are about
# to occur.
# if there is exactly one USB2SNES device attached, return its port (suitable
# for passing to connect()). otherwise, if there are no devices or more than
# one, return None.
def detect_port():
    """Return the port of the single attached USB2SNES, or None.

    None is returned both when no device is present and when more than one
    is, since the choice would then be ambiguous.
    """
    matches = [
        port.device
        for port in serial.tools.list_ports.comports()
        # USB2SNES devices enumerate with this fixed VID:PID pair
        if (port.vid, port.pid) == (0x1209, 0x5A22)
    ]
    return matches[0] if len(matches) == 1 else None
# Command opcodes (byte 4 of the 512 byte command packet).
OP_GET = 0
OP_PUT = 1
OP_LS = 4
OP_MKDIR = 5
OP_RM = 6
OP_RESET = 8
OP_BOOT = 9
OP_POWER_CYCLE = 10
OP_INFO = 11
OP_MENU_RESET = 12

# Address spaces a command can target (byte 5 of the packet).
SPACE_FILE = 0
SPACE_SNES = 1
SPACE_CHRONO_FIGURE = 5

# Command flags (byte 6 of the packet).
FLAG_NONE = 0
FLAG_NORESP = 64  # tell the device not to send a response packet
class USB2SNESError(Exception):
    """Base class for all errors raised by this module."""

class Timeout(USB2SNESError):
    """Raised when a serial read does not complete in time."""

class FileError(USB2SNESError):
    """Raised when an operation on an SD card path fails."""

    def __init__(self, path, problem):
        self.path = path
        self.problem = problem

    def __str__(self):
        return "Path '%s': %s" % (self.path, self.problem)
# Device information returned by USB2SNES.get_info().
USB2SNESInfo = namedtuple("USB2SNESInfo", [
    "fw_version", # CONFIG_FWVER: firmware version as a 32 bit number
    "fw_version_string", # firmware version string displayed in the menu
    "device_name", # DEVICE_NAME: "sd2snes Mk.II" or "sd2snes Mk.III"
    "feature_byte", # low byte of active FPGA feature bits. consult usb2snes's
                    # src/fpga_spi.c for definitions
    "current_rom", # file name of the currently executing ROM
])
class USB2SNES:
    def __init__(self):
        """Create an unconnected handle; call connect() to attach a port."""
        # we don't have a port until we're connected
        self.port = None
def _ser_read(self, length):
if self.port is None:
raise USB2SNESError("not connected")
read = b""
while length > 0:
new = self.port.read(length)
if len(new) == 0:
raise Timeout("read timeout")
read += new
length -= len(new)
return read
def _ser_write(self, data):
if self.port is None:
raise USB2SNESError("not connected")
sent_len = 0
while sent_len != len(data):
sent_len += self.port.write(data[sent_len:])
self.port.flush()
def connect(self, port):
if self.port is not None:
self.disconnect()
port = serial.Serial(port=port, baudrate=9600, timeout=3)
self.port = port
def disconnect(self):
if self.port is None:
return
port = self.port
self.port = None
try:
port.close()
except:
pass
# send out a usb2snes command. arg_size is the 32 bit size at [252:256].
# arg_data is additional binary data at [256:]. if resp is True, then the
# usb2snes is told to respond. this function DOES NOT read or parse the
# response.
def _send_command(self, opcode, space,
arg_size=0, arg_data=b'', resp=False):
flags = FLAG_NONE if resp else FLAG_NORESP
cmd_buf = b'USBA' + bytes([opcode, space, flags])
# pad to and then write out the size field
cmd_buf += b'\x00'*(252-len(cmd_buf))
cmd_buf += struct.pack(">I", arg_size)
# add the rest of the argument data
cmd_buf += arg_data[:256]
# pad out to the 512 byte packet size
cmd_buf += b'\x00'*(512-len(cmd_buf))
# and send everything on
self._ser_write(cmd_buf)
    # reset the currently running game (or the menu, if it's currently running)
    def reset_console(self):
        """Reset whatever is currently running on the console."""
        self._send_command(OP_RESET, SPACE_SNES)
    # reset back to the menu. has no effect if the menu is currently running.
    def reset_to_menu(self):
        """Return the console to the USB2SNES menu."""
        self._send_command(OP_MENU_RESET, SPACE_SNES)
# reset the USB2SNES's microcontroller. implies disconnection.
def reset_microcontroller(self):
self._send_command(OP_POWER_CYCLE, SPACE_SNES)
# this will sever the USB connection, so make sure we recognize that
time.sleep(0.2)
self.disconnect()
# boot the SNES ROM off the SD card with the given file name
def boot_rom(self, path):
encoded_path, parts = self.parse_path(path)
# trying to boot a ROM that doesn't exist will do weird things and
# require a menu reset, so we don't allow nonexistent ROMs to be booted
contents = self.list_dir('/'.join(parts[:-1]))
if parts[-1] not in contents:
raise FileError(path, "ROM does not exist")
if "." not in parts[-1]:
# attempting to boot such names will crash the USB2SNES
raise FileError(path, "name has no period (.)")
self._send_command(OP_BOOT, SPACE_SNES, arg_data=encoded_path)
# read various pieces of information about what's going on
def get_info(self):
# ask for the information
self._send_command(OP_INFO, SPACE_SNES, resp=True)
# it comes back in its own packet
info_packet = self._ser_read(512)
# convert some packet bytes to a string
def tostr(b):
# remove all the null terminators
try:
b = b[:b.index(b'\x00')]
except ValueError:
pass # there weren't any
return b.decode("ascii")
return USB2SNESInfo(
fw_version=struct.unpack(">I", info_packet[256:260])[0],
fw_version_string=tostr(info_packet[260:260+64]),
device_name=tostr(info_packet[260+64:260+128]),
feature_byte=info_packet[6],
current_rom=tostr(info_packet[16:256])
)
# read some data from a given memory space
def read_space(self, space, address, size):
# ask to read the data
self._send_command(OP_GET, space,
arg_size=size, arg_data=struct.pack('>I', address))
# receive enough 512 byte blocks to get all of it
num_blocks = (size+511) >> 9
data = self._ser_read(num_blocks*512)
# return only what was asked for
return data[:size]
# write some data to a given memory space
def write_space(self, space, address, data):
# say that we're writing some data
self._send_command(OP_PUT, space,
arg_size=len(data), arg_data=struct.pack('>I', address))
# pad it out to full 512 byte blocks
if len(data) % 512 > 0:
data += b'\x00'*(512-(len(data)%512))
# then send it along
self._ser_write(data)
# parse a path and return the final encoded filename along with the list of
# components (parts)
def parse_path(self, path):
# make sure the path actually is ASCII before we do anything to it
try:
path.encode("ascii")
except UnicodeEncodeError as e:
raise FileError(path, str(e)) from None
# canonicalize the path to remove "."s, ".."s, and extra "/"s
parts = [""] # root directory is at the start
for part in path.split("/"):
if part == "" or part == ".":
continue
elif part == "..":
if len(parts) == 1:
raise FileError(path,
"traversing above root directory") from None
parts.pop()
else:
parts.append(part)
encoded_path = "/".join(parts[1:]).encode("ascii")
if len(encoded_path) > 255:
raise FileError(encoded_path.decode("ascii"), "path is too long")
return encoded_path, parts
def list_dir(self, path):
encoded_path, parts = self.parse_path(path)
self._send_command(OP_LS, SPACE_FILE, arg_data=encoded_path, resp=True)
resp = self._ser_read(512)
if resp[5]:
raise FileError(path, "failed to list directory")
list_result = {}
finished = False
while not finished:
data = self._ser_read(512)
while len(data) > 0:
if data[0] == 0xFF: # no more entries
finished = True
break
elif data[0] == 0x02: # another packet is coming
break
is_dir = data[0] == 0
name_end = data[1:].index(b'\x00')+1
filename = data[1:name_end].decode("ascii")
data = data[name_end+1:]
if filename == "." or filename == "..": continue
if is_dir:
list_result[filename] = "dir"
| |
# Copyright (c) Facebook, Inc. and its affiliates
# Copyright (c) MTRF authors
import collections
from enum import Enum
from gym import utils
import numpy as np
import os
from pathlib import Path
import pickle
from typing import Any, Dict, Optional, NewType, Sequence, Union, Tuple
from r3l.r3l_envs.base_env.base_env_deltapos import SawyerDhandDeltaBaseEnv
from r3l.utils.quatmath import quat2euler, euler2quat
from r3l.utils.circle_math import circle_distance, circle_distance_mod
from r3l.utils.range import get_range_from_params
from r3l.robot.object import ObjectState
from r3l.robot.default_configs import (
DEFAULT_DHAND_ROBOT_CONFIG,
DEFAULT_SAWYER_ROBOT_CONFIG,
DEFAULT_OBJECT_CONFIG,
)
class ObjectType(Enum):
    """Identifies which manipulation object (and its MuJoCo model, via
    OBJECT_TYPE_TO_MODEL_PATH) an environment should load."""
    Valve3 = 1
    Rod = 2
    Dodecahedron = 3
    DodecahedronBasket = 4
    DodecahedronBulb = 5
    Dumbbell = 6
    Mug = 7
    Pipe = 8
# Directory holding the MuJoCo XML models for the in-hand tasks.
DHAND_INHAND_ASSET_PATH = Path(__file__).absolute().parent / "assets"

# Maps each ObjectType to the XML model file that contains it.
OBJECT_TYPE_TO_MODEL_PATH = {
    ObjectType.Valve3: str(DHAND_INHAND_ASSET_PATH / "sawyer_dhand_inhand_valve.xml"),
    ObjectType.Rod: str(DHAND_INHAND_ASSET_PATH / "sawyer_dhand_inhand_rod.xml"),
    ObjectType.Dodecahedron: str(DHAND_INHAND_ASSET_PATH / "sawyer_dhand_inhand_dodecahedron.xml"),
    ObjectType.DodecahedronBasket: str(DHAND_INHAND_ASSET_PATH / "sawyer_dhand_inhand_dodecahedron_basket.xml"),
    ObjectType.DodecahedronBulb: str(DHAND_INHAND_ASSET_PATH / "sawyer_dhand_inhand_dodecahedron_bulb.xml"),
    ObjectType.Dumbbell: str(DHAND_INHAND_ASSET_PATH / "sawyer_dhand_inhand_dumbbell.xml"),
    ObjectType.Mug: str(DHAND_INHAND_ASSET_PATH / "sawyer_dhand_inhand_mug.xml"),
    ObjectType.Pipe: str(DHAND_INHAND_ASSET_PATH / "sawyer_dhand_inhand_pipe.xml"),
}
class SawyerDhandInHandObjectBaseEnv(SawyerDhandDeltaBaseEnv):
VALVE_DEFAULT_OBSERVATION_KEYS = (
"dhand_qpos", # "sawyer_arm_qpos",
"dhand_qvel", # "sawyer_arm_qvel",
"mocap_pos", "mocap_euler",
# Object info
"object_xyz", # Not including object euler and qvel
# object "z orientation" accounts for flips
"object_top_angle_cos",
"object_top_angle_sin",
"target_xyz",
# target "z orientation" accounting for flips
"target_top_angle_cos",
"target_top_angle_sin",
"object_to_hand_xyz",
# These all account for site symmetry
"object_site1_to_target_site1_xyz_err",
"object_site2_to_target_site2_xyz_err",
"object_site3_to_target_site3_xyz_err"
)
ROD_DEFAULT_OBSERVATION_KEYS = (
"dhand_qpos", "sawyer_arm_qpos",
"mocap_pos", "mocap_euler",
# Object info
"object_xyz", # Not including object euler and qvel
"object_z_orientation_cos", "object_z_orientation_sin",
"target_xyz",
"target_z_orientation_cos", "target_z_orientation_sin",
"object_to_hand_xyz",
)
PIPE_DEFAULT_OBSERVATION_KEYS = (
"dhand_qpos", "sawyer_arm_qpos",
"mocap_pos", "mocap_euler",
# Object info
"object_xyz", # Not including object euler and qvel
# "object_z_orientation_cos", "object_z_orientation_sin",
"target_xyz",
# "target_z_orientation_cos", "target_z_orientation_sin",
"object_to_hand_xyz",
"object_normal_to_target_normal_err",
"object_parallel_to_target_parallel_err",
)
DODECAHEDRON_DEFAULT_OBSERVATION_KEYS = (
"dhand_qpos", "sawyer_arm_qpos",
"mocap_pos", "mocap_euler",
# Object info
"object_xyz", # Not including object euler and qvel
"target_xyz",
"object_to_hand_xyz",
"object_to_target_xyz",
)
def __init__(
self,
model_path=None,
sim=None,
object_type: ObjectType = ObjectType.Rod,
observation_keys: list = ROD_DEFAULT_OBSERVATION_KEYS,
collect_bin_counts: bool = False,
n_bins: int = 32,
reset_policy_directories: list = [],
reset_state_pkl_path: str = None,
reset_offset: np.ndarray = None,
save_reset_videos: bool = False,
**kwargs
):
self.object_type = object_type
# Discrete count rewards
self.x_range = (0.72 - 0.4, 0.72 + 0.4)
self.y_range = (0.15 - 0.4, 0.15 + 0.4)
self._collect_bin_counts = collect_bin_counts
self.n_bins = n_bins
self.x_bins = np.linspace(self.x_range[0], self.x_range[1], self.n_bins)
self.y_bins = np.linspace(self.y_range[0], self.y_range[1], self.n_bins)
self.bin_counts = np.ones((self.n_bins + 1, self.n_bins + 1))
# Loaded policy resets
self._reset_dist = np.zeros(1)
self._reset_policies = []
self._wrapped_reset_policies = []
self._reset_horizons = []
self._reset_envs = []
self._reset_imgs = []
self._save_reset_videos = save_reset_videos
# Track goal position
self._last_target_xyz = np.zeros(3)
self._last_target_euler = np.zeros(3)
self._pose_target = 0.7 * np.array([
1, 1, 1, 1,
-1, -1, -1, -1,
1, 1, 1, 1,
-1, -1, -1, -1,
])
# Initialize observation keys depending on object type
if self.object_type in (
ObjectType.Dodecahedron,
ObjectType.DodecahedronBasket,
ObjectType.DodecahedronBulb):
observation_keys = self.DODECAHEDRON_DEFAULT_OBSERVATION_KEYS
elif self.object_type in (ObjectType.Valve3,):
observation_keys = self.VALVE_DEFAULT_OBSERVATION_KEYS
elif self.object_type in (ObjectType.Rod,):
observation_keys = self.ROD_DEFAULT_OBSERVATION_KEYS
elif self.object_type in (ObjectType.Pipe,):
observation_keys = self.PIPE_DEFAULT_OBSERVATION_KEYS
try:
if self.script_motion:
observation_keys = observation_keys + ("step_counter",)
except AttributeError:
pass
# Init the environment + sim
env_params = dict(
observation_keys=observation_keys,
dhand_config=DEFAULT_DHAND_ROBOT_CONFIG,
sawyer_config=DEFAULT_SAWYER_ROBOT_CONFIG,
object_config=DEFAULT_OBJECT_CONFIG,
)
env_params.update(kwargs)
if sim:
super().__init__(sim=sim, **env_params)
else:
assert object_type in OBJECT_TYPE_TO_MODEL_PATH
object_model_path = OBJECT_TYPE_TO_MODEL_PATH.get(object_type)
# If a model_path is already specified, make sure that it matches the object type.
if model_path is not None:
assert model_path == object_model_path
else:
model_path = object_model_path
super().__init__(
model_path=model_path,
observation_keys=observation_keys,
**kwargs
)
# Boost damping in the arms to avoid high accelerations
# TODO(justinvyu): Is this also the case on the hardware?
self.sim.model.dof_damping[:7] *= 10
self.sim.model.dof_armature[:7] *= 10
# Get model ids
self.target_bid = self.sim.model.body_name2id('target')
self.grasp_left = self.sim.model.site_name2id('grasp_left')
self.grasp_right = self.sim.model.site_name2id('grasp_right')
self.grasp_id = self.sim.model.site_name2id('grasp')
if self.object_type in (ObjectType.Rod, ObjectType.Pipe):
self.rod_center = self.sim.model.site_name2id('rod_center')
self.rod_left = self.sim.model.site_name2id('rod_left')
self.rod_right = self.sim.model.site_name2id('rod_right')
self.rod_normal = self.sim.model.site_name2id('rod_normal')
self.rod_parallel = self.sim.model.site_name2id('rod_parallel')
self.target_center = self.sim.model.site_name2id('target_center')
self.target_left = self.sim.model.site_name2id('target_left')
self.target_right = self.sim.model.site_name2id('target_right')
self.target_normal = self.sim.model.site_name2id('target_normal')
self.target_parallel = self.sim.model.site_name2id('target_parallel')
elif self.object_type == ObjectType.Valve3:
self.valve_1 = self.sim.model.site_name2id('valve3_1')
self.valve_2 = self.sim.model.site_name2id('valve3_2')
self.valve_3 = self.sim.model.site_name2id('valve3_3')
self.target_1 = self.sim.model.site_name2id('valve3_target_1')
self.target_2 = self.sim.model.site_name2id('valve3_target_2')
self.target_3 = self.sim.model.site_name2id('valve3_target_3')
# Track goal position
self._last_target_xyz = self.sim.data.get_body_xpos("target").copy()
self._last_target_euler = quat2euler(self.sim.data.get_body_xquat("target").copy())
# Object offset = the offset relative to the object that the hand is reset to
self.obj_offset = reset_offset
if self.obj_offset is None:
self.obj_offset = np.array([-0.15, 0, 0.175])
if reset_state_pkl_path:
self._use_reset_state = True
with open(reset_state_pkl_path, 'rb') as f:
reset_states = pickle.load(f)
if isinstance(reset_states, dict):
assert "sawyer" in reset_states
assert "dhand" in reset_states
assert "object" in reset_states
self._reset_sawyer_state = reset_states["sawyer"]
self._reset_dhand_state = reset_states["dhand"]
self._reset_object_state = reset_states["object"]
else:
self._reset_sawyer_state = reset_states[0]
self._reset_dhand_state = reset_states[1]
self._reset_object_state = reset_states[2]
else:
self._use_reset_state = False
reset_policy_directories = reset_policy_directories or []
for directory in reset_policy_directories:
self._load_policy(directory)
    def _load_policy(self, saved_policy_dir, phase_num=None):
        """Load a softlearning reset policy from `saved_policy_dir`.

        Appends the wrapped policy, its evaluation environment, and its
        rollout horizon to the corresponding instance lists, and returns the
        wrapped policy (a function mapping an observation dict to an
        action).  Returns None when softlearning cannot be imported.
        """
        try:
            import softlearning
        except ModuleNotFoundError:
            print('Error: softlearning package not found. Unable to load reset policy.')
            return None
        from softlearning.policies.utils import get_policy_from_variant
        from softlearning.environments.utils import get_environment_from_params
        # expected layout: <dir>/params.pkl (variant) and
        # <dir>/policy_params.pkl (weights)
        policy_dir = Path(saved_policy_dir)
        variant_path = policy_dir / "params.pkl"
        policy_weights_path = policy_dir / "policy_params.pkl"
        assert variant_path.exists() and policy_weights_path.exists(), (
            "Error loading policy and variant: we expect a file at:\n"
            + str(variant_path) + "\n" + str(policy_weights_path)
        )
        with open(variant_path, 'rb') as f:
            variant = pickle.load(f)
        with open(policy_weights_path, 'rb') as f:
            policy_weights = pickle.load(f)
        # weights may be stored per training phase; pick the requested one
        if phase_num is not None:
            policy_weights = policy_weights[phase_num]
        from softlearning.environments.adapters.gym_adapter import GymAdapter
        from softlearning.models.utils import flatten_input_structure
        reset_env_params = variant["environment_params"]["evaluation"]
        reset_env_kwargs = reset_env_params.pop("kwargs", {})
        # share this env's sim so reset rollouts act on the same state
        reset_env_kwargs["sim"] = self.sim
        reset_env_params["kwargs"] = reset_env_kwargs
        reset_env = get_environment_from_params(reset_env_params)
        # Create environment as softlearning expects it for policy initialization
        reset_env = GymAdapter(None, None, env=reset_env)
        self._reset_envs.append(reset_env)
        reset_policy = get_policy_from_variant(variant, reset_env)
        reset_policy.set_weights(policy_weights)
        self._reset_policies.append(reset_policy)
        # Save some time by taking a max reset horizon of 50 steps
        horizon = min(
            50,
            variant.get('sampler_params', {}).get('kwargs', {}).get('max_path_length', 50)
        )
        self._reset_horizons.append(horizon)
        def wrapped_policy(obs_dict):
            # batch the observation, flatten it the way softlearning
            # expects, and return a deterministic action
            feed_dict = {
                key: obs_dict[key][None, ...]
                for key in reset_policy.observation_keys
            }
            observation = flatten_input_structure(feed_dict)
            with reset_policy.set_deterministic(True):
                action = reset_policy.actions_np(observation)[0]
            return action
        self._wrapped_reset_policies.append(wrapped_policy)
        return wrapped_policy
    def run_reset_policy(self):
        """Move the system to its reset configuration.

        Restores the pickled reset state when one was provided; otherwise
        rolls out every loaded reset policy for its horizon and records the
        resulting object-to-target distance (and optionally video frames).
        """
        if self._use_reset_state:
            # Reset angles so that mocap doesn't go crazy after many iters
            self.sawyer_robot.reset(command_angles=True)
            self.sawyer_robot.set_state(self._reset_sawyer_state, command_angles=False)
            self.do_simulation(self.act_mid - 0.75 * self.act_rng, 300) # wait for arm to be stable
            self.dhand_robot.set_state(self._reset_dhand_state)
            self.object.set_state(self._reset_object_state)
        else:
            self._reset_imgs = []
            for reset_policy, reset_env, horizon in zip(self._wrapped_reset_policies, self._reset_envs, self._reset_horizons):
                reset_env.reset_robot()
                for _ in range(horizon):
                    action = reset_policy(reset_env.get_obs_dict())
                    reset_env.step(action)
                    if self._save_reset_videos:
                        self._reset_imgs.append(self.render(width=480, height=480, mode="rgb_array"))
            # remember how close the resets got, for the observation dict
            self._reset_dist = self.get_obs_dict()["object_to_target_xy_distance"]
def get_obs_dict(self) -> Dict[str, np.ndarray]:
obs_dict = super().get_obs_dict()
# Object data
object_qpos, object_qvel = obs_dict["object_qpos"], obs_dict["object_qvel"]
object_xyz, object_quat = object_qpos[:3], object_qpos[3:]
object_euler = quat2euler(object_quat)
object_z_orientation = object_euler[2]
# Target data
# NOTE: Targets need to be set in the OptiTrack coordinate space for hardware
target_xyz = self._last_target_xyz.copy()
target_euler = self._last_target_euler.copy()
target_z_orientation = target_euler[2]
# TODO(justinvyu): Add an offset to the end of the Sawyer xyz
grasp_xyz = self.sim.data.get_site_xpos("grasp").copy()
if self.is_hardware:
grasp_xyz = obs_dict["mocap_pos"][:3]
relative_xyz = object_xyz - target_xyz
circle_dist = circle_distance(object_euler, target_euler)
hand_to_obj_dist = np.linalg.norm(grasp_xyz - object_xyz)
hand_to_target_dist = np.linalg.norm(grasp_xyz - target_xyz)
x, y, z = object_xyz
x_d, y_d = np.digitize(x, self.x_bins), np.digitize(y, self.y_bins)
hand_pose_dist = np.linalg.norm(obs_dict["dhand_qpos"] - self._pose_target)
circle_dist_mod_180 = np.array([
circle_distance_mod(object_z_orientation, target_z_orientation, mod=np.pi)
])
obs_dict.update(collections.OrderedDict((
# Object info
("object_xyz", object_xyz),
("object_xy_discrete", np.array([x_d, y_d])),
("object_quat", object_quat),
("object_euler", object_euler),
("object_z_orientation", object_z_orientation),
("object_z_orientation_cos", np.array([np.cos(object_z_orientation)])),
("object_z_orientation_sin", np.array([np.sin(object_z_orientation)])),
("object_qvel", object_qvel),
# Target info
("target_xyz", target_xyz),
("target_euler", target_euler),
("target_quat", euler2quat(target_euler)),
("target_z_orientation", target_z_orientation),
("target_z_orientation_cos", np.array([np.cos(target_z_orientation)])),
("target_z_orientation_sin", np.array([np.sin(target_z_orientation)])),
# Distances
("object_to_target_xyz_distance", np.linalg.norm(relative_xyz)),
("object_to_target_xy_distance", np.linalg.norm(relative_xyz[:2])),
("object_to_target_x_distance", np.abs(relative_xyz[0])),
("object_to_target_y_distance", np.abs(relative_xyz[1])),
("object_to_target_z_distance", np.abs(relative_xyz[2])),
("object_to_target_circle_distances", circle_dist),
("object_to_target_circle_distance", np.linalg.norm(circle_dist)),
("object_to_target_mod_120_circle_distance", np.array([0])),
("object_to_target_mod_180_circle_distance", circle_dist_mod_180),
("object_to_hand_xyz_distance", hand_to_obj_dist),
("target_to_hand_xyz_distance", hand_to_target_dist),
("pose_dist", hand_pose_dist),
# Relative vectors
("object_to_target_xyz", relative_xyz),
("object_to_hand_xyz", grasp_xyz - object_xyz),
("grasp_xyz", grasp_xyz),
)))
if self._wrapped_reset_policies:
obs_dict["reset_policy_xy_distance"] = self._reset_dist.copy()
# Add these as placeholders (to be populated for each object specifically)
# Valve specific observations
obs_dict["object_site1_to_target_site1_xyz_err"] = np.zeros(3)
obs_dict["object_site2_to_target_site2_xyz_err"] = np.zeros(3)
obs_dict["object_site3_to_target_site3_xyz_err"] = np.zeros(3)
obs_dict["object_top_angle_cos"] = np.array([0])
obs_dict["object_top_angle_sin"] = np.array([0])
obs_dict["object_top_angle"] = np.array([0])
obs_dict["target_top_angle_cos"] = np.array([0])
obs_dict["target_top_angle_sin"] = np.array([0])
obs_dict["target_top_angle"] = np.array([0])
# Rod/pipe specific observations
obs_dict["object_normal_to_target_normal_err"] = np.zeros(3)
obs_dict["object_parallel_to_target_parallel_err"] = np.zeros(3)
obs_dict["object_normal_to_target_normal_distance"] = np.zeros(1)
obs_dict["object_parallel_to_target_parallel_distance"] = np.zeros(1)
obs_dict['step_counter'] = np.array([0])
if not self.initializing and self.object_type == ObjectType.Valve3:
target1_xpos = self.sim.data.site_xpos[self.target_1].copy()
valve1_xpos = self.sim.data.site_xpos[self.valve_1].copy()
valve_dir = np.array(valve1_xpos - object_xyz)
# Project onto xy plane
valve_dir[2] = 0
# Unit vector in direction of the red prong
valve_dir /= np.linalg.norm(valve_dir)
target_dir = np.array(target1_xpos - target_xyz)
# Project onto xy plane
target_dir[2] = 0
# Unit vector in direction of the red prong
target_dir /= np.linalg.norm(target_dir)
object_angle = np.arctan2(valve_dir[1], valve_dir[0])
obs_dict["object_top_angle"] = np.array([object_angle])
obs_dict["object_top_angle_cos"] = np.array([valve_dir[0]])
obs_dict["object_top_angle_sin"] | |
# coding: utf-8
# Goal: Rename multiple mp3 files with their properties to get them ready for iTunes
"""
Idea space:
- Ask user to delete the Cover_Images folder on end
"""
import eyed3 as d3
import os, os.path, datetime, requests, re, string, PIL.Image, youtube_dl, json
from bs4 import BeautifulSoup
# path = "C:\\Users\\Flavio\\Music\\Youtube\\Weiteres"
# os.chdir(path)
d3.log.setLevel("ERROR") # So there are no warnings for non-standard genres
# todo: Cut off playlist
def nameTracks(folderpath, genre="[Hip-Hop/Rap]"):
    """Tag every "Artist - TrackName.mp3" in *folderpath* as a single.

    Each track gets the given genre, the current year, track number 1, a
    cover (if one is found) and an album named "<Title> - Single"; the file
    is then renamed to just "<Title>.mp3".
    """
    for file in os.listdir(folderpath):
        # guard clauses instead of the original nested if/else
        if not file.endswith(".mp3"):
            print("File not formatted because not mp3!")
            continue
        if "-" not in file:
            print("File already formatted or not named properly! ")
            continue
        filepath = folderpath + "/" + file
        trackArtist = file.partition("-")[0].strip()
        title = file.partition(" - ")[2].partition('.mp3')[0].strip()
        singleCover = findSingleCover(trackArtist, title)
        audiofile = d3.load(filepath)
        audiofile.tag.genre = genre
        audiofile.tag.recording_date = datetime.datetime.now().year
        audiofile.tag.artist = trackArtist
        audiofile.tag.track_num = 1
        # cut features off the album name as well; partition returns the
        # whole title when " ft." is absent, so this covers both cases
        audiofile.tag.album = title.partition(" ft.")[0] + " - Single"
        if singleCover != "Error":
            # read the cover in a with-block so the handle is closed
            # (the original leaked the file object)
            with open(singleCover, "rb") as coverFile:
                audiofile.tag.images.set(3, coverFile.read(), "image/jpeg")
        audiofile.tag.title = title
        audiofile.tag.save()
        # also rename the whole file to have just the title of the track
        os.rename(filepath, folderpath + "/" + title + ".mp3")
    print("All Tracks managed! ")
def nameAlbum(folderpath, artist, album, genre="[Hip-Hop/Rap]"):
    """Tag every mp3 in *folderpath* as part of *album* by *artist*.

    Track numbers are looked up in the genius.com tracklist; when a title
    cannot be matched, the user is asked to enter the number manually.
    """
    trackList = generateTracklist(artist, album)
    cover = findAlbumCover(artist, album)
    for file in os.listdir(folderpath):
        if not file.endswith(".mp3"):
            continue
        title = file.partition(".mp3")[0]
        audiofile = d3.load(folderpath + "/" + file)
        audiofile.tag.genre = genre
        audiofile.tag.recording_date = datetime.datetime.now().year
        audiofile.tag.artist = artist
        try:
            # todo: compare only upper- or only lower-case and account for
            # characters like ' when matching titles
            # strip any feature suffix before looking the title up
            cleanTitle = string.capwords(title.partition(" ft.")[0].partition(" feat.")[0])
            # ValueError: title not in the tracklist;
            # AttributeError: generateTracklist returned None (was bare except)
            audiofile.tag.track_num = trackList.index(cleanTitle) + 1
        except (ValueError, AttributeError):
            print("Error occured, track has to be numbered manually")
            number = input("Enter track number of " + title + " : ")
            audiofile.tag.track_num = int(number)
        audiofile.tag.album = album
        if cover != "Error":
            # read the cover in a with-block so the handle is closed
            with open(cover, "rb") as coverFile:
                audiofile.tag.images.set(3, coverFile.read(), "image/jpeg")
        audiofile.tag.title = title
        audiofile.tag.save()
    print("Album finished! ")
def generateTracklist(artist, album):
    """Scrape the album tracklist from genius.com.

    Returns a list of capitalized track titles with features and bracketed
    suffixes removed; an empty list when the page could not be parsed.
    (Previously this returned ``None`` on failure, which crashed callers
    that immediately call ``.index()`` on the result.)
    """
    base = "https://genius.com/albums"
    url = base + "/" + artist.replace(" ", "-") + "/" + album.replace(" ", "-")
    raw = requests.get(url)
    soup = BeautifulSoup(raw.text, "html.parser")
    try:
        titles = soup.findAll(class_="chart_row-content-title")
        for i in range(len(titles)):
            # Drop the trailing "Lyrics" label, normalize whitespace and
            # typographic apostrophes.
            titles[i] = re.sub(" +", " ", titles[i].text.partition("Lyrics")[0].replace("\n", "").replace("\xa0", " ")).replace("’", "").strip()
            # Cut features (anything in brackets) off for better comparison.
            titles[i] = string.capwords(re.sub(r"[(\[].*?[)\]]", "", titles[i]).strip())
        if len(titles) == 0:
            print("Could not find titles to album")
        return titles
    except Exception:
        print("Could not find titles to album")
        return []
def findAlbumCover(artist, album):
    """Download the album cover for *artist*/*album* from genius.com.

    Returns the path of the downloaded .jpg, or the string "Error" when the
    cover could not be found or downloaded.
    """
    base = "https://genius.com/albums"
    url = base + "/" + artist.replace(" ", "-") + "/" + album.replace(" ", "-")
    raw = requests.get(url)
    imagePath = getcwdFormat() + "/" + "Cover_Images/"
    # exist_ok avoids the check-then-create race of os.path.exists + mkdir.
    os.makedirs("Cover_Images", exist_ok=True)
    soup = BeautifulSoup(raw.text, "html.parser")
    try:
        # First entry of the srcset is the base image URL.
        imageURL = soup.findAll(class_="cover_art-image")[0]['srcset'].split(" ")[0]
        splittedLink = imageURL.split("/")
        # Rewrite the size segment to download in 1000x1000 resolution.
        splittedLink[4] = "1000x1000"
        imageURL = "/".join(splittedLink)
        coverRaw = requests.get(imageURL, stream=True)
        filename = artist + "_" + album + ".jpg"
        with open(imagePath + filename, "wb") as outfile:
            for block in coverRaw.iter_content(1024):
                if not block:
                    break
                outfile.write(block)
        print("Cover found! Resolution is: " +
              str(PIL.Image.open(imagePath + filename).size))
        return imagePath + filename
    except Exception:
        # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
        print("Error, cover not found")
        return "Error"
def findSingleCover(artist, single):
    """Download the song cover for *artist*/*single* from genius.com.

    Returns the path of the downloaded .jpg, or the string "Error" when the
    cover could not be found or downloaded.
    """
    base = "https://genius.com/"
    url = base + artist.replace(" ", "-") + "-" + single.replace(",", "").replace(" ", "-") + "-lyrics"
    raw = requests.get(url)
    imagePath = getcwdFormat() + "/" + "Cover_Images/"
    # exist_ok avoids the check-then-create race of os.path.exists + mkdir.
    os.makedirs("Cover_Images", exist_ok=True)
    soup = BeautifulSoup(raw.text, "html.parser")
    try:
        imageURL = soup.findAll(class_="cover_art-image")[0]["src"]
        splittedLink = imageURL.split("/")
        # Rewrite the size segment to download in 1000x1000 resolution.
        splittedLink[4] = "1000x1000"
        imageURL = "/".join(splittedLink)
        coverRaw = requests.get(imageURL, stream=True)
        filename = artist + "_" + single + ".jpg"
        with open(imagePath + filename, "wb") as outfile:
            for block in coverRaw.iter_content(1024):
                if not block:
                    break
                outfile.write(block)
        print("Cover found for track " + single)
        return imagePath + filename
    except Exception:
        # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
        print("Error, cover not found for track " + single)
        return "Error"
# Download list of URLs
def downLoadTracks(trackList, folder=""):
    """Download every URL in *trackList* as a 192kbps mp3 via youtube_dl,
    then move the downloaded .mp3 files (renamed) into *folder*."""
    ydl_opts = {
        'format': 'bestaudio',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
    }
    for url in trackList:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            try:
                ydl.download([url])
            except Exception:
                # Narrowed from a bare except: keep going on a failed
                # download but let KeyboardInterrupt abort the batch.
                print("Could not download track!")
    for file in os.listdir(getcwdFormat()):
        if file.endswith(".mp3"):
            try:
                os.rename(getcwdFormat() + "/" + file, getcwdFormat() + "/" + folder + "/" + renameDownloadTrack(file))
            except OSError:
                print("File already exists!")
# Deletes the url id appended by youtube_dl and cuts off things in brackets
# like (Audio) because no one wants this
def renameDownloadTrack(trackName):
    """Return a cleaned '<name>.mp3' filename for a youtube_dl download.

    Drops everything from the last '-' (the video id), removes bracketed
    text such as '(Audio)', and collapses repeated spaces.
    Raw strings fix the invalid escape sequences (W605) in the patterns.
    """
    trackName = re.sub(r"[(\[].*?[)\]]", "", trackName[0:trackName.rindex("-")]).strip()
    return re.sub(r" +", " ", trackName) + ".mp3"
# Make os path suitable for python chdir
def pathReplace(path):
    """Return *path* with every backslash turned into a forward slash."""
    return path.replace("\\", "/")
# Get cwd with right format
def getcwdFormat():
    """Current working directory with forward slashes only."""
    return os.getcwd().replace("\\", "/")
# Get new foldername
def getnewFolder(folder):
    """Return '<folder>(i)' for the smallest i >= 1 that does not exist.

    Bug fix: the old loop probed '<folder>1', '<folder>2', ... but returned
    '<folder>(i)', so an already existing '<folder>(1)' could be returned
    again.  Probe exactly the name that is returned.
    """
    i = 1
    while os.path.exists(folder + "(" + str(i) + ")"):
        i += 1
    return folder + "(" + str(i) + ")"
# Mainloop
print("Welcome to YouTunes!")
while True:
question = input("Download Tracks or Album? Or just json ")
# name a couple of tracks
if question in ["Tracks", "Track", "t", "T", "tr"]:
folderName = "Singles - " + str(datetime.date.today())
if os.path.exists(folderName):
folderName = getnewFolder(folderName)
os.mkdir(folderName)
else:
os.mkdir(folderName)
folderPath = getcwdFormat() + "/" + folderName
track_urls = []
question = input("Enter a song url or \"finish\": ")
while question not in ["f", "finished", "fi", "finish"]:
track_urls.append(question)
question = input("Enter a song url or \"finish\": ")
downLoadTracks(track_urls, folderName)
print("Make sure every Track is named like Artist - TrackName Features")
print("Example: Drake - Sneakin feat. 21 Savage")
print("If Track has correct name just press enter, otherwise enter correct name and then enter")
for mp3 in (os.listdir(folderPath)):
if mp3.endswith(".mp3"):
print(mp3)
newname = input("Enter or new name: ")
if newname == "":
pass
else:
os.rename(getcwdFormat() + "/" + folderName + "/" + mp3, getcwdFormat() + "/" + folderName + "/" + newname + ".mp3")
print("Saved new name!")
print("Every file in folder " + folderName + " has been named.")
print("Next up: Setting the stats for iTunes") # todo Manage different genres here
nameTracks(folderPath)
print("You can quit now or download more tracks or albums: ")
# name an album
elif question in ["Album", "a", "A", "al"]:
# os.chdir("C:\\Users\\Flavio\\Music\\Youtube")
albumArtist = input("Which Artist? ")
albumName = input("Which Album? ")
folderName = albumArtist + " - " + albumName
if os.path.exists(folderName):
folderName = getnewFolder(folderName)
os.mkdir(folderName)
else:
os.mkdir(folderName)
folderPath = getcwdFormat() + "/" + folderName
track_urls = []
question = input("Enter a song url or \"finish\": ")
while question not in ["f", "finished", "fi", "finish", "fin"]:
track_urls.append(question)
question = input("Enter a album song url or \"finish\": ")
downLoadTracks(track_urls, folderName)
print("Make sure every Track is named like: TrackName feat. Features")
print("Example: Sneakin feat. 21 Savage (as an Drake album)")
print("If Track has correct name just press enter, otherwise enter correct name and then enter")
for mp3 in (os.listdir(folderPath)):
if mp3.endswith(".mp3"):
#print(mp3.split("-")[1])
print(mp3)
newname = input("Enter or new name: ")
if newname != "":
#os.rename(getcwdFormat() + "/" + folderName + "/" + mp3, getcwdFormat() + "/" + folderName + "/" + mp3.split("-")[1] + ".mp3")
# os.rename(getcwdFormat() + "/" + folderName + "/" + mp3, getcwdFormat() + "/" + folderName + "/" + ".mp3")
# else:
os.rename(getcwdFormat() + "/" + folderName + "/" + mp3, getcwdFormat() + "/" + folderName + "/" + newname + ".mp3")
print("Saved new name!")
specialGenre = input("Name a genre (default: [Hip-Hop/Rap]): ")
print("Now doing the iTunes stats")
if specialGenre != "":
nameAlbum(folderPath, | |
= np.zeros(grid.number_of_nodes, dtype=int)
>>> ca = HexCTS(grid, nsd, trn_list, ins)
>>> cap = CAPlotter(ca, cmap=matplotlib.cm.pink)
>>> cap.gridtype
'hex'
>>> cap._cmap.name
'pink'
"""
def __init__(self, ca, cmap=None, **kwds):
"""CAPlotter() constructor keeps a reference to the CA model, and
optionally a colormap to be used with plots.
Parameters
----------
ca : LandlabCellularAutomaton object
Reference to a CA model
cmap : Matplotlib colormap, optional
Colormap to be used in plotting
"""
import matplotlib
# Set the colormap; default to matplotlib's "jet" colormap
if cmap is None:
self._cmap = matplotlib.cm.jet
else:
self._cmap = cmap
# Keep a reference to the CA model
self.ca = ca
# Initialize the plot and remember the grid type
plt.ion()
plt.figure(1)
if type(ca.grid) is landlab.grid.hex.HexModelGrid:
self.gridtype = "hex"
else:
self.gridtype = "rast"
def update_plot(self):
"""Plot the current node state grid."""
plt.clf()
if self.gridtype == "rast":
nsr = self.ca.grid.node_vector_to_raster(self.ca.node_state)
plt.imshow(nsr, interpolation="None", origin="lower", cmap=self._cmap)
else:
self.ca.grid.hexplot(self.ca.node_state, color_map=self._cmap)
plt.draw()
plt.pause(0.001)
    def finalize(self):
        """Wrap up plotting.

        Wrap up plotting by switching off interactive mode and showing the
        plot (blocks until the window is closed).
        """
        plt.ioff()
        plt.show()
class CellLabCTSModel(object):
"""Link-type (or doublet-type) cellular automaton model.
A CellLabCTSModel implements a link-type (or doublet-type) cellular
automaton model. A link connects a pair of cells. Each cell has a state
(represented by an integer code), and each link also has a state that is
determined by the states of the cell pair.
Parameters
----------
model_grid : Landlab ModelGrid object
Reference to the model's grid
node_state_dict : dict
Keys are node-state codes, values are the names associated with
these codes
transition_list : list of Transition objects
List of all possible transitions in the model
initial_node_states : array of ints (x number of nodes in grid)
Starting values for node-state grid
prop_data : array (x number of nodes in grid), optional
Array of properties associated with each node/cell
prop_reset_value : number or object, optional
Default or initial value for a node/cell property (e.g., 0.0).
Must be same type as *prop_data*.
"""
    def __init__(
        self,
        model_grid,
        node_state_dict,
        transition_list,
        initial_node_states,
        prop_data=None,
        prop_reset_value=None,
        seed=0,
    ):
        """Initialize the CA model.

        Parameters
        ----------
        model_grid : Landlab ModelGrid object
            Reference to the model's grid
        node_state_dict : dict
            Keys are node-state codes, values are the names associated with
            these codes
        transition_list : list of Transition objects
            List of all possible transitions in the model
        initial_node_states : array of ints (x number of nodes in grid)
            Starting values for node-state grid
        prop_data : array (x number of nodes in grid), optional
            Array of properties associated with each node/cell
        prop_reset_value : number or object, optional
            Default or initial value for a node/cell property (e.g., 0.0).
            Must be same type as *prop_data*.
        seed : int, optional
            Seed for random number generation.
        """
        # Keep a copy of the model grid
        self.grid = model_grid
        # Initialize random number generation
        # NOTE(review): this seeds numpy's *global* RNG, which affects any
        # other code using np.random in the same process.
        np.random.seed(seed)
        # Create an array that knows which links are connected to a boundary
        # node
        self.bnd_lnk = np.zeros(self.grid.number_of_links, dtype=np.int8)
        for link_id in range(self.grid.number_of_links):
            if (
                self.grid.status_at_node[self.grid.node_at_link_tail[link_id]] != _CORE
                or self.grid.status_at_node[self.grid.node_at_link_head[link_id]]
                != _CORE
            ):
                self.bnd_lnk[link_id] = True
        # Set up the initial node-state grid
        self.set_node_state_grid(initial_node_states)
        # Current simulation time starts out at zero
        self.current_time = 0.0
        # Figure out how many states there are, and make sure the input data
        # are self consistent.
        # There are 2 x (N^2) link states, where N is the number of node
        # states. For example, if there are just two node states, 0 and 1, then
        # the possible oriented link pairs are listed below:
        # 0-0 0-1 1-0 1-1 0 0 1 1
        # 0 1 0 1
        self.num_node_states = len(node_state_dict)
        self.num_node_states_sq = self.num_node_states * self.num_node_states
        # self.number_of_orientations is read here but never assigned in this
        # method — presumably set as a class attribute by the grid-specific
        # subclass before super().__init__() runs; TODO confirm.
        self.num_link_states = self.number_of_orientations * self.num_node_states_sq
        assert type(transition_list) is list, "transition_list must be a list!"
        assert transition_list, "Transition list must contain at least one transition"
        # Validate each transition; all must be encoded the same way, either
        # as integer link-state IDs or as (tail, head, orientation) tuples.
        last_type = None
        for t in transition_list:
            # TODO: make orientation optional for cases where
            # self.number_of_orientations = 1
            if isinstance(t.from_state, tuple) and isinstance(t.to_state, tuple):
                this_type = tuple
            else:
                this_type = int
            if this_type is tuple:
                # added to allow from and to states to be tuples, not just ids
                for i in t.from_state[:-1]:
                    assert (
                        i < self.num_node_states
                    ), "Transition from_state out of range"
                for i in t.to_state[:-1]:
                    assert i < self.num_node_states, "Transition to_state out of range"
                assert (
                    t.from_state[-1] < self.number_of_orientations
                ), "Encoding for orientation in from_state must be < number of orientations."
                assert (
                    t.to_state[-1] < self.number_of_orientations
                ), "Encoding for orientation in to_state must be < number of orientations."
            else:
                assert (
                    t.from_state < self.num_link_states
                ), "Transition from_state out of range"
                assert (
                    t.to_state < self.num_link_states
                ), "Transition to_state out of range"
            assert (
                last_type == this_type or last_type is None
            ), "All transition types must be either int IDs, or all tuples."
            # this test to ensure all entries are either IDs, or tuples, not
            # mixed
            last_type = this_type
        # Create priority queue for events and next_update array for links
        self.next_update = self.grid.add_zeros("link", "next_update_time")
        self.priority_queue = PriorityQueue()
        self.next_trn_id = -np.ones(self.grid.number_of_links, dtype=int)
        # Assign link types from node types
        self.create_link_state_dict_and_pair_list()
        # DEJH adds: convert transition_list to IDs if necessary
        # This is the new part that allows Transition from_ and to_ types
        # to be specified either as ints, or as tuples.
        # NOTE(review): [:] is a *shallow* copy — the loop below mutates the
        # caller's Transition objects in place; verify this is intended.
        transition_list_as_ID = transition_list[:]
        if type(transition_list[0].from_state) == tuple:
            # (then they all are..., because of the assertions in __init__)
            for i in range(len(transition_list)):
                transition_list_as_ID[i].from_state = self.link_state_dict[
                    transition_list[i].from_state
                ]
                transition_list_as_ID[i].to_state = self.link_state_dict[
                    transition_list[i].to_state
                ]
        # Set up the information needed to determine the orientation of links
        # in the lattice. The default method just creates an array of zeros
        # (all orientations considered the same), but this will be overridden
        # in subclasses that do use orientation.
        self.setup_array_of_orientation_codes()
        # Using the grid of node states, figure out all the link states
        self.assign_link_states_from_node_types()
        # Create transition data for links
        self.setup_transition_data(transition_list_as_ID)
        # Put the various transitions on the event queue
        self.push_transitions_to_event_queue()
        # In order to keep track of cell "properties", we create an array of
        # indices that refer to locations in the caller's code where properties
        # are tracked.
        self.propid = np.arange(self.grid.number_of_nodes)
        if prop_data is None:
            self.prop_data = np.zeros(self.grid.number_of_nodes)
            self.prop_reset_value = 0.0
        else:
            self.prop_data = prop_data
            self.prop_reset_value = prop_reset_value
def set_node_state_grid(self, node_states):
"""Set the grid of node-state codes to node_states.
Sets the grid of node-state codes to node_states. Also checks
to make sure node_states is in the proper format, which is to
say, it's a Numpy array of the same length as the number of nodes in
the grid.
**Creates**:
* self.node_state : 1D array of ints (x number of nodes in grid)
The node-state array
Parameters
----------
node_states : 1D array of ints (x number of nodes in grid)
Notes
-----
The node-state array is attached to the grid as a field with the name
'node_state'.
"""
assert (
type(node_states) is np.ndarray
), "initial_node_states must be a Numpy array"
assert (
len(node_states) == self.grid.number_of_nodes
), "length of initial_node_states must equal number of nodes in grid"
self.grid.at_node["node_state"] = node_states
self.node_state = node_states
def create_link_state_dict_and_pair_list(self):
"""Create a dict of link-state to node-state.
Creates a dictionary that can be used as a lookup table to find out
which link state corresponds to a particular pair of node states. The
dictionary keys are 3-element tuples, each of which represents the
state of the TAIL node, the HEAD node, and the orientation of the link.
The values are integer codes representing the link state numbers.
Notes
-----
Performance note: making self.node_pair a tuple does not appear to
change time to lookup values in update_node_states. Changing it to a
2D array of int actually slows it down.
"""
self.link_state_dict = {}
self.node_pair = []
k = 0
for orientation in range(self.number_of_orientations):
for tail_state in range(self.num_node_states):
for head_state in range(self.num_node_states):
self.link_state_dict[(tail_state, head_state, orientation)] = k
self.node_pair.append((tail_state, head_state, orientation))
k += 1
def setup_array_of_orientation_codes(self):
"""Create array of active link orientation codes.
Creates | |
# <reponame>rsuprun/ocropy
import glob
import copy
import PIL
import cv2
import numpy as np
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
class record:
    """Minimal attribute bag: ``record(a=1).a == 1``."""

    def __init__(self, **kw):
        for key, value in kw.items():
            setattr(self, key, value)
def disp_img(img, title, h, w):
    """Show *img* in a resizable OpenCV window, resized to w x h pixels."""
    cv2.namedWindow(title, flags=cv2.WINDOW_NORMAL)
    cv2.imshow(title, img)
    cv2.resizeWindow(title, (w, h))
def isintarray(a):
    """True if *a* has one of the accepted (un)signed integer dtypes."""
    accepted = ('B', 'int16', 'int32', 'int64', 'uint16', 'uint32', 'uint64')
    return any(a.dtype == np.dtype(name) for name in accepted)
def isintegerarray(a):
    """True if *a* has a 32/64-bit (un)signed integer dtype."""
    accepted = ('int32', 'int64', 'uint32', 'uint64')
    return any(a.dtype == np.dtype(name) for name in accepted)
def dim0(s):
    """Extent of the slice list *s* along dimension 0."""
    vertical = s[0]
    return vertical.stop - vertical.start
def dim1(s):
    """Extent of the slice list *s* along dimension 1."""
    horizontal = s[1]
    return horizontal.stop - horizontal.start
def norm_max(a):
    """Scale *a* so that its maximum value becomes 1."""
    peak = np.amax(a)
    return a / peak
def width(s):
    """Horizontal extent (dimension 1) of the slice list *s*."""
    horizontal = s[1]
    return horizontal.stop - horizontal.start
def area(a):
    """Return the area of the slice list (ignores anything past a[:2])."""
    extents = [max(sl.stop - sl.start, 0) for sl in a[:2]]
    return np.prod(extents)
def center(s):
    """(y, x) midpoint of the rectangle described by slice list *s*."""
    vert, horiz = s[0], s[1]
    ycenter = np.mean([vert.stop, vert.start])
    xcenter = np.mean([horiz.stop, horiz.start])
    return (ycenter, xcenter)
def r_dilation(image, size, origin=0):
    """Grayscale dilation with a rectangular structuring element
    (maximum_filter)."""
    return ndimage.maximum_filter(image, size, origin=origin)
def r_erosion(image,size,origin=0):
    """Erosion with rectangular structuring element using minimum_filter"""
    return ndimage.minimum_filter(image,size,origin=origin)
def rb_dilation(image, size, origin=0):
    """Binary dilation using linear (uniform) filters."""
    smoothed = np.zeros(image.shape, 'f')
    ndimage.uniform_filter(image, size, output=smoothed, origin=origin,
                           mode='constant', cval=0)
    return np.array(smoothed > 0, 'i')
def rb_erosion(image, size, origin=0):
    """Binary erosion using linear (uniform) filters."""
    smoothed = np.zeros(image.shape, 'f')
    ndimage.uniform_filter(image, size, output=smoothed, origin=origin,
                           mode='constant', cval=1)
    return np.array(smoothed == 1, 'i')
def rb_opening(image, size, origin=0):
    """Binary opening (erosion followed by dilation) using linear filters."""
    eroded = rb_erosion(image, size, origin=origin)
    return rb_dilation(eroded, size, origin=origin)
def select_regions(binary, f, min=0, nbest=100000):
    """Given a scoring function f over slice tuples (as returned by
    find_objects), keep at most nbest regions whose score is higher
    than min; return the mask of kept regions."""
    labelled, _count = ndimage.label(binary)
    regions = ndimage.find_objects(labelled)
    scores = [f(region) for region in regions]
    keep = np.zeros(len(regions) + 1, 'i')
    if nbest > 0:
        ranked = np.argsort(scores)
        for idx in ranked[-nbest:]:
            if scores[idx] > min:
                keep[idx + 1] = 1
    return keep[labelled]
def check_binary(image):
    """Assert that *image* is binary: uint8/int/bool dtype, values in [0, 1]."""
    dtype_ok = image.dtype == 'B' or image.dtype == 'i' or image.dtype == np.dtype('bool')
    assert dtype_ok, \
        "array should be binary, is %s %s"%(image.dtype,image.shape)
    lo, hi = np.amin(image), np.amax(image)
    assert lo >= 0 and hi <= 1, \
        "array should be binary, has values %g to %g"%(lo,hi)
def read_image_binary(fname, dtype='i'):
    """Read an image from disk and return it as a binary image
    of the given dtype (threshold at the midpoint of the value range)."""
    a = pil2array(PIL.Image.open(fname))
    if a.ndim == 3:
        a = np.amax(a, axis=2)
    threshold = 0.5 * (np.amin(a) + np.amax(a))
    return np.array(a > threshold, dtype)
def pil2array(im, alpha=0):
    """Convert a PIL image to a numpy uint8 array.

    "L" -> (h, w); "RGB" -> (h, w, 3); "RGBA" -> (h, w, 4), or (h, w, 3)
    when *alpha* is falsy.  Any other mode is converted to "L" first.

    Bug fix: np.fromstring was deprecated and removed in NumPy 2.0; use
    np.frombuffer instead.  The result is copied so it stays writable.
    """
    if im.mode == "L":
        a = np.frombuffer(im.tobytes(), np.uint8).reshape(im.size[1], im.size[0])
        return a.copy()
    if im.mode == "RGB":
        a = np.frombuffer(im.tobytes(), np.uint8).reshape(im.size[1], im.size[0], 3)
        return a.copy()
    if im.mode == "RGBA":
        a = np.frombuffer(im.tobytes(), np.uint8).reshape(im.size[1], im.size[0], 4)
        if not alpha:
            a = a[:, :, :3]
        return a.copy()
    return pil2array(im.convert("L"))
def correspondences(labels1, labels2):
    """Given two labeled images, compute an array giving the correspondences
    between labels in the two images (row 0: labels1, row 1: labels2)."""
    q = 100000
    assert np.amin(labels1) >= 0 and np.amin(labels2) >= 0
    assert np.amax(labels2) < q
    # Pack each label pair into one integer, then take the unique pairs.
    paired = np.unique(labels1 * q + labels2)
    return np.array([paired // q, paired % q])
def find(condition):
    "Return the indices where ravel(condition) is true"
    indices = np.nonzero(np.ravel(condition))[0]
    return indices
def propagate_labels(image, labels, conflict=0):
    """Given an image and a set of labels, apply the labels
    to all the regions in the image that overlap a label.
    Assign the value `conflict` to any labels that have a conflict."""
    rlabels, _ = ndimage.label(image)
    cors = correspondences(rlabels, labels)
    outputs = np.zeros(np.amax(rlabels) + 1, 'i')
    CONFLICT_MARK = -(1 << 30)
    for region, lab in cors.T:
        # A region already assigned a different label is marked conflicting.
        if outputs[region] != 0:
            outputs[region] = CONFLICT_MARK
        else:
            outputs[region] = lab
    outputs[outputs == CONFLICT_MARK] = conflict
    outputs[0] = 0
    return outputs[rlabels]
def spread_labels(labels, maxdist=9999999):
    """Spread the given labels to the background (nearest-label assignment,
    truncated at *maxdist*)."""
    distances, features = ndimage.distance_transform_edt(
        labels == 0, return_distances=True, return_indices=True)
    flat_idx = features[0] * labels.shape[1] + features[1]
    spread = labels.ravel()[flat_idx.ravel()].reshape(labels.shape)
    return spread * (distances < maxdist)
def compute_lines(segmentation, minscale):
    """Given a line segmentation map, computes a list of records, one per
    line, each carrying label, bounds (2D slices) and a boolean mask."""
    lines = []
    for index, bounds in enumerate(ndimage.find_objects(segmentation)):
        if bounds is None:
            continue
        # Skip lines that are too short vertically.
        if dim0(bounds) < minscale:
            continue
        mask = (segmentation[bounds] == index + 1)
        if np.amax(mask) == 0:
            continue
        entry = record()
        entry.label = index + 1
        entry.bounds = bounds
        entry.mask = mask
        lines.append(entry)
    return lines
def reading_order(lines, highlight=None):
    """Given the list of lines (a list of 2D slices), computes
    the partial reading order. The output is a binary 2D array
    such that order[i,j] is true if line i comes before line j
    in reading order."""
    order = np.zeros((len(lines), len(lines)), 'B')

    def x_overlaps(u, v):
        return u[1].start < v[1].stop and u[1].stop > v[1].start

    def above(u, v):
        return u[0].start < v[0].start

    def left_of(u, v):
        return u[1].stop < v[1].start

    def separates(w, u, v):
        if w[0].stop < min(u[0].start, v[0].start):
            return 0
        if w[0].start > max(u[0].stop, v[0].stop):
            return 0
        if w[1].start < u[1].stop and w[1].stop > v[1].start:
            return 1

    for i, u in enumerate(lines):
        for j, v in enumerate(lines):
            if x_overlaps(u, v):
                if above(u, v):
                    order[i, j] = 1
            elif left_of(u, v) and not any(separates(w, u, v) for w in lines):
                # Columns: u precedes v when left of it and no line spans both.
                order[i, j] = 1
            if j == highlight and order[i, j]:
                print((i, j), end=' ')
                y0, x0 = center(lines[i])
                y1, x1 = center(lines[j])
                plt.plot([x0, x1 + 200], [y0, y1])
    return order
def topsort(order):
    """Given a binary array defining a partial order (o[i,j]==True means i<j),
    compute a topological sort. This is a quick and dirty implementation
    that works for up to a few thousand elements."""
    n = len(order)
    visited = np.zeros(n)
    result = []

    def visit(k):
        # Depth-first: emit all predecessors of k before k itself.
        if visited[k]:
            return
        visited[k] = 1
        for prior in np.nonzero(order[:, k])[0]:
            visit(prior)
        result.append(k)

    for k in range(n):
        visit(k)
    return result
def array2pil(a):
    """Convert a numpy array to a PIL image.

    uint8 arrays become "L" (rank 2) or "RGB" (rank 3) images; float32
    arrays become "F" images.  Anything else raises TypeError.

    Bug fix: ``Image.fromstring`` and ``ndarray.tostring`` were removed from
    modern Pillow/NumPy; use ``frombytes``/``tobytes`` instead.
    """
    if a.dtype == np.dtype("B"):
        if a.ndim == 2:
            return PIL.Image.frombytes("L", (a.shape[1], a.shape[0]), a.tobytes())
        elif a.ndim == 3:
            return PIL.Image.frombytes("RGB", (a.shape[1], a.shape[0]), a.tobytes())
        else:
            raise TypeError("bad image rank")
    elif a.dtype == np.dtype('float32'):
        return PIL.Image.frombytes("F", (a.shape[1], a.shape[0]), a.tobytes())
    else:
        raise TypeError("unknown image type")
def int2rgb(image):
    """Converts a rank 2 array of packed 32-bit 0xRRGGBB values into a
    rank 3 uint8 array with the R, G, B channels along the last axis."""
    assert image.ndim==2
    assert isintarray(image)
    a = np.zeros(list(image.shape)+[3],'B')
    # Unpack one byte per channel; the uint8 output truncates high bits.
    a[:,:,0] = (image>>16)
    a[:,:,1] = (image>>8)
    a[:,:,2] = image
    return a
def make_seg_white(image):
    """Return a copy of the segmentation with background (0) pixels set to
    white (0xffffff)."""
    assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
    result = image.copy()
    result[result == 0] = 0xffffff
    return result
def midrange(image, frac=0.5):
    """Computes the center of the range of image values
    (for quick thresholding)."""
    lo = np.amin(image)
    hi = np.amax(image)
    return frac * (lo + hi)
def write_page_segmentation(fname,image):
    """Writes a page segmentation, that is an RGB image whose values
    encode the segmentation of a page."""
    assert image.ndim==2
    assert image.dtype in [np.dtype('int32'),np.dtype('int64')]
    # Background becomes white, labels are unpacked into RGB bytes.
    a = int2rgb(make_seg_white(image))
    im = array2pil(a)
    im.save(fname)
def write_image_binary(fname,image,verbose=0):
    """Write a binary image to disk. This verifies first that the given image
    is, in fact, binary. The image may be of any type, but must consist of only
    two values."""
    if verbose: print("# writing", fname)
    assert image.ndim==2
    # Threshold at the midpoint of the value range, scale to 0/255 bytes.
    image = np.array(255*(image>midrange(image)),'B')
    im = array2pil(image)
    im.save(fname)
def remove_noise(line, minsize=8):
    """Remove connected components smaller than *minsize* pixels from an
    image (thresholded at half its maximum)."""
    if minsize == 0:
        return line
    binary = (line > 0.5 * np.amax(line))
    labels, count = ndimage.label(binary)
    # Per-pixel size of the component each pixel belongs to.
    sizes = ndimage.sum(binary, labels, range(count + 1))[labels]
    return np.minimum(binary, 1 - (sizes > 0) * (sizes < minsize))
def pad_image(image, d, cval=np.inf):
    """Pad *image* by *d* pixels on every side; the border is filled with
    *cval*, or with the image maximum when cval is left at np.inf."""
    padded = np.ones(np.array(image.shape) + 2 * d)
    fill = np.amax(image) if cval == np.inf else cval
    padded[:, :] = fill
    padded[d:-d, d:-d] = image
    return padded
def extract(image,y0,x0,y1,x1,mode='nearest',cval=0):
    # Extract the window [y0:y1, x0:x1] from *image*, tolerating coordinates
    # that extend past the image border.
    h,w = image.shape
    ch,cw = y1-y0,x1-x0
    # Clamp the window origin so the slice below stays inside the image.
    y,x = np.clip(y0,0,max(h-ch,0)),np.clip(x0,0,max(w-cw, 0))
    sub = image[y:y+ch,x:x+cw]
    try:
        # Shift so the requested origin (y0,x0) maps to the output origin.
        r = ndimage.shift(sub,(y-y0,x-x0),mode=mode,cval=cval,order=0)
        if cw > w or ch > h:
            # Requested window is larger than the image: pad out to (ch, cw).
            pady0, padx0 = max(-y0, 0), max(-x0, 0)
            r = ndimage.affine_transform(r, np.eye(2), offset=(pady0, padx0), cval=1, output_shape=(ch, cw))
        return r
    except RuntimeError:
        # workaround for platform differences between 32bit and 64bit
        # scipy.ndimage: retry the shift in float64 and cast back.
        dtype = sub.dtype
        sub = np.array(sub,dtype='float64')
        sub = ndimage.shift(sub,(y-y0,x-x0),mode=mode,cval=cval,order=0)
        sub = np.array(sub,dtype=dtype)
        return sub
def extract_masked(image,linedesc,pad=5,expand=0):
    """Extract a subimage from the image using the line descriptor.
    A line descriptor consists of bounds and a mask.  Pixels outside the
    (optionally padded and dilated) mask are replaced by the image maximum.
    """
    y0,x0,y1,x1 = [int(x) for x in [linedesc.bounds[0].start,linedesc.bounds[1].start, \
      linedesc.bounds[0].stop,linedesc.bounds[1].stop]]
    if pad>0:
        # Grow the mask with a zero border so it matches the padded extract.
        mask = pad_image(linedesc.mask,pad,cval=0)
    else:
        mask = linedesc.mask
    line = extract(image,y0-pad,x0-pad,y1+pad,x1+pad)
    if expand>0:
        # Dilate the mask to keep a margin of context around the line.
        mask = ndimage.maximum_filter(mask,(expand,expand))
    line = np.where(mask,line,np.amax(line))
    return line
def glob_all(args):
    """Given a list of command line arguments, expand all of them with glob.

    Arguments starting with '@' name a file containing one path per line.
    """
    result = []
    for arg in args:
        if arg[0] == "@":
            with open(arg[1:], "r") as stream:
                expanded = [line for line in stream.read().split("\n") if line != ""]
        else:
            expanded = sorted(glob.glob(arg))
        if not expanded:
            raise FileNotFoundError("%s: expansion did not yield any files" % arg)
        result += expanded
    return result
def erode_hlines_and_vlines(binary, scale, args):
    # Remove long horizontal and vertical rules from a binary page image.
    # Line lengths are thresholds relative to the page size (hline_perc /
    # vline_perc of width / height).  `scale` is currently unused here.
    # generate the kernels
    min_width = int(args.hline_perc * binary.shape[1])
    min_height = int(args.vline_perc * binary.shape[0])
    hkernel = np.ones((1, min_width), dtype='uint8')
    vkernel = np.ones((min_height, 1), dtype='uint8')
    bin_copy = copy.deepcopy(binary)
    # remove horizontal lines: erosion keeps only runs >= min_width,
    # then each detected run is blanked out of bin_copy.
    _binary = cv2.erode(binary, hkernel, iterations=1, borderValue=0, borderType=cv2.BORDER_CONSTANT)
    labels, _ = ndimage.label(_binary)
    objects = ndimage.find_objects(labels)
    extr = 1
    for i, b in enumerate(objects):
        # extend the found lines half the padding kernel size
        # (erosion shortened them by min_width//2 on each side)
        y_slc = slice(max(b[0].start-extr,0), min(b[0].stop+extr, binary.shape[0]))
        x_slc = slice(max(b[1].start-(min_width//2),0), min(b[1].stop+(min_width//2), binary.shape[1]))
        bin_copy[y_slc, x_slc] = 0
    # remove vertical lines: same idea, but the blanking is applied to
    # `binary` itself (the horizontal result lives in bin_copy).
    _binary = cv2.erode(binary, vkernel, iterations=1, borderValue=0, borderType=cv2.BORDER_CONSTANT)
    labels, _ = ndimage.label(_binary)
    objects = ndimage.find_objects(labels)
    extr = 1
    for i, b in enumerate(objects):
        # extend the found lines half the padding kernel size
        y_slc = slice(max(b[0].start-(min_height//2),0), min(b[0].stop+(min_height//2), binary.shape[0]))
        x_slc = slice(max(b[1].start-extr,0), min(b[1].stop+extr, binary.shape[1]))
        binary[y_slc, x_slc] = 0
    # merge the results: keep pixels that survived both passes.
    binary = np.array((bin_copy + binary == 2), dtype='uint8')
    return binary
def remove_hlines_and_vlines(binary, scale, args):
min_width = int(args.hline_perc * binary.shape[1])
min_height = int(args.vline_perc * binary.shape[0])
labels, _ = ndimage.label(binary)
objects = ndimage.find_objects(labels)
for i, | |
text
:rtype: List[Tuple[str, str]]
"""
pass
    @abstractmethod
    def _get_normal_text(self, choice) -> List[Tuple[str, str]]:
        """Generate the formatted text for non-hovered choices.

        :param choice: the choice to render
        :return: list of formatted text
        :rtype: List[Tuple[str, str]]
        """
        pass
@property
def choice_count(self) -> int:
"""Get the choice count.
:return: total count of choices
:rtype: int
"""
return len(self.choices)
@property
def selection(self) -> Dict[str, Any]:
"""Get current selection value.
:return: a dictionary of name and value for the current pointed choice
:rtype: Dict[str, Any]
"""
return self.choices[self.selected_choice_index]
class FakeDocument(NamedTuple):
    """A fake `prompt_toolkit` document class.

    Workaround that lets non-buffer content controls reuse the same
    `Validator` class, which only inspects a ``text`` attribute.
    """

    # raw text content handed to the validator
    text: str
class BaseComplexPrompt(BaseSimplePrompt):
"""A base class to create a complex prompt using `prompt_toolkit` Application.
This class does not create `Layout` nor `Application`, it just contains helper
functions to create a more complex prompt than the `BaseSimplePrompt`.
Use `BaseListPrompt` to create a complex list prompt.
Reference parameters through `BaseListPrompt` or `FuzzyPrompt`.
"""
    def __init__(
        self,
        message: Union[str, Callable[[SessionResult], str]],
        style: InquirerPyStyle = None,
        vi_mode: bool = False,
        qmark: str = "?",
        instruction: str = "",
        transformer: Callable[[Any], Any] = None,
        filter: Callable[[Any], Any] = None,
        validate: Union[Callable[[Any], bool], Validator] = None,
        invalid_message: str = "Invalid input",
        multiselect: bool = False,
        keybindings: Dict[str, List[Dict[str, Union[str, FilterOrBool]]]] = None,
        session_result: SessionResult = None,
    ) -> None:
        """Initialise the Application with Layout and keybindings.

        Sets up the runtime-state filters, the default keybinding map (which
        user-supplied `keybindings` entries override per action) and the
        action -> handler dispatch table, then registers every binding.
        """
        if not keybindings:
            keybindings = {}
        super().__init__(
            message=message,
            style=style,
            vi_mode=vi_mode,
            qmark=qmark,
            transformer=transformer,
            filter=filter,
            invalid_message=invalid_message,
            validate=validate,
            session_result=session_result,
        )
        # Declared (not assigned) here; concrete subclasses provide these.
        self._content_control: InquirerPyUIControl
        self._instruction = instruction
        self._invalid_message = invalid_message
        self._multiselect = multiselect
        self._rendered = False
        self._invalid = False
        self._application: Application

        # Filters re-evaluated by prompt_toolkit on each key event, so
        # bindings can toggle with runtime state.
        @Condition
        def is_multiselect() -> bool:
            return self._multiselect

        @Condition
        def is_vim_edit() -> bool:
            return self._editing_mode == EditingMode.VI

        @Condition
        def is_invalid() -> bool:
            return self._invalid

        @Condition
        def is_loading() -> bool:
            return self.content_control._loading

        self._is_multiselect = is_multiselect
        self._is_vim_edit = is_vim_edit
        self._is_invalid = is_invalid
        self._is_loading = is_loading
        # Default key map per action name; entries supplied via `keybindings`
        # replace the defaults for that action.
        self._kb_maps = {
            "down": [
                {"key": "down"},
                {"key": "c-n", "filter": ~self._is_vim_edit},
                {"key": "j", "filter": self._is_vim_edit},
            ],
            "up": [
                {"key": "up"},
                {"key": "c-p", "filter": ~self._is_vim_edit},
                {"key": "k", "filter": self._is_vim_edit},
            ],
            "toggle": [
                {"key": "space"},
            ],
            "toggle-down": [
                {"key": Keys.Tab},
            ],
            "toggle-up": [
                {"key": Keys.BackTab},
            ],
            "toggle-all": [
                {"key": "alt-r"},
            ],
            "toggle-all-true": [
                {"key": "alt-a"},
            ],
            "toggle-all-false": [],
            **keybindings,
        }
        # Action name -> list of handlers executed in order on that action.
        self._kb_func_lookup = {
            "down": [{"func": self._handle_down}],
            "up": [{"func": self._handle_up}],
            "toggle": [{"func": self._toggle_choice}],
            "toggle-down": [{"func": self._toggle_choice}, {"func": self._handle_down}],
            "toggle-up": [{"func": self._toggle_choice}, {"func": self._handle_up}],
            "toggle-all": [{"func": self._toggle_all}],
            "toggle-all-true": [{"func": self._toggle_all, "args": [True]}],
            "toggle-all-false": [{"func": self._toggle_all, "args": [False]}],
        }
        # Actions that work even when multiselect is disabled.
        self._non_multiselect_action = {"down", "up"}

        def keybinding_factory(keys, filter, action):
            if not isinstance(keys, list):
                keys = [keys]
            # Toggle-style actions only fire when multiselect is enabled.
            if action not in self._non_multiselect_action:
                filter = filter & self._multiselect

            @self._register_kb(*keys, filter=filter)
            def _(event):
                for method in self._kb_func_lookup[action]:
                    method["func"](*method.get("args", []))

        for key, item in self._kb_maps.items():
            for kb in item:
                keybinding_factory(kb["key"], kb.get("filter", True), key)

        @self._register_kb("enter")
        def _(event):
            self._handle_enter(event)
def _register_kb(
self, *keys: Union[Keys, str], filter: FilterOrBool = True
) -> Callable[[KeyHandlerCallable], KeyHandlerCallable]:
"""Decorate keybinding registration function.
Ensure that invalid state is cleared on next
keybinding entered.
"""
def decorator(func: KeyHandlerCallable) -> KeyHandlerCallable:
@self.register_kb(*keys, filter=filter)
def executable(event):
if self._invalid:
self._invalid = False
func(event)
return executable
return decorator
def _after_render(self, _) -> None:
"""Render callable choices.
Forcing a check on `self._rendered` as this event is fired up on each
render, we only want this to fire up once.
"""
if not self._rendered:
self._rendered = True
if self.content_control._choice_func:
self.content_control._retrieve_choices()
def _get_prompt_message(self) -> List[Tuple[str, str]]:
"""Get the prompt message.
:return: list of formatted text
:rtype: List[Tuple[str, str]]
"""
pre_answer = ("class:instruction", " %s" % self.instruction)
post_answer = ("class:answer", " %s" % self.status["result"])
return super()._get_prompt_message(pre_answer, post_answer)
    def execute(self, raise_keyboard_interrupt: bool = True) -> Any:
        """Execute the application and get the result.

        :param raise_keyboard_interrupt: raise KeyboardInterrupt when the user hits 'c-c'
        :type raise_keyboard_interrupt: bool
        :return: user selected value, passed through `self._filter` when one is set
        :rtype: Any
        """
        result = self.application.run()
        if result == INQUIRERPY_KEYBOARD_INTERRUPT:
            # NOTE(review): os.getenv returns the raw string, so *any* non-empty
            # value (even "0" or "false") suppresses the raise — confirm this is
            # the intended contract for INQUIRERPY_NO_RAISE_KBI.
            if raise_keyboard_interrupt and not os.getenv(
                "INQUIRERPY_NO_RAISE_KBI", False
            ):
                raise KeyboardInterrupt
            else:
                result = None
        if not self._filter:
            return result
        return self._filter(result)
    @property
    def instruction(self) -> str:
        """Instruction to display next to question.

        This is display-only text; it does not affect the result.

        :return: instruction text
        :rtype: str
        """
        return self._instruction
@property
def content_control(self) -> InquirerPyUIControl:
"""Get the content controller object.
Needs to be an instance of InquirerPyUIControl.
"""
if not self._content_control:
raise NotImplementedError
return self._content_control
    @content_control.setter
    def content_control(self, value: InquirerPyUIControl) -> None:
        """Set the content controller (an `InquirerPyUIControl` instance)."""
        self._content_control = value
@property
def result_name(self) -> Any:
"""Get the result name of the application.
In multiselect scenario, return result as a list.
"""
if self._multiselect:
return [choice["name"] for choice in self.selected_choices]
else:
return self.content_control.selection["name"]
@property
def result_value(self) -> Any:
"""Get the result value of the application.
In multiselect scenario, return result as a list.
"""
if self._multiselect:
return [choice["value"] for choice in self.selected_choices]
else:
try:
return self.content_control.selection["value"]
except IndexError:
return ""
@property
def selected_choices(self) -> List[Any]:
"""Get all user selected choices.
:return: list of selected/enabled choices
:rtype: List[Any]
"""
def filter_choice(choice):
return not isinstance(choice, Separator) and choice["enabled"]
return list(filter(filter_choice, self.content_control.choices))
@property
def application(self) -> Application:
"""Get application.
Require `self._application` to be defined since this class
doesn't implement `Layout` and `Application`.
"""
if not self._application:
raise NotImplementedError
return self._application
    @application.setter
    def application(self, value: Application) -> None:
        """Set the prompt_toolkit `Application` built by the subclass."""
        self._application = value
    @abstractmethod
    def _handle_enter(self, event) -> None:
        """Handle event when user input enter key.

        Implemented by each concrete prompt.
        """
        pass
    @abstractmethod
    def _handle_down(self) -> None:
        """Handle event when user attempting to move down.

        Implemented by each concrete prompt.
        """
        pass
    @abstractmethod
    def _handle_up(self) -> None:
        """Handle event when user attempting to move up.

        Implemented by each concrete prompt.
        """
        pass
    @abstractmethod
    def _toggle_choice(self) -> None:
        """Handle event when user attempting to toggle the state of the choice.

        Implemented by each concrete prompt.
        """
        pass
    @abstractmethod
    def _toggle_all(self, value: bool) -> None:
        """Handle event when user attempting to alter the state of all choices.

        Implemented by each concrete prompt.
        """
        pass
class BaseListPrompt(BaseComplexPrompt):
"""A base class to create a complex prompt using `prompt_toolkit` Application.
Consists of 2 horizontally splitted Window with one being the question and the second
window responsible to dynamically generate the content.
Upon entering the answer, update the first window's formatted text.
:param message: question to display to the user
:type message: Union[str, Callable[[SessionResult], str]]
:param style: style to apply to the prompt
:type style: InquirerPyStyle
:param vi_mode: use vi kb for the prompt
:type vi_mode: bool
:param qmark: question mark to display
:type qmark: str
:param instruction: instruction to display after the question message
:type instruction: str
:param transformer: a callable to transform the result, this is visual effect only
:type transformer: Callable[[Any], Any]
:param filter: a callable to filter the result, updating the user input before returning the result
:type filter: Callable[[Any], Any]
:param height: preferred height of the choice window
:type height: Union[str, int]
:param max_height: max height choice window should reach
:type max_height: Union[str, int]
:param validate: a callable or Validator instance to validate user selection
:type validate: Union[Callable[[Any], bool], Validator]
:param invalid_message: message to display when input is invalid
:type invalid_message: str
:param multiselect: enable multiselect mode
:type multiselect: bool
:param keybindings: custom keybindings to apply
:type keybindings: Dict[str, List[Dict[str, Union[str, FilterOrBool]]]]
:param show_cursor: display cursor at the end of the prompt
:type show_cursor: bool
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
style: InquirerPyStyle = None,
vi_mode: bool = False,
qmark: str = "?",
instruction: str = "",
transformer: Callable[[Any], Any] = None,
filter: Callable[[Any], Any] = None,
height: Union[int, str] = None,
max_height: Union[int, str] = None,
validate: Union[Callable[[Any], bool], Validator] = None,
invalid_message: str = "Invalid input",
multiselect: bool = False,
keybindings: Dict[str, List[Dict[str, Union[str, FilterOrBool]]]] = None,
show_cursor: bool = True,
session_result: SessionResult = None,
) -> None:
"""Initialise the Application with Layout and keybindings."""
super().__init__(
message=message,
style=style,
vi_mode=vi_mode,
qmark=qmark,
transformer=transformer,
filter=filter,
invalid_message=invalid_message,
validate=validate,
multiselect=multiselect,
instruction=instruction,
keybindings=keybindings,
session_result=session_result,
)
self._dimmension_height, self._dimmension_max_height = calculate_height(
height, max_height
)
self.layout = HSplit(
[
Window(
height=LayoutDimension.exact(1),
content=FormattedTextControl(
self._get_prompt_message_with_cursor
if show_cursor
else self._get_prompt_message,
show_cursor=show_cursor,
),
),
ConditionalContainer(
Window(
content=self.content_control,
height=Dimension(
max=self._dimmension_max_height,
preferred=self._dimmension_height,
),
dont_extend_height=True,
),
filter=~IsDone() & ~self._is_loading,
),
ConditionalContainer(
Window(FormattedTextControl([("", "")])),
filter=~IsDone(), # force validation bar to stay bottom
),
ConditionalContainer(
Window(
FormattedTextControl(
[
(
"class:validation-toolbar",
self._invalid_message,
)
]
),
dont_extend_height=True,
| |
del self.__virtual_pins[(str2pin_dict['subcomponent_str'], str2pin_dict['subcomponent_pin_str'])]
net = self.__pin_connectivity[pin]
self.__net_connectivity[net].remove(pin)
del self.__pin_connectivity[pin]
def disconnect_bus(self, pinbus_str):
if not isinstance(pinbus_str, str):
raise Exception('pinbus_str should be a string')
# TODO: need to implement disconnect primary busses
str2pin_dict = self.__2pinbus(pinbus_str)
# disconnect bus
pinbus = str2pin_dict['pinbus']
inst = str2pin_dict['subcomponent_str'] if str2pin_dict['is_pin_of_subcomponent'] else pinbus_str
for p in pinbus.all_bits():
self.disconnect(inst + Component.PIN_SEPARATOR + p.get_object_name())
def connect_bus(self, netbus, pinbus_str):
if not isinstance(pinbus_str, str):
raise Exception('pinbus_str should be a string')
netbus = self.__2netbus(netbus) # get Bus object if it was string(its name)
str2pin_dict = self.__2pinbus(pinbus_str)
# Errors
if not str2pin_dict['is_pin_of_subcomponent'] and str2pin_dict['netbus'] == 'not found':
raise Exception('cannot find netbus [' + pinbus_str + '] in component')
# connect bus
pinbus = str2pin_dict['pinbus']
netbus = str2pin_dict['netbus'] if not str2pin_dict['is_pin_of_subcomponent'] else netbus
if netbus.width() == pinbus.width():
for n, p in zip(netbus.all_bits(), pinbus.all_bits()):
if not str2pin_dict['is_pin_of_subcomponent']:
self.connect(n.get_object_name(), p.get_object_name())
else:
self.connect(n.get_object_name(), str2pin_dict['subcomponent_str'] + Component.PIN_SEPARATOR + p.get_object_name())
else:
raise Exception('bus widths does not match')
    def connect_nets(self, input_net, output_net):
        """Tie two nets together through a virtual assignment component.

        A ComponentVIRT instance, named from a class-global counter, is
        inserted with its 'I' pin on `input_net` and its 'O' pin on
        `output_net`.
        """
        from .componentVIRT import ComponentVIRT
        input_net = self.__2net(input_net)
        output_net = self.__2net(output_net)
        name = Component.VIRTUAL_COMP_NAME + str(Component.__count_VIRT_comps)
        self.add_subcomponent(ComponentVIRT(name, self, input_net, output_net), name)
        self.connect(input_net, name + Component.PIN_SEPARATOR + 'I')
        self.connect(output_net, name + Component.PIN_SEPARATOR + 'O')
        # Bump the class-wide counter so the next virtual component gets a fresh name.
        Component.__count_VIRT_comps += 1
def connect_netbusses(self, input_netbus, output_netbus):
input_netbus = self.__2netbus(input_netbus)
output_netbus = self.__2netbus(output_netbus)
if input_netbus.width() != output_netbus.width():
raise Exception('input_netbus and output_netbus should have the same width')
for input_net, output_net in zip(input_netbus.all_bits(), output_netbus.all_bits()):
self.connect_nets(input_net, output_net)
def __2net(self, net): # return net from net_str/Net
if isinstance(net, str):
try:
return self.get_net(net)
except:
raise Exception('cannot find net [' + net + '] in component')
elif isinstance(net, Net):
try:
if net is self.get_net(net.get_object_name()):
return net
else:
raise
except:
raise Exception('cant find net [' + net.get_object_name() + '] in component')
else:
raise Exception('net should be string or Net')
def __2netbus(self, netbus): # return netbus from net_str/Bus
if isinstance(netbus, str):
try:
return self.get_netbus(netbus)
except:
raise Exception('cannot find netbus [' + netbus + '] in component')
elif isinstance(netbus, Bus):
try:
if netbus is self.get_netbus(netbus.get_name()):
return netbus
else:
raise
except:
raise Exception('cannot find netbus [' + netbus.get_name() + '] in component')
else:
raise Exception('netbus should be string or Bus')
    def __2pin(self, pin):
        """Resolve `pin` (string or Pin) to a descriptor dict.

        Returned keys:
          - 'pin': the Pin object (a *virtual* Pin when addressing a
            sub-component pin like ``inst.PIN``)
          - 'is_pin_of_subcomponent': True when `pin` addresses ``inst.PIN``
          - 'subcomponent_str' / 'subcomponent_pin_str': only present for
            sub-component pins
          - 'already_connected': only present for sub-component pins; True
            when a virtual pin for (inst, pin) already exists
        """
        str2pin_dict = {}
        if isinstance(pin, str):
            lpin = pin.split(Component.PIN_SEPARATOR)  # like nand.A
            if len(lpin) == 1:  # pin of component
                str2pin_dict['is_pin_of_subcomponent'] = False
                try:
                    str2pin_dict['pin'] = self.get_pin(pin)
                except:
                    raise Exception('cannot find pin [' + pin + '] in component')
            elif len(lpin) == 2:  # pin of sub-component
                inst, inst_pin = lpin  # lpin[0]=instance name, lpin[1]=pin name
                str2pin_dict['is_pin_of_subcomponent'] = True
                str2pin_dict['subcomponent_str'] = inst
                str2pin_dict['subcomponent_pin_str'] = inst_pin
                sub = self.get_subcomponent(inst)
                try:
                    str2pin_dict['pin'] = sub.get_pin(inst_pin)
                except:
                    raise Exception('pin [' + inst_pin + '] not found in sub-component [' + inst + ']')
                # Virtual pin: a "pointer" to the actual pin inside the
                # sub-component, cached per (inst, pin) so repeated connects
                # reuse the same object.
                if (inst, inst_pin) not in self.__virtual_pins:  # for connect method
                    str2pin_dict['already_connected'] = False
                    virtual_pin = Pin(inst+Component.PIN_SEPARATOR+inst_pin, is_virtual=True, associated_comp=sub, associated_pin=str2pin_dict['pin'])
                    self.__virtual_pins[(inst, inst_pin)] = virtual_pin
                    str2pin_dict['pin'] = virtual_pin
                else:
                    str2pin_dict['already_connected'] = True
                    str2pin_dict['pin'] = self.__virtual_pins[(inst, inst_pin)]
            else:  # too deep: only one level of hierarchy may be addressed
                raise Exception(pin + ' is too deep')
        elif isinstance(pin, Pin):
            if not pin.is_virtual():
                str2pin_dict['is_pin_of_subcomponent'] = False
                str2pin_dict['pin'] = pin
            else:
                # A virtual Pin encodes "inst.pin" in its own name.
                lpin = pin.get_object_name().split(Component.PIN_SEPARATOR)
                if len(lpin) != 2:
                    raise Exception(pin.get_object_name() + ' is too deep')
                inst, inst_pin = lpin
                str2pin_dict['is_pin_of_subcomponent'] = True
                str2pin_dict['subcomponent_str'] = inst
                str2pin_dict['subcomponent_pin_str'] = inst_pin
                if (inst, inst_pin) in self.__virtual_pins:
                    str2pin_dict['already_connected'] = True
                    str2pin_dict['pin'] = pin
                else:
                    raise Exception('got virtual pin that doesnt in virtual_pins of component')  # add to virtual_pins instead?
        else:
            raise Exception('pin should be a string or Pin')
        return str2pin_dict
    def __2pinbus(self, pinbus):
        """Resolve `pinbus` (string or Bus) to a descriptor dict.

        Returned keys:
          - 'pinbus': the pin Bus object
          - 'is_pin_of_subcomponent': True when `pinbus` addresses ``inst.BUS``
          - 'netbus': only set for an own (primary) bus; the net Bus with the
            same name, or the sentinel string 'not found'
          - 'subcomponent_str' / 'subcomponent_pin_str': only present for
            sub-component busses
        """
        str2pin_dict = {}
        if isinstance(pinbus, str):
            lpin = pinbus.split(Component.PIN_SEPARATOR)  # like nand.A
            if len(lpin) == 1:  # pin of component
                str2pin_dict['is_pin_of_subcomponent'] = False
                try:
                    str2pin_dict['pinbus'] = self.get_pinbus(pinbus)
                except:
                    raise Exception('cannot find pinbus [' + pinbus + '] in component')
                try:
                    str2pin_dict['netbus'] = self.get_netbus(pinbus)
                except:
                    str2pin_dict['netbus'] = 'not found'
            elif len(lpin) == 2:  # pin of sub-component
                inst, inst_pin = lpin  # lpin[0]=instance name, lpin[1]=pin name
                str2pin_dict['is_pin_of_subcomponent'] = True
                str2pin_dict['subcomponent_str'] = inst
                str2pin_dict['subcomponent_pin_str'] = inst_pin
                sub = self.get_subcomponent(inst)
                try:
                    str2pin_dict['pinbus'] = sub.get_pinbus(inst_pin)
                except:
                    raise Exception('pinbus [' + inst_pin + '] not found in sub-component [' + inst + ']')
            else:  # too deep: only one level of hierarchy may be addressed
                raise Exception(pinbus + ' is too deep')
        elif isinstance(pinbus, Bus):
            str2pin_dict['pinbus'] = pinbus
        else:
            raise Exception('pinbus should be a string or Bus')
        return str2pin_dict
def count_instances(self):
# first count the module itself
count = {self.get_object_name(): 1}
# then add the sub-counts from submodules
for inst, dev in self.get_subcomponents():
sub_count = dev.count_instances()
for m in sub_count:
if m in count:
count[m] += sub_count[m]
else:
count[m] = sub_count[m]
return count
    def copy(self, copy_name):
        """Shallow-copy this component and register it globally under `copy_name`.

        Prefer `new_dev = Component(copy_name, original=old_dev)` instead.
        """
        if copy_name in Component.__global_components:
            raise Exception('component ['+copy_name+'] already exists')
        # `copy` here resolves to the module-level copy function, not this
        # method — presumably `from copy import copy` at file top; confirm.
        new_dev = copy(self)
        new_dev.set_object_name(copy_name)
        Component.__global_components[copy_name] = new_dev
        return new_dev
    def deepcopy(self, copy_name):
        """Deep-copy this component and register it globally under `copy_name`.

        Prefer `new_dev = Component(copy_name, original=old_dev)` instead.
        """
        if copy_name in Component.__global_components:
            raise Exception('component ['+copy_name+'] already exists')
        # `deepcopy` here resolves to the module-level deepcopy function, not
        # this method — presumably `from copy import deepcopy`; confirm.
        new_dev = deepcopy(self)
        new_dev.set_object_name(copy_name)
        Component.__global_components[copy_name] = new_dev
        return new_dev
    def set_dont_uniq(self, val):
        """Mark (True) / unmark this component to be skipped by uniquification."""
        self.__dont_uniq = val
    def get_dont_uniq(self):
        """Return whether this component is skipped by uniquification."""
        return self.__dont_uniq
    def set_dont_write_verilog(self, val):
        """Mark (True) / unmark this component to be skipped when writing verilog."""
        self.__dont_write_verilog = val
    def get_dont_write_verilog(self):
        """Return whether this component is skipped when writing verilog."""
        return self.__dont_write_verilog
    def set_is_physical(self, val):
        """Set the 'physical' flag of this component."""
        self.__is_physical = val
    def get_is_physical(self):
        """Return the 'physical' flag of this component."""
        return self.__is_physical
    def set_is_sequential(self, val):
        """Set the 'sequential' flag of this component."""
        self.__is_sequential = val
    def get_is_sequential(self):
        """Return the 'sequential' flag of this component."""
        return self.__is_sequential
    def uniq(self, count=None, numbering=None):
        """Make every multiply-instantiated subcomponent unique (a new component).

        The topmost call computes the instance `count` and name `numbering`
        tables; recursive calls receive and share the same tables.
        """
        # topmost component counts, all others should get count from above
        if not count:
            count = self.count_instances()
        # same as count, the one that counts should setup the numbering
        if not numbering:
            numbering = {}
            for k in count:
                numbering[k] = 0
                # make sure k_# not already used globally
                while k+"_"+str(numbering[k]) in Component.__global_components:
                    numbering[k] +=1
        for inst in sorted(self.subcomponent_names(), key=slm_sort()):
            dev = self.get_subcomponent(inst)
            devname = dev.get_object_name()
            if count[devname]>1 and not dev.get_dont_uniq():
                # Clone the shared component under a fresh numbered name.
                new_devname = devname+"_"+str(numbering[devname])
                new_dev = type(self)(new_devname, dev)
                # NOTE(review): stores the new *name* under the instance key —
                # confirm __subcomponents maps instance -> component name.
                self.__subcomponents.update({inst: new_devname})
                # self.add_subcomponent(inst, new_dev)
                # the new devname is missing from count dictionary
                count[new_devname] = 1
                # increment numbering to next available number
                while True:
                    numbering[devname] += 1
                    next_devname_uniq = devname+"_"+str(numbering[devname])
                    if next_devname_uniq not in Component.__global_components:
                        break
                dev = new_dev
            # Recurse with the shared tables so deeper clones stay consistent.
            dev.uniq(count, numbering)
def verilog_port_list(self):
return [x for x in [pl.verilog_port_list() for pl in self.get_pins()] if x is not None]
    def __is_inst_bus_connected_any(self, inst, inst_bus):
        """Return True if at least one bit of `inst_bus` on instance `inst` is connected."""
        count_bits_uc = 0
        for inst_pin in inst_bus.all_bits():
            if (inst, inst_pin.get_object_name()) not in self.__virtual_pins:
                count_bits_uc += 1  # count bits that are NOT connected
        # False only when *every* bit is unconnected, i.e. True when at
        # least one bit IS connected.
        return not count_bits_uc == inst_bus.width()
    def legalize(self):
        """Validate the component against basic structural rules.

        Raises on: self-instantiation (direct or nested), container/object
        name asymmetry, instance/wire name clashes, and multi-driven nets.
        Finally connects half-unconnected pin busses.
        """
        self_instantiation = self.check_self_instantiations()
        if self_instantiation:
            raise Exception('self instantiations: "{}" cant have subcomponent of itself '
                            '(either direct or a subcomponent of a subcomponent)'.format(self_instantiation))
        self.check_if_instances_not_exist()
        asymmetry_name = self.check_pins_nets_names_asymmetry()
        if asymmetry_name:
            raise Exception('asymmetry name "{}" - cant hold net/pin with a different name from his'.format(asymmetry_name))
        duplicate_name = self.check_duplicate_names()
        if duplicate_name:
            raise Exception('duplicate name "{}" - is a name of instance and also a wire'.format(duplicate_name))
        multidriven = self.check_multidriven()
        if multidriven:
            raise Exception('net "{}" is driven by multiple drivers: {}'.format(multidriven[0], multidriven[1]))
        self.connect_half_unconnected_pinbusses()
    def check_multidriven(self):
        """Return (net, drivers) for the first net with more than one driver, else None.

        A driver is an Input pin of this component or, through a virtual
        pin, an Output pin of a sub-component; assignments made by virtual
        components are reported by their source net's name.
        """
        for net, pins in self.__net_connectivity.items():
            drivers = []
            for p in pins:
                if type(p) == Input or (p.is_virtual() and type(p.get_associated_pin()) == Output):
                    if p.is_virtual() and p.get_associated_comp().is_virtual():  # assignment from a net
                        drivers.append(p.get_associated_comp().input_net.get_object_name())
                    else:
                        drivers.append(p.get_object_name())
            if len(drivers) > 1:
                return net, drivers
def check_duplicate_names(self):
# check if there is an instance and wire with the same name
instances = [*self.subcomponent_names(), self.get_object_name()]
wires = self.net_names()
for inst in instances:
if inst in wires:
return inst
return None
    def check_pins_nets_names_asymmetry(self):
        """Return the first pin/net/bus whose stored key differs from its own name, else None."""
        items = list(self.__pins.items())+list(self.__nets.items())+list(self.__pinbusses.items())+list(self.__netbusses.items())
        for str_, obj in items:
            if str_ != obj.get_object_name():
                return obj
def check_if_instances_not_exist(self):
self.get_descendants()
| |
LsGermsStruct to a list if needed:
validStructTypes = (_objs.LsGermsStructure, _objs.LsGermsSerialStructure)
if isinstance(circuit_structs, validStructTypes):
master = circuit_structs
circuit_structs = [master.truncate(Ls=master.Ls[0:i + 1])
for i in range(len(master.Ls))]
nested = True # (by this construction)
super().__init__([s.allstrs for s in circuit_structs], None, qubit_labels, nested)
self.circuit_structs = circuit_structs
self.auxfile_types['circuit_structs'] = 'pickle'
class CombinedExperimentDesign(ExperimentDesign):  # for multiple designs on the same dataset
    """
    An experiment design that combines the specifications of
    one or more "sub-designs". The sub-designs are preserved as children under
    the :class:`CombinedExperimentDesign` instance, creating a "data-tree" structure. The
    :class:`CombinedExperimentDesign` object itself simply merges all of the circuit lists.
    """
    def __init__(self, sub_designs, all_circuits=None, qubit_labels=None, sub_design_dirs=None,
                 interleave=False, category='EdesignBranch'):
        """
        Create a new CombinedExperimentDesign object.

        Parameters
        ----------
        sub_designs : dict or list
            A dictionary of other :class:`ExperimentDesign` objects whose keys
            are names for each sub-edesign (used for directories and to index
            the sub-edesigns from this experiment design). If a list is given instead,
            default names of the form "**<number>" are used.

        all_circuits : list, optional
            A list of :class:`Circuit`s, specifying all the circuits needing
            data. This can include additional circuits that are not in any
            of `sub_designs`. By default, the union of all the circuits in
            the sub-designs is used.

        qubit_labels : tuple, optional
            The qubits that this experiment design applies to. If None, the line labels
            of the first circuit is used.

        sub_design_dirs : dict, optional
            A dictionary whose values are directory names and keys are sub-edesign
            names (the same as the keys of `sub_designs`). If None, then the
            keys of `sub_designs` must be strings and are used as directory
            names. Directory names are used when saving the object (via
            :method:`write`).

        interleave : bool, optional
            Whether the sub-designs' circuits should be interleaved when
            building the default `all_circuits` list. Not implemented yet —
            must be left False.

        category : str, optional
            The category that describes the sub-edesigns of this object. This
            is used as a heading for the keys of `sub_designs`.

        Returns
        -------
        CombinedExperimentDesign
        """
        if not isinstance(sub_designs, dict):
            sub_designs = {("**%d" % i): des for i, des in enumerate(sub_designs)}

        if all_circuits is None:
            all_circuits = []
            if not interleave:
                # Simple concatenation of each sub-design's circuits.
                for des in sub_designs.values():
                    all_circuits.extend(des.all_circuits_needing_data)
            else:
                raise NotImplementedError("Interleaving not implemented yet")
            _lt.remove_duplicates_in_place(all_circuits)  # Maybe don't always do this?

        if qubit_labels is None and len(sub_designs) > 0:
            # "multiple" marks a design whose children disagree on qubit labels.
            first = sub_designs[list(sub_designs.keys())[0]].qubit_labels
            if any([des.qubit_labels != first for des in sub_designs.values()]):
                qubit_labels = "multiple"
            else:
                qubit_labels = first

        super().__init__(all_circuits, qubit_labels, sub_designs, sub_design_dirs, category)

    def create_subdata(self, sub_name, dataset):
        """
        Creates a :class:`ProtocolData` object for the sub-experiment-design
        given by `sub_name`, starting from `dataset` as the data for *this*
        experiment design. This is used internally by :class:`ProtocolData`
        objects, and shouldn't need to be used by external users.
        """
        sub_circuits = self[sub_name].all_circuits_needing_data
        truncated_ds = dataset.truncate(sub_circuits)  # maybe have filter_dataset also do this?
        return ProtocolData(self[sub_name], truncated_ds)
class SimultaneousExperimentDesign(ExperimentDesign):
    """
    An experiment design whose circuits are the tensor-products
    of the circuits from one or more :class:`ExperimentDesign` objects that
    act on disjoint sets of qubits. The sub-designs are preserved as children under
    the :class:`SimultaneousExperimentDesign` instance, creating a "data-tree" structure.
    """

    #@classmethod
    #def from_tensored_circuits(cls, circuits, template_edesign, qubit_labels_per_edesign):
    #    pass #Useful??? - need to break each circuit into different parts
    # based on qubits, then copy (?) template edesign and just replace itself
    # all_circuits_needing_data member?

    def __init__(self, edesigns, tensored_circuits=None, qubit_labels=None, category='Qubits'):
        """
        Create a new SimultaneousExperimentDesign object.

        Parameters
        ----------
        edesigns : list
            A list of :class:`ExperimentDesign` objects whose circuits
            are to occur simultaneously.

        tensored_circuits : list, optional
            A list of all the circuits for this experiment design. By default,
            these are the circuits of those in `edesigns` tensored together.
            Typically this is left as the default.

        qubit_labels : tuple, optional
            The qubits that this experiment design applies to. If None, the
            concatenated qubit labels of `edesigns` are used (this is usually
            what you want).

        category : str, optional
            The category name for the qubit-label-tuples corresponding to the
            elements of `edesigns`.

        Returns
        -------
        SimultaneousExperimentDesign
        """
        #TODO: check that sub-designs don't have overlapping qubit_labels
        assert(not any([des.qubit_labels == "multiple" for des in edesigns])), \
            "SimultaneousExperimentDesign requires sub-designs with definite qubit_labels, not 'multiple'"

        if qubit_labels is None:
            qubit_labels = tuple(_itertools.chain(*[des.qubit_labels for des in edesigns]))

        if tensored_circuits is None:
            #Build tensor product of circuits
            tensored_circuits = []
            circuits_per_edesign = [des.all_circuits_needing_data[:] for des in edesigns]

            #Pad shorter lists with None values so zip() below keeps all rows
            maxLen = max(map(len, circuits_per_edesign))
            for lst in circuits_per_edesign:
                if len(lst) < maxLen: lst.extend([None] * (maxLen - len(lst)))

            def PAD(subcs):
                # Pad each subcircuit of this row with idle layers so all
                # non-None subcircuits share the row's maximum depth.
                maxLen = max([len(c) if (c is not None) else 0 for c in subcs])
                padded = []
                for c in subcs:
                    if c is not None and len(c) < maxLen:
                        cpy = c.copy(editable=True)
                        cpy.insert_idling_layers(None, maxLen - len(cpy))
                        cpy.done_editing()
                        padded.append(cpy)
                    else:
                        padded.append(c)
                assert(all([len(c) == maxLen for c in padded if c is not None]))
                return padded

            padded_circuit_lists = [list() for des in edesigns]
            for subcircuits in zip(*circuits_per_edesign):
                c = _cir.Circuit(num_lines=0, editable=True)  # Creates an empty circuit over no wires
                padded_subcircuits = PAD(subcircuits)
                for subc in padded_subcircuits:
                    if subc is not None:
                        c.tensor_circuit(subc)
                c.line_labels = qubit_labels
                c.done_editing()
                tensored_circuits.append(c)
                # Record the (padded) circuit each sub-design actually ran.
                for lst, subc in zip(padded_circuit_lists, padded_subcircuits):
                    if subc is not None: lst.append(subc)

            for des, padded_circuits in zip(edesigns, padded_circuit_lists):
                des.set_actual_circuits_executed(padded_circuits)

        sub_designs = {des.qubit_labels: des for des in edesigns}
        sub_design_dirs = {qlbls: '_'.join(map(str, qlbls)) for qlbls in sub_designs}
        super().__init__(tensored_circuits, qubit_labels, sub_designs, sub_design_dirs, category)

    def create_subdata(self, qubit_labels, dataset):
        """
        Creates a :class:`ProtocolData` object for the sub-experiment-design
        indexed by `qubit_labels`, starting from `dataset` as the data for
        *this* experiment design. This is used internally by
        :class:`ProtocolData` objects, and shouldn't need to be used by
        external users.
        """
        if isinstance(dataset, _objs.MultiDataSet):
            raise NotImplementedError("SimultaneousExperimentDesigns don't work with multi-pass data yet.")

        all_circuits = self.all_circuits_needing_data
        qubit_ordering = all_circuits[0].line_labels  # first circuit in *this* edesign determines qubit order
        qubit_index = {qlabel: i for i, qlabel in enumerate(qubit_ordering)}
        sub_design = self[qubit_labels]
        qubit_indices = [qubit_index[ql] for ql in qubit_labels]  # order determined by first circuit (see above)
        filtered_ds = _cnst.filter_dataset(dataset, qubit_labels, qubit_indices)  # Marginalize dataset

        if sub_design.alt_actual_circuits_executed:
            # Map the padded circuits that were actually run back to the
            # circuits the sub-design originally asked for.
            actual_to_desired = _collections.defaultdict(lambda: None)
            actual_to_desired.update({actual: desired for actual, desired in
                                      zip(sub_design.alt_actual_circuits_executed,
                                          sub_design.all_circuits_needing_data)})
            filtered_ds = filtered_ds.copy_nonstatic()
            filtered_ds.process_circuits(lambda c: actual_to_desired[c], aggregate=False)
            filtered_ds.done_adding_data()
        return ProtocolData(sub_design, filtered_ds)
class ProtocolData(_TreeNode):
"""
A :class:`ProtocolData` object represents the experimental data needed to
run one or more QCVV protocols. This class contains a :class:`ProtocolIput`,
which describes a set of circuits, and a :class:`DataSet` (or :class:`MultiDataSet`)
that holds data for these circuits. These members correspond to the `.edesign`
and `.dataset` attributes.
"""
@classmethod
def from_dir(cls, dirname, parent=None, name=None):
"""
Initialize a new ProtocolData object from `dirname`.
Parameters
----------
dirname : str
The *root* directory name (under which there are 'edesign'
and 'data' subdirectories).
Returns
-------
ProtocolData
"""
p = _pathlib.Path(dirname)
edesign = parent.edesign[name] if parent and name else \
_io.load_edesign_from_dir(dirname)
data_dir = p / 'data'
#with open(data_dir / 'meta.json', 'r') as f:
# meta = _json.load(f)
#Load dataset or multidataset based on what files exist
dataset_files = sorted(list(data_dir.glob('*.txt')))
if len(dataset_files) == 0: # assume same dataset as parent
if parent is None: parent = ProtocolData.from_dir(dirname / '..')
dataset = parent.dataset
elif len(dataset_files) == 1 and dataset_files[0].name == 'dataset.txt': # a single dataset.txt file
dataset = _io.load_dataset(dataset_files[0], verbosity=0)
else:
raise NotImplementedError("Need to implement MultiDataSet.init_from_dict!")
dataset = _objs.MultiDataSet.init_from_dict(
{pth.name: _io.load_dataset(pth, verbosity=0) for pth in dataset_files})
cache = _io.read_json_or_pkl_files_to_dict(data_dir / 'cache')
ret = cls(edesign, dataset, cache)
ret._init_children(dirname, 'data') # loads child nodes
return ret
def __init__(self, edesign, dataset=None, cache=None):
"""
Create a new ProtocolData object.
Parameters
----------
edesign : ExperimentDesign
The experiment design describing what circuits this object
contains data for. If None, then an unstructured
:class:`ExperimentDesign` is created containing the circuits
present in `dataset`.
dataset : DataSet or MultiDataSet, optional
The data counts themselves.
cache : dict, optional
A cache of values which holds values derived *only* from
the experiment design and data in this object.
Returns
-------
ProtocolData
"""
self.edesign = edesign
self.dataset = dataset # MultiDataSet allowed for multi-pass data; None also allowed.
self.cache = cache if (cache is not None) else {}
if isinstance(self.dataset, _objs.MultiDataSet):
for dsname in self.dataset:
if dsname not in self.cache: self.cache[dsname] = {} # create separate caches for each pass
self._passdatas = {dsname: | |
announce for low level players
if int(death_level) < announce_threshold:
return
if char is None:
if char_name is None:
log.error("announce_death: no character or character name passed.")
return
char = yield from get_character(char_name)
if type(char) is not dict:
log.warning("announce_death: couldn't fetch character (" + char_name + ")")
return
log.info("Announcing death: {0}({1}) | {2}".format(char["name"], death_level, death_killer))
# Get correct pronouns
pronoun = get_pronouns(char["gender"])
# Find killer article (a/an)
death_killer_article = ""
if not death_by_player:
death_killer_article = death_killer.split(" ", 1)
if death_killer_article[0] in ["a", "an"] and len(death_killer_article) > 1:
death_killer = death_killer_article[1]
death_killer_article = death_killer_article[0]+" "
else:
death_killer_article = ""
# Select a message
# Todo: Add levels lost to weighedChoice, is always 0 or greater.
message = weighedChoice(deathmessages_player, char['vocation'], int(death_level)) if death_by_player else weighedChoice(deathmessages_monster, char['vocation'], int(death_level), death_killer)
# Format message with death information
deathInfo = {'charName': char["name"], 'deathLevel': death_level, 'deathKiller': death_killer,
'deathKillerArticle': death_killer_article, 'pronoun1': pronoun[0], 'pronoun2': pronoun[1],
'pronoun3': pronoun[2]}
message = message.format(**deathInfo)
# Format extra stylization
message = formatMessage(message)
message = EMOJI[":skull_crossbones:"] + " " + message
for server_id, tracked_world in tracked_worlds.items():
server = bot.get_server(server_id)
if char["world"] == tracked_world and server is not None \
and server.get_member(str(char["owner_id"])) is not None:
yield from bot.send_message(get_announce_channel(bot, server), message[:1].upper()+message[1:])
@asyncio.coroutine
def announce_level(bot, new_level, char_name=None, char=None):
    """Announces a level up on corresponding servers.
    One of these must be passed:
    char is a character dictionary
    char_name is a character's name
    If char_name is passed, the character is fetched here.
    The announcement is only sent to servers tracking the character's world
    where the character's owner is a member."""
    # Don't announce low level players
    if int(new_level) < announce_threshold:
        return
    if char is None:
        if char_name is None:
            log.error("announce_level: no character or character name passed.")
            return
        char = yield from get_character(char_name)
    if not isinstance(char, dict):
        # get_character returns an error code (not a dict) on failure, and char_name
        # may be None when a bad char argument was passed directly — use format()
        # instead of "+" so a None char_name can't raise TypeError here.
        log.warning("announce_level: couldn't fetch character ({0})".format(char_name))
        return
    log.info("Announcing level up: {0} ({1})".format(char["name"], new_level))
    # Get pronouns based on gender
    pronoun = get_pronouns(char['gender'])
    # Select a message
    message = weighedChoice(levelmessages, char['vocation'], int(new_level))
    # Format message with level information
    level_info = {'charName': char["name"], 'newLevel': new_level, 'pronoun1': pronoun[0], 'pronoun2': pronoun[1],
                  'pronoun3': pronoun[2]}
    message = message.format(**level_info)
    # Format extra stylization
    message = formatMessage(message)
    message = EMOJI[":star2:"]+" "+message
    for server_id, tracked_world in tracked_worlds.items():
        server = bot.get_server(server_id)
        if char["world"] == tracked_world and server is not None \
                and server.get_member(str(char["owner_id"])) is not None:
            yield from bot.send_message(get_announce_channel(bot, server), message)
# Bot commands
@bot.command(pass_context=True, aliases=["commands"])
@asyncio.coroutine
def help(ctx, *commands: str):
    """Shows this message."""
    transforms = {
        '@everyone': '@\u200beveryone',
        '@here': '@\u200bhere'
    }
    pattern = re.compile('|'.join(transforms.keys()))
    client = ctx.bot

    def sanitize(match):
        # Neutralize mass mentions inside user-supplied command names.
        return transforms.get(match.group(0), '')

    # Reply in the channel only when asked from the dedicated ask channel;
    # otherwise answer via private message.
    if ctx.message.channel.name == ask_channel_name:
        destination = ctx.message.channel
    else:
        destination = ctx.message.author
    if not commands:
        # Bare "help": list every command the bot offers.
        pages = client.formatter.format_help_for(ctx, client)
    elif len(commands) == 1:
        # Single argument: could name either a cog or a top-level command.
        name = pattern.sub(sanitize, commands[0])
        command = client.cogs[name] if name in client.cogs else client.commands.get(name)
        if command is None:
            yield from client.send_message(destination, client.command_not_found.format(name))
            return
        destination = ctx.message.channel if command.no_pm else destination
        pages = client.formatter.format_help_for(ctx, command)
    else:
        # Multiple arguments: walk down the subcommand chain.
        name = pattern.sub(sanitize, commands[0])
        command = client.commands.get(name)
        if command is None:
            yield from client.send_message(destination, client.command_not_found.format(name))
            return
        for key in commands[1:]:
            try:
                key = pattern.sub(sanitize, key)
                command = command.commands.get(key)
                if command is None:
                    yield from client.send_message(destination, client.command_not_found.format(key))
                    return
            except AttributeError:
                yield from client.send_message(destination, client.command_has_no_subcommands.format(command, key))
                return
        pages = client.formatter.format_help_for(ctx, command)
    for page in pages:
        yield from client.send_message(destination, page)
@bot.command(pass_context=True, description='For when you wanna settle the score some other way')
@asyncio.coroutine
def choose(ctx, *choices: str):
    """Chooses between multiple choices."""
    # *choices is always a tuple, never None, so the old "is None" check could
    # never trigger and an empty invocation crashed random.choice with IndexError.
    if not choices:
        return
    user = ctx.message.author
    yield from bot.say('Alright, **@{0}**, I choose: "{1}"'.format(user.display_name, random.choice(choices)))
@bot.command(pass_context=True, aliases=["i'm", "iam"])
@checks.is_not_lite()
@asyncio.coroutine
def im(ctx, *, char_name: str):
"""Lets you add your tibia character(s) for the bot to track.
If you need to add any more characters or made a mistake, please message an admin."""
# This is equivalent to someone using /stalk addacc on themselves.
# If im_new_only is True it will only work on users who have no characters added to their account.
user = ctx.message.author
# List of servers the user shares with the bot
user_servers = get_user_servers(bot, user.id)
# List of Tibia worlds tracked in the servers the user is
user_tibia_worlds = [world for server, world in tracked_worlds.items() if server in [s.id for s in user_servers]]
# Remove duplicate entries from list
user_tibia_worlds = list(set(user_tibia_worlds))
if not ctx.message.channel.is_private and tracked_worlds.get(ctx.message.server.id) is None:
yield from bot.say("This server is not tracking any tibia worlds.")
return
if len(user_tibia_worlds) == 0:
return
c = userDatabase.cursor()
try:
valid_mods = []
for id in (owner_ids + mod_ids):
mod = get_member(bot, id, ctx.message.server)
if mod is not None:
valid_mods.append(mod.mention)
admins_message = join_list(valid_mods, ", ", " or ")
yield from bot.send_typing(ctx.message.channel)
char = yield from get_character(char_name)
if type(char) is not dict:
if char == ERROR_NETWORK:
yield from bot.say("I couldn't fetch the character, please try again.")
elif char == ERROR_DOESNTEXIST:
yield from bot.say("That character doesn't exists.")
return
chars = char['chars']
# If the char is hidden,we still add the searched character, if we have just one, we replace it with the
# searched char, so we don't have to look him up again
if len(chars) == 0 or len(chars) == 1:
chars = [char]
skipped = []
updated = []
added = []
existent = []
for char in chars:
if char["world"] not in user_tibia_worlds:
skipped.append(char)
continue
c.execute("SELECT name, user_id as owner FROM chars WHERE name LIKE ?", (char["name"],))
db_char = c.fetchone()
if db_char is not None:
owner = get_member(bot, db_char["owner"])
# Previous owner doesn't exist anymore
if owner is None:
updated.append({'name': char['name'], 'world': char['world'], 'prevowner': db_char["owner"],
'guild': char.get("guild", "No guild")})
continue
# Char already registered to this user
elif owner.id == user.id:
existent.append("{name} ({world})".format(**char))
continue
# Character is registered to another user
else:
reply = "Sorry, a character in that account ({0}) is already claimed by **{1.mention}**.\n" \
"Maybe you made a mistake? Or someone claimed a character of yours? " \
"Message {2} if you need help!"
yield from bot.say(reply.format(db_char["name"], owner, admins_message))
return
# If we only have one char, it already contains full data
if len(chars) > 1:
yield from bot.send_typing(ctx.message.channel)
char = yield from get_character(char["name"])
if char == ERROR_NETWORK:
yield from bot.reply("I'm having network troubles, please try again.")
return
if char.get("deleted", False):
skipped.append(char)
continue
char["guild"] = char.get("guild", "No guild")
added.append(char)
if len(skipped) == len(chars):
reply = "Sorry, I couldn't find any characters from the servers I track ({0})."
yield from bot.reply(reply.format(join_list(user_tibia_worlds, ", ", " and ")))
return
reply = ""
log_reply = dict().fromkeys([server.id for server in user_servers], "")
if len(existent) > 0:
reply += "\nThe following characters were already registered to you: {0}" \
.format(join_list(existent, ", ", " and "))
if len(added) > 0:
reply += "\nThe following characters were added to your account: {0}" \
.format(join_list(["{name} ({world})".format(**c) for c in added], ", ", " and "))
for char in added:
log.info("Character {0} was assigned to {1.display_name} (ID: {1.id})".format(char['name'], user))
# Announce on server log of each server
for server in user_servers:
# Only announce on worlds where the character's world is tracked
if tracked_worlds.get(server.id, None) == char["world"]:
log_reply[server.id] += "\n\t{name} - {level} {vocation} - **{guild}**".format(**char)
if len(updated) > 0:
reply += "\nThe following characters were reassigned to you: {0}" \
.format(join_list(["{name} ({world})".format(**c)for c in updated], ", ", " and "))
for char in updated:
log.info("Character {0} was reassigned to {1.display_name} (ID: {1.id})".format(char['name'], user))
# Announce on server log of each server
for server in user_servers:
# Only announce on worlds where the character's world is tracked
if tracked_worlds.get(server.id, None) == char["world"]:
log_reply[server.id] += "\n\t{name} (Reassigned)".format(**char)
for char in updated:
c.execute("UPDATE chars SET user_id = ? WHERE name LIKE ?", (user.id, char['name']))
for char in added:
c.execute(
"INSERT INTO chars (name,last_level,vocation,user_id, world) VALUES (?,?,?,?,?)",
(char['name'], char['level']*-1, char['vocation'], user.id, char["world"])
)
c.execute("INSERT OR IGNORE INTO users (id, name) VALUES (?, ?)", (user.id, user.display_name,))
c.execute("UPDATE users SET name = ? WHERE id = ?", (user.display_name, user.id, ))
yield from bot.reply(reply)
for server_id, message in log_reply.items():
if message:
message = user.mention + " registered the following characters: " + message
| |
parametric position
* a list of float values for evaluation at the multiple parametric positions
The return value will be in the order of the input parametric position list.
This method accepts the following keyword arguments:
* ``normalize``: normalizes the output vector. Default value is *True*.
:param parpos: parametric position(s) where the evaluation will be executed
:type parpos: float, list or tuple
:return: binormal vector as a tuple of the origin point and the vector components
:rtype: tuple
"""
return operations.binormal(self, parpos, **kwargs)
@utl.export
class Surface(abstract.Surface):
""" Data storage and evaluation class for B-spline (non-rational) surfaces.
This class provides the following properties:
* :py:attr:`type` = spline
* :py:attr:`id`
* :py:attr:`order_u`
* :py:attr:`order_v`
* :py:attr:`degree_u`
* :py:attr:`degree_v`
* :py:attr:`knotvector_u`
* :py:attr:`knotvector_v`
* :py:attr:`ctrlpts`
* :py:attr:`ctrlpts_size_u`
* :py:attr:`ctrlpts_size_v`
* :py:attr:`ctrlpts2d`
* :py:attr:`delta`
* :py:attr:`delta_u`
* :py:attr:`delta_v`
* :py:attr:`sample_size`
* :py:attr:`sample_size_u`
* :py:attr:`sample_size_v`
* :py:attr:`bbox`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`vis`
* :py:attr:`evaluator`
* :py:attr:`tessellator`
* :py:attr:`rational`
* :py:attr:`trims`
The following code segment illustrates the usage of Surface class:
.. code-block:: python
:linenos:
from geomdl import BSpline
# Create a BSpline surface instance (Bezier surface)
surf = BSpline.Surface()
# Set degrees
surf.degree_u = 3
surf.degree_v = 2
# Set control points
control_points = [[0, 0, 0], [0, 4, 0], [0, 8, -3],
[2, 0, 6], [2, 4, 0], [2, 8, 0],
[4, 0, 0], [4, 4, 0], [4, 8, 3],
[6, 0, 0], [6, 4, -3], [6, 8, 0]]
surf.set_ctrlpts(control_points, 4, 3)
# Set knot vectors
surf.knotvector_u = [0, 0, 0, 0, 1, 1, 1, 1]
surf.knotvector_v = [0, 0, 0, 1, 1, 1]
# Set evaluation delta (control the number of surface points)
surf.delta = 0.05
# Get surface points (the surface will be automatically evaluated)
surface_points = surf.evalpts
**Keyword Arguments:**
* ``precision``: number of decimal places to round to. *Default: 18*
* ``normalize_kv``: activates knot vector normalization. *Default: True*
* ``find_span_func``: sets knot span search implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: sets knot insertion implementation. *Default:* :func:`.operations.insert_knot`
* ``remove_knot_func``: sets knot removal implementation. *Default:* :func:`.operations.remove_knot`
Please refer to the :py:class:`.abstract.Surface()` documentation for more details.
"""
# __slots__ = ('_insert_knot_func', '_remove_knot_func', '_control_points2D')
    def __init__(self, **kwargs):
        """ Initializes the B-spline surface.

        All keyword arguments are forwarded to the parent class; additionally,
        ``insert_knot_func`` and ``remove_knot_func`` let callers override the
        knot insertion/removal implementations.
        """
        super(Surface, self).__init__(**kwargs)
        # Evaluator uses the span-search function selected by the parent constructor
        self._evaluator = evaluators.SurfaceEvaluator(find_span_func=self._span_func)
        # Default tessellation strategy
        self._tsl_component = tessellate.TriangularTessellate()
        self._control_points2D = self._init_array()  # control points, 2-D array [u][v]
        # Pluggable knot manipulation hooks (defaults come from the operations module)
        self._insert_knot_func = kwargs.get('insert_knot_func', operations.insert_knot)
        self._remove_knot_func = kwargs.get('remove_knot_func', operations.remove_knot)
    @property
    def ctrlpts2d(self):
        """ 2-dimensional array of control points.
        The getter returns a tuple of 2D control points (weighted control points + weights if NURBS) in *[u][v]* format.
        The rows of the returned tuple correspond to v-direction and the columns correspond to u-direction.
        The following example can be used to traverse 2D control points:
        .. code-block:: python
            :linenos:

            # Create a BSpline surface
            surf_bs = BSpline.Surface()
            # Do degree, control points and knot vector assignments here
            # Each u includes a row of v values
            for u in surf_bs.ctrlpts2d:
                # Each row contains the coordinates of the control points
                for v in u:
                    print(str(v))  # will be something like (1.0, 2.0, 3.0)

            # Create a NURBS surface
            surf_nb = NURBS.Surface()
            # Do degree, weighted control points and knot vector assignments here
            # Each u includes a row of v values
            for u in surf_nb.ctrlpts2d:
                # Each row contains the coordinates of the weighted control points
                for v in u:
                    print(str(v))  # will be something like (0.5, 1.0, 1.5, 0.5)

        When using **NURBS.Surface** class, the output of :py:attr:`~ctrlpts2d` property could be confusing since,
        :py:attr:`~ctrlpts` always returns the unweighted control points, i.e. :py:attr:`~ctrlpts` property returns 3D
        control points all divided by the weights and you can use :py:attr:`~weights` property to access the weights
        vector, but :py:attr:`~ctrlpts2d` returns the weighted ones plus weights as the last element.
        This difference is intentionally added for compatibility and interoperability purposes.
        To explain this situation in a simple way;
        * If you need the weighted control points directly, use :py:attr:`~ctrlpts2d`
        * If you need the control points and the weights separately, use :py:attr:`~ctrlpts` and :py:attr:`~weights`
        .. note::
            Please note that the setter doesn't check for inconsistencies and using the setter is not recommended.
            Instead of the setter property, please use :func:`.set_ctrlpts()` function.
        Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
        on using this class member.
        :getter: Gets the control points as a 2-dimensional array in [u][v] format
        :setter: Sets the control points as a 2-dimensional array in [u][v] format
        :type: list
        """
        # Grid is maintained by set_ctrlpts(); rows index u, columns index v.
        return self._control_points2D
@ctrlpts2d.setter
def ctrlpts2d(self, value):
if not isinstance(value, (list, tuple)):
raise ValueError("The input must be a list or tuple")
# Clean up the surface and control points
self.reset(evalpts=True, ctrlpts=True)
# Assume that the user has prepared the lists correctly
size_u = len(value)
size_v = len(value[0])
# Estimate dimension by checking the size of the first element
self._dimension = len(value[0][0])
# Make sure that all numbers are float type
ctrlpts = [[] for _ in range(size_u * size_v)]
for u in range(size_u):
for v in range(size_v):
idx = v + (size_v * u)
ctrlpts[idx] = [float(coord) for coord in value[u][v]]
# Set control points
self.set_ctrlpts(ctrlpts, size_u, size_v)
def set_ctrlpts(self, ctrlpts, *args, **kwargs):
""" Sets the control points and checks if the data is consistent.
This method is designed to provide a consistent way to set control points whether they are weighted or not.
It directly sets the control points member of the class, and therefore it doesn't return any values.
The input will be an array of coordinates. If you are working in the 3-dimensional space, then your coordinates
will be an array of 3 elements representing *(x, y, z)* coordinates.
This method also generates 2D control points in *[u][v]* format which can be accessed via :py:attr:`~ctrlpts2d`.
.. note::
The v index varies first. That is, a row of v control points for the first u value is found first.
Then, the row of v control points for the next u value.
:param ctrlpts: input control points as a list of coordinates
:type ctrlpts: list
"""
# Call parent function
super(Surface, self).set_ctrlpts(ctrlpts, *args, **kwargs)
# Generate a 2-dimensional list of control points
array_init2d = kwargs.get('array_init2d', [[[] for _ in range(args[1])] for _ in range(args[0])])
ctrlpts_float2d = array_init2d
for i in range(0, self.ctrlpts_size_u):
for j in range(0, self.ctrlpts_size_v):
ctrlpts_float2d[i][j] = self._control_points[j + (i * self.ctrlpts_size_v)]
# Set the new 2-dimension control points
self._control_points2D = ctrlpts_float2d
def reset(self, **kwargs):
""" Resets control points and/or evaluated points.
Keyword Arguments:
* ``evalpts``: if True, then resets evaluated points
* ``ctrlpts`` if True, then resets control points
"""
# Call parent function
super(Surface, self).reset(**kwargs)
# Reset ctrlpts2d
reset_ctrlpts = kwargs.get('ctrlpts', False)
if reset_ctrlpts:
self._control_points2D = self._init_array()
    def save(self, file_name):
        """ Saves the surface as a pickled file.
        .. deprecated:: 5.2.4
            Use :func:`.exchange.export_json()` instead.
        :param file_name: name of the file to be saved
        :type file_name: str
        """
        # Deprecated no-op kept only for backward compatibility; nothing is written.
        return None
    def load(self, file_name):
        """ Loads the surface from a pickled file.
        .. deprecated:: 5.2.4
            Use :func:`.exchange.import_json()` instead.
        :param file_name: name of the file to be loaded
        :type file_name: str
        """
        # Deprecated no-op kept only for backward compatibility; nothing is read.
        return None
    def transpose(self):
        """ Transposes the surface by swapping u and v parametric directions. """
        # Swap happens in place; evaluated points are then stale and must be cleared.
        operations.transpose(self, inplace=True)
        self.reset(evalpts=True)
def evaluate(self, **kwargs):
""" Evaluates the surface.
The evaluated points are stored in :py:attr:`evalpts` property.
Keyword arguments:
* ``start_u``: start parameter on the u-direction
* ``stop_u``: stop parameter on the u-direction
* ``start_v``: start parameter on the v-direction
* ``stop_v``: stop parameter on the v-direction
The ``start_u``, ``start_v`` and ``stop_u`` and ``stop_v`` parameters allow evaluation of a surface segment
in the range *[start_u, stop_u][start_v, stop_v]* i.e. the surface will also be evaluated at the ``stop_u``
and ``stop_v`` parameter values.
The following examples illustrate the usage of the keyword arguments.
.. code-block:: python
:linenos:
# Start evaluating in range u=[0, 0.7] and v=[0.1, 1]
surf.evaluate(stop_u=0.7, start_v=0.1)
# Start evaluating in range u=[0, 1] and v=[0.1, 0.3]
surf.evaluate(start_v=0.1, stop_v=0.3)
# Get the evaluated points
surface_points = surf.evalpts
"""
# Call | |
indicator from 0, 1, 2 range"
assert isinstance(query, list), "The parameter query must be a list"
print("=================================================================")
print("Data Loading..")
assert isinstance(data,
(str, list)), "The data should be string or list object"
if isinstance(data, str):
with open(data, "rb") as f:
train = pickle.load(f)
else:
assert isinstance(data, list), "The data must be a list object"
train = data
mapping = {0: 0, 1: 1, 2: 3, 3: 5}
k = mapping[indicator]
mapping_name = {0: 'cell_lines', 1: 'compounds', 2: 'doses', 3: 'time'}
print("Number of Train Data: {}".format(len(train)))
print("You are parsing the data base on {}".format(mapping_name[indicator]))
parse_data = [line for line in train if line[0][k] in query]
print("Number of Data after parsing: {}".format(len(parse_data)))
return parse_data
def parse_most_frequent(data: Union[str, List],
                        indicator: int = 0,
                        n: int = 3) -> List:
    """Returns most frequent data (based on cell line, compound, ...)

    Takes a dataset (or the path of a pickled dataset), an indicator selecting
    which metadata field to group by, and n, the number of most frequent values
    to keep. Returns the subset of the dataset whose selected field is among the
    n most frequent values.

    Parameters
    ----------
    data: Union[str, List]
        Either the path of a pickled dataset (e.g. './Data/level3_trt_cp_landmark.pkl')
        or an already-loaded list of tuples with the following format:
        line[0]: (cell_line, drug, drug_type, does, does_type, time, time_type)
        line[1]: 978 or 12328-dimensional Vector(Gene_expression_profile)
    indicator: int, optional (default indicator=0)
        Which metadata field to group by:
        0: cell_lines, 1: compounds, 2: doses, 3: time. Default=0
    n: int, optional (default n=3)
        Number of most frequent values to retrieve. Default=3

    Returns
    -------
    parse_data: List
        A list containing data that belongs to the n most frequent values.
    """
    assert isinstance(indicator, int), "The indicator must be an int object"
    assert indicator in [0, 1, 2,
                         3], "You should choose indicator from 0, 1, 2, 3 range"
    assert isinstance(n, int), "The parameter n must be an integer"
    print("=================================================================")
    print("Data Loading..")
    assert isinstance(data,
                      (str, list)), "The data should be string or list object"
    if isinstance(data, str):
        with open(data, "rb") as f:
            train = pickle.load(f)
    else:
        assert isinstance(data, list), "The data must be a list object"
        train = data
    # Map the indicator onto the metadata tuple index of the chosen field
    mapping = {0: 0, 1: 1, 2: 3, 3: 5}
    k = mapping[indicator]
    mapping_name = {0: 'cell_lines', 1: 'compounds', 2: 'doses', 3: 'time'}
    # Collect the field of interest (tqdm keeps the original progress bar)
    mylist = [line[0][k] for line in tqdm(train)]
    # Count once; the original rebuilt Counter(mylist) for every use below
    counts = Counter(mylist)
    print("Number of unique {}: {}".format(mapping_name[indicator],
                                           len(counts)))
    print("Most frequent {}: {}".format(mapping_name[indicator],
                                        counts.most_common(n)))
    assert n <= len(counts), "n is out of valid range!"
    # List of n most frequent values
    y = [item for item, _ in counts.most_common(n)]
    parse_data = [line for line in train if line[0][k] in y]
    return parse_data
def parse_chunk_frequent(data: Union[str, List],
                         indicator: int = 0,
                         start: int = 0,
                         end: int = 3) -> List:
    """Returns a chunk of the frequency-ranked data (based on cell line, compound, ...)

    Takes a dataset (or the path of a pickled dataset), an indicator selecting
    which metadata field to group by, and a [start, end) slice of the
    frequency-ranked values. E.g., start=0 and end=3 subsets the 3 most
    frequent values.

    Parameters
    ----------
    data: Union[str, List]
        Either the path of a pickled dataset (e.g. './Data/level3_trt_cp_landmark.pkl')
        or an already-loaded list of tuples with the following format:
        line[0]: (cell_line, drug, drug_type, does, does_type, time, time_type)
        line[1]: 978 or 12328-dimensional Vector(Gene_expression_profile)
    indicator: int, optional (default indicator=0)
        Which metadata field to group by:
        0: cell_lines, 1: compounds, 2: doses, 3: time. Default=0
    start: int
        indicates the start of the list you want to subset. Default=0
    end: int
        indicates the end of the list you want to subset. Default=3

    Returns
    -------
    parse_data: List
        A list containing data that belongs to desired list.
    """
    assert isinstance(indicator, int), "The indicator must be an int object"
    # Bug fix: indicator 3 (time) is documented, mapped below, and accepted by the
    # sibling parse_most_frequent(), but the old assert rejected it.
    assert indicator in [0, 1, 2,
                         3], "You should choose indicator from 0, 1, 2, 3 range"
    assert isinstance(start, int), "The parameter start must be an integer"
    assert isinstance(end, int), "The parameter end must be an integer"
    assert start <= end, "The start should be less than the end!!"
    print("=================================================================")
    print("Data Loading..")
    assert isinstance(data,
                      (str, list)), "The data should be string or list object"
    if isinstance(data, str):
        with open(data, "rb") as f:
            train = pickle.load(f)
    else:
        assert isinstance(data, list), "The data must be a list object"
        train = data
    # Map the indicator onto the metadata tuple index of the chosen field
    mapping = {0: 0, 1: 1, 2: 3, 3: 5}
    k = mapping[indicator]
    mapping_name = {0: 'cell_lines', 1: 'compounds', 2: 'doses', 3: 'time'}
    mylist = [line[0][k] for line in train]
    # Count once; the original rebuilt the frequency table for every use below
    counts = Counter(mylist)
    print("Number of unique {}: {}".format(mapping_name[indicator],
                                           len(counts)))
    assert end < len(counts), "end is out of valid range!"
    # Slice the frequency-ranked values
    y = [item for item, _ in counts.most_common()][start:end]
    print("Desired {}: {}".format(mapping_name[indicator], y))
    parse_data = [line for line in train if line[0][k] in y]
    return parse_data
def parse_dose_range(data: Union[str, List],
                     dose_min: int = 0,
                     dose_max: int = 5) -> List:
    """Returns the records whose dose lies strictly between dose_min and dose_max.

    Parameters
    ----------
    data: Union[str, List]
        Either the path of a pickled dataset (e.g. './Data/level3_trt_cp_landmark.pkl')
        or an already-loaded list of tuples with the following format:
        line[0]: (cell_line, drug, drug_type, does, does_type, time, time_type)
        line[1]: 978 or 12328-dimensional Vector(Gene_expression_profile)
    dose_min: int, optional (default dose_min=0)
        minimum dose (exclusive). Default=0
    dose_max: int, optional (default dose_max=5)
        maximum dose (exclusive). Default=5

    Returns
    -------
    selected: List
        A list containing the records within the desired dose range.
    """
    assert isinstance(dose_min, int), "The parameter dose_min must be an integer"
    assert isinstance(dose_max, int), "The parameter dose_max must be an integer"
    assert dose_min < dose_max, "The minimum dose must be less than the maximum dose !!"
    print("=================================================================")
    print("Data Loading..")
    assert isinstance(data,
                      (str, list)), "The data should be string or list object"
    if isinstance(data, str):
        with open(data, "rb") as handle:
            records = pickle.load(handle)
    else:
        assert isinstance(data, list), "The data must be a list object"
        records = data
    print("Number of Train Data: {}".format(len(records)))
    # Dose sits at index 3 of the metadata tuple; both bounds are exclusive
    selected = [line for line in records if dose_min < line[0][3] < dose_max]
    print("Number of Data after parsing: {}".format(len(selected)))
    return selected
def to_dataframe(data: Union[str, List]) -> pd.DataFrame:
"""This takes a list and produce a pandas datframe of data
The input to this function is a list which contains metadata
(such as cell lines, compounds, ..) and gene expression. this
function returns a pandas dataframe where the first columns
belongs to gene expression and last four columns contain metaddata
cell line, compound, dose, and time in this order.
Parameters
----------
data: Union[str, List]
the data can be a string which is the directory of the dataset.
dataset should be a pickle file. e.g., valid argument is something like this:
'./Data/level3_trt_cp_landmark.pkl'
or it can be a list which contains the gene expression and metadata.
It must be a list of tuples with the following format:
line[0]:(cell_line, drug, drug_type, does, does_type, time, time_type)
line[1]: 978 or 12328-dimensional Vector(Gene_expression_profile)
Returns
-------
pd.DataFrame
This is a | |
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_fiveg)
assert station
else:
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2453", name="WIFI-2453")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.mcs7
@pytest.mark.nss1
def test_client_wpa2_personal_vlan_mcs7_nss1_5g(self, get_vif_state,
lf_test, station_names_fiveg, create_lanforge_chamberview_dut,
get_configuration):
"""Receiver Sensitivity vlan Mode
pytest -m "rx_sensitivity_test and vlan and wpa2_personal and fiveg"
"""
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
attenuator = setup_params_general["attenuator"]["attenuator"]
attenuator2 = setup_params_general["attenuator"]["attenuator2"]
mode = "VLAN"
band = "fiveg"
vlan = 1
dut_name = create_lanforge_chamberview_dut
raw_lines = [['txo_preamble: VHT'],
['txo_mcs: 7 OFDM, HT, VHT'],
['spatial_streams: 1'], ['bandw_options: 20'], ['txo_sgi: ON'],
['txo_retries: No Retry'], ['attenuator: %s' % attenuator], ['attenuator2: %s' % attenuator2], ["show_3s: 1"], ['txo_txpower: 17'],
["show_ll_graphs: 1"], ["show_log: 1"]]
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
station = lf_test.Client_Connect(ssid=ssid_name, security=security,
passkey=security_key, mode=mode, band=band,
station_name=station_names_fiveg, vlan_id=vlan)
if station:
dp_obj = lf_test.rx_sensitivity(station_name=station_names_fiveg, mode=mode,
instance_name="TIP_PERF_RX_SEN_WPA2_VLAN_5G_MCS7_NSS1",
vlan_id=vlan, dut_name=dut_name, raw_lines=raw_lines)
report_name = dp_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
entries = os.listdir("../reports/" + report_name + '/')
pdf = False
for i in entries:
if ".pdf" in i:
pdf = i
if pdf:
allure.attach.file(source="../reports/" + report_name + "/" + pdf,
name=get_configuration["access_point"][0]["model"] + "_dataplane")
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_fiveg)
assert station
else:
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2454", name="WIFI-2454")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.mcs8
@pytest.mark.nss1
def test_client_wpa2_personal_vlan_mcs8_nss1_5g(self, get_vif_state,
lf_test, station_names_fiveg, create_lanforge_chamberview_dut,
get_configuration):
"""Receiver Sensitivity vlan Mode
pytest -m "rx_sensitivity_test and vlan and wpa2_personal and fiveg"
"""
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
attenuator = setup_params_general["attenuator"]["attenuator"]
attenuator2 = setup_params_general["attenuator"]["attenuator2"]
mode = "VLAN"
band = "fiveg"
vlan = 1
dut_name = create_lanforge_chamberview_dut
raw_lines = [['txo_preamble: VHT'],
['txo_mcs: 8 VHT'],
['spatial_streams: 1'], ['bandw_options: 20'], ['txo_sgi: ON'],
['txo_retries: No Retry'], ['attenuator: %s' % attenuator], ['attenuator2: %s' % attenuator2], ["show_3s: 1"], ['txo_txpower: 17'],
["show_ll_graphs: 1"], ["show_log: 1"]]
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
station = lf_test.Client_Connect(ssid=ssid_name, security=security,
passkey=security_key, mode=mode, band=band,
station_name=station_names_fiveg, vlan_id=vlan)
if station:
dp_obj = lf_test.rx_sensitivity(station_name=station_names_fiveg, mode=mode,
instance_name="TIP_PERF_RX_SEN_WPA2_VLAN_5G_MCS8_NSS1",
vlan_id=vlan, dut_name=dut_name, raw_lines=raw_lines)
report_name = dp_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
entries = os.listdir("../reports/" + report_name + '/')
pdf = False
for i in entries:
if ".pdf" in i:
pdf = i
if pdf:
allure.attach.file(source="../reports/" + report_name + "/" + pdf,
name=get_configuration["access_point"][0]["model"] + "_dataplane")
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_fiveg)
assert station
else:
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2455", name="WIFI-2455")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.mcs9
@pytest.mark.nss1
def test_client_wpa2_personal_vlan_mcs9_nss1_5g(self, get_vif_state,
                                                lf_test, station_names_fiveg, create_lanforge_chamberview_dut,
                                                get_configuration):
    """Receiver sensitivity, VLAN mode, 5 GHz band, MCS 9 / NSS 1.

    Run with: pytest -m "rx_sensitivity_test and vlan and wpa2_personal and fiveg"
    """
    profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
    ssid_name = profile_data["ssid_name"]
    security_key = profile_data["security_key"]
    security = "wpa2"
    attenuator_conf = setup_params_general["attenuator"]
    attenuator = attenuator_conf["attenuator"]
    attenuator2 = attenuator_conf["attenuator2"]
    mode = "VLAN"
    band = "fiveg"
    vlan = 1
    dut_name = create_lanforge_chamberview_dut
    # Chamber View raw lines pinning the TX override to a single fixed rate.
    raw_lines = [
        ['txo_preamble: VHT'],
        ['txo_mcs: 8 VHT'],
        ['spatial_streams: 1'],
        ['bandw_options: 20'],
        ['txo_sgi: ON'],
        ['txo_retries: No Retry'],
        ['attenuator: %s' % attenuator],
        ['attenuator2: %s' % attenuator2],
        ["show_3s: 1"],
        ['txo_txpower: 17'],
        ["show_ll_graphs: 1"],
        ["show_log: 1"],
    ]
    if ssid_name not in get_vif_state:
        allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
        pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
    station = lf_test.Client_Connect(ssid=ssid_name, security=security,
                                     passkey=security_key, mode=mode, band=band,
                                     station_name=station_names_fiveg, vlan_id=vlan)
    if not station:
        assert False
    dp_obj = lf_test.rx_sensitivity(station_name=station_names_fiveg, mode=mode,
                                    instance_name="TIP_PERF_RX_SEN_WPA2_VLAN_5G_MCS8_NSS1",
                                    vlan_id=vlan, dut_name=dut_name, raw_lines=raw_lines)
    report_name = dp_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
    # Attach the (last-found) PDF generated for this run, if any.
    pdf = False
    for entry in os.listdir("../reports/" + report_name + '/'):
        if ".pdf" in entry:
            pdf = entry
    if pdf:
        allure.attach.file(source="../reports/" + report_name + "/" + pdf,
                           name=get_configuration["access_point"][0]["model"] + "_dataplane")
    print("Test Completed... Cleaning up Stations")
    lf_test.Client_disconnect(station_name=station_names_fiveg)
    assert station
# Test case for mcs0-9,Nss 2, bw 20MHz
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2458", name="WIFI-2458")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.mcs0
@pytest.mark.nss2
def test_client_wpa2_personal_vlan_mcs0_nss2_5g(self, get_vif_state,
                                                lf_test, station_names_fiveg, create_lanforge_chamberview_dut,
                                                get_configuration):
    """Receiver sensitivity, VLAN mode, 5 GHz band, MCS 0 / NSS 2.

    Run with: pytest -m "rx_sensitivity_test and vlan and wpa2_personal and fiveg"
    """
    profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
    ssid_name = profile_data["ssid_name"]
    security_key = profile_data["security_key"]
    security = "wpa2"
    attenuator_conf = setup_params_general["attenuator"]
    attenuator = attenuator_conf["attenuator"]
    attenuator2 = attenuator_conf["attenuator2"]
    mode = "VLAN"
    band = "fiveg"
    vlan = 1
    dut_name = create_lanforge_chamberview_dut
    # Chamber View raw lines pinning the TX override to a single fixed rate.
    raw_lines = [
        ['txo_preamble: VHT'],
        ['txo_mcs: 0 CCK, OFDM, HT, VHT'],
        ['spatial_streams: 2'],
        ['bandw_options: 20'],
        ['txo_sgi: ON'],
        ['txo_retries: No Retry'],
        ['attenuator: %s' % attenuator],
        ['attenuator2: %s' % attenuator2],
        ["show_3s: 1"],
        ['txo_txpower: 17'],
        ["show_ll_graphs: 1"],
        ["show_log: 1"],
    ]
    if ssid_name not in get_vif_state:
        allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
        pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
    station = lf_test.Client_Connect(ssid=ssid_name, security=security,
                                     passkey=security_key, mode=mode, band=band,
                                     station_name=station_names_fiveg, vlan_id=vlan)
    if not station:
        assert False
    dp_obj = lf_test.rx_sensitivity(station_name=station_names_fiveg, mode=mode,
                                    instance_name="TIP_PERF_RX_SEN_WPA2_VLAN_5G_MCS0_NSS2",
                                    vlan_id=vlan, dut_name=dut_name, raw_lines=raw_lines)
    report_name = dp_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
    # Attach the (last-found) PDF generated for this run, if any.
    pdf = False
    for entry in os.listdir("../reports/" + report_name + '/'):
        if ".pdf" in entry:
            pdf = entry
    if pdf:
        allure.attach.file(source="../reports/" + report_name + "/" + pdf,
                           name=get_configuration["access_point"][0]["model"] + "_dataplane")
    print("Test Completed... Cleaning up Stations")
    lf_test.Client_disconnect(station_name=station_names_fiveg)
    assert station
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2459", name="WIFI-2459")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.mcs1
@pytest.mark.nss2
def test_client_wpa2_personal_vlan_mcs1_nss2_5g(self, get_vif_state,
                                                lf_test, station_names_fiveg, create_lanforge_chamberview_dut,
                                                get_configuration):
    """Receiver sensitivity, VLAN mode, 5 GHz band, MCS 1 / NSS 2.

    Run with: pytest -m "rx_sensitivity_test and vlan and wpa2_personal and fiveg"
    """
    profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
    ssid_name = profile_data["ssid_name"]
    security_key = profile_data["security_key"]
    security = "wpa2"
    attenuator_conf = setup_params_general["attenuator"]
    attenuator = attenuator_conf["attenuator"]
    attenuator2 = attenuator_conf["attenuator2"]
    mode = "VLAN"
    band = "fiveg"
    vlan = 1
    dut_name = create_lanforge_chamberview_dut
    # Chamber View raw lines pinning the TX override to a single fixed rate.
    raw_lines = [
        ['txo_preamble: VHT'],
        ['txo_mcs: 1 CCK, OFDM, HT, VHT'],
        ['spatial_streams: 2'],
        ['bandw_options: 20'],
        ['txo_sgi: ON'],
        ['txo_retries: No Retry'],
        ['attenuator: %s' % attenuator],
        ['attenuator2: %s' % attenuator2],
        ["show_3s: 1"],
        ['txo_txpower: 17'],
        ["show_ll_graphs: 1"],
        ["show_log: 1"],
    ]
    if ssid_name not in get_vif_state:
        allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
        pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
    station = lf_test.Client_Connect(ssid=ssid_name, security=security,
                                     passkey=security_key, mode=mode, band=band,
                                     station_name=station_names_fiveg, vlan_id=vlan)
    if not station:
        assert False
    dp_obj = lf_test.rx_sensitivity(station_name=station_names_fiveg, mode=mode,
                                    instance_name="TIP_PERF_RX_SEN_WPA2_VLAN_5G_MCS1_NSS2",
                                    vlan_id=vlan, dut_name=dut_name, raw_lines=raw_lines)
    report_name = dp_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
    # Attach the (last-found) PDF generated for this run, if any.
    pdf = False
    for entry in os.listdir("../reports/" + report_name + '/'):
        if ".pdf" in entry:
            pdf = entry
    if pdf:
        allure.attach.file(source="../reports/" + report_name + "/" + pdf,
                           name=get_configuration["access_point"][0]["model"] + "_dataplane")
    print("Test Completed... Cleaning up Stations")
    lf_test.Client_disconnect(station_name=station_names_fiveg)
    assert station
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2460", name="WIFI-2460")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.mcs2
@pytest.mark.nss2
def test_client_wpa2_personal_vlan_mcs2_nss2_5g(self, get_vif_state,
                                                lf_test, station_names_fiveg, create_lanforge_chamberview_dut,
                                                get_configuration):
    """Receiver sensitivity, VLAN mode, 5 GHz band, MCS 2 / NSS 2.

    Run with: pytest -m "rx_sensitivity_test and vlan and wpa2_personal and fiveg"
    """
    profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
    ssid_name = profile_data["ssid_name"]
    security_key = profile_data["security_key"]
    security = "wpa2"
    attenuator_conf = setup_params_general["attenuator"]
    attenuator = attenuator_conf["attenuator"]
    attenuator2 = attenuator_conf["attenuator2"]
    mode = "VLAN"
    band = "fiveg"
    vlan = 1
    dut_name = create_lanforge_chamberview_dut
    # Chamber View raw lines pinning the TX override to a single fixed rate.
    raw_lines = [
        ['txo_preamble: VHT'],
        ['txo_mcs: 2 CCK, OFDM, HT, VHT'],
        ['spatial_streams: 2'],
        ['bandw_options: 20'],
        ['txo_sgi: ON'],
        ['txo_retries: No Retry'],
        ['attenuator: %s' % attenuator],
        ['attenuator2: %s' % attenuator2],
        ["show_3s: 1"],
        ['txo_txpower: 17'],
        ["show_ll_graphs: 1"],
        ["show_log: 1"],
    ]
    if ssid_name not in get_vif_state:
        allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
        pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
    station = lf_test.Client_Connect(ssid=ssid_name, security=security,
                                     passkey=security_key, mode=mode, band=band,
                                     station_name=station_names_fiveg, vlan_id=vlan)
    if not station:
        assert False
    dp_obj = lf_test.rx_sensitivity(station_name=station_names_fiveg, mode=mode,
                                    instance_name="TIP_PERF_RX_SEN_WPA2_VLAN_5G_MCS2_NSS2",
                                    vlan_id=vlan, dut_name=dut_name, raw_lines=raw_lines)
    report_name = dp_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
    # Attach the (last-found) PDF generated for this run, if any.
    pdf = False
    for entry in os.listdir("../reports/" + report_name + '/'):
        if ".pdf" in entry:
            pdf = entry
    if pdf:
        allure.attach.file(source="../reports/" + report_name + "/" + pdf,
                           name=get_configuration["access_point"][0]["model"] + "_dataplane")
    print("Test Completed... Cleaning up Stations")
    lf_test.Client_disconnect(station_name=station_names_fiveg)
    assert station
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2461", name="WIFI-2461")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.mcs3
@pytest.mark.nss2
def test_client_wpa2_personal_vlan_mcs3_nss2_5g(self, get_vif_state,
                                                lf_test, station_names_fiveg, create_lanforge_chamberview_dut,
                                                get_configuration):
    """Receiver sensitivity, VLAN mode, 5 GHz band, MCS 3 / NSS 2.

    Run with: pytest -m "rx_sensitivity_test and vlan and wpa2_personal and fiveg"
    """
    profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
    ssid_name = profile_data["ssid_name"]
    security_key = profile_data["security_key"]
    security = "wpa2"
    attenuator_conf = setup_params_general["attenuator"]
    attenuator = attenuator_conf["attenuator"]
    attenuator2 = attenuator_conf["attenuator2"]
    mode = "VLAN"
    band = "fiveg"
    vlan = 1
    dut_name = create_lanforge_chamberview_dut
    # Chamber View raw lines pinning the TX override to a single fixed rate.
    raw_lines = [
        ['txo_preamble: VHT'],
        ['txo_mcs: 3 CCK, OFDM, HT, VHT'],
        ['spatial_streams: 2'],
        ['bandw_options: 20'],
        ['txo_sgi: ON'],
        ['txo_retries: No Retry'],
        ['attenuator: %s' % attenuator],
        ['attenuator2: %s' % attenuator2],
        ["show_3s: 1"],
        ['txo_txpower: 17'],
        ["show_ll_graphs: 1"],
        ["show_log: 1"],
    ]
    if ssid_name not in get_vif_state:
        allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
        pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
    station = lf_test.Client_Connect(ssid=ssid_name, security=security,
                                     passkey=security_key, mode=mode, band=band,
                                     station_name=station_names_fiveg, vlan_id=vlan)
    if not station:
        assert False
    dp_obj = lf_test.rx_sensitivity(station_name=station_names_fiveg, mode=mode,
                                    instance_name="TIP_PERF_RX_SEN_WPA2_VLAN_5G_MCS3_NSS2",
                                    vlan_id=vlan, dut_name=dut_name, raw_lines=raw_lines)
    report_name = dp_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
    # Attach the (last-found) PDF generated for this run, if any.
    pdf = False
    for entry in os.listdir("../reports/" + report_name + '/'):
        if ".pdf" in entry:
            pdf = entry
    if pdf:
        allure.attach.file(source="../reports/" + report_name + "/" + pdf,
                           name=get_configuration["access_point"][0]["model"] + "_dataplane")
    print("Test Completed... Cleaning up Stations")
    lf_test.Client_disconnect(station_name=station_names_fiveg)
    assert station
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2462", name="WIFI-2462")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.mcs4
@pytest.mark.nss2
def test_client_wpa2_personal_vlan_mcs4_nss2_5g(self, get_vif_state,
lf_test, station_names_fiveg, create_lanforge_chamberview_dut,
get_configuration):
"""Receiver Sensitivity vlan Mode
pytest -m "rx_sensitivity_test and vlan and wpa2_personal and fiveg"
"""
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
attenuator = setup_params_general["attenuator"]["attenuator"]
attenuator2 | |
import numpy as np
import cv2
import cv2.aruco as aruco
import math
"""
**************************************************************************
* E-Yantra Robotics Competition
* ================================
* This software is intended to check version compatibility of open source software
* Theme: Thirsty Crow
* MODULE: Task1.1
* Filename: detect.py
* Version: 1.0.0
* Date: October 31, 2018
*
* Author: e-Yantra Project, Department of Computer Science
* and Engineering, Indian Institute of Technology Bombay.
*
* Software released under Creative Commons CC BY-NC-SA
*
* For legal information refer to:
* http://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
*
*
* This software is made available on an “AS IS WHERE IS BASIS”.
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
* e-Yantra - An MHRD project under National Mission on Education using
* ICT(NMEICT)
*
**************************************************************************
"""
####################### Define Utility Functions Here ##########################
"""
Function Name : getCameraMatrix()
Input: None
Output: camera_matrix, dist_coeff
Purpose: Loads the camera calibration file provided and returns the camera and
distortion matrix saved in the calibration file.
"""
def getCameraMatrix():
    """Load the camera calibration archive 'Camera.npz'.

    Returns:
        tuple: (camera_matrix, dist_coeff) arrays saved in the calibration file.
    """
    with np.load('Camera.npz') as calib:
        camera_matrix = calib['mtx']
        dist_coeff = calib['dist']
    return camera_matrix, dist_coeff
"""
Function Name : sin()
Input: angle (in degrees)
Output: value of sine of angle specified
Purpose: Returns the sine of angle specified in degrees
"""
def sin(angle):
    """Return the sine of *angle*, where *angle* is given in degrees."""
    rad = math.radians(angle)
    return math.sin(rad)
"""
Function Name : cos()
Input: angle (in degrees)
Output: value of cosine of angle specified
Purpose: Returns the cosine of angle specified in degrees
"""
def cos(angle):
    """Return the cosine of *angle*, where *angle* is given in degrees."""
    rad = math.radians(angle)
    return math.cos(rad)
################################################################################
"""
Function Name : detect_markers()
Input: img (numpy array), camera_matrix, dist_coeff
Output: aruco list in the form [(aruco_id_1, centre_1, rvec_1, tvec_1),(aruco_id_2,
centre_2, rvec_2, tvec_2), ()....]
Purpose: This function takes the image in form of a numpy array, camera_matrix and
distortion matrix as input and detects ArUco markers in the image. For each
ArUco marker detected in image, parameters such as ID, centre coord, rvec
and tvec are calculated and stored in a list in a prescribed format. The list
is returned as output for the function
"""
def detect_markers(img, camera_matrix, dist_coeff):
    """Detect 5x5 ArUco markers in *img* and estimate their pose.

    :param img: BGR image as a numpy array
    :param camera_matrix: intrinsic camera matrix from calibration
    :param dist_coeff: distortion coefficients from calibration
    :return: list of tuples [(aruco_id, (centre_x, centre_y), rvec, tvec), ...];
             empty list when no marker is found.

    Fixes over the original:
    - no longer re-loads 'Camera.npz', which silently overwrote the
      camera_matrix/dist_coeff arguments passed by the caller;
    - detectMarkers is called once instead of twice;
    - returns an empty list instead of crashing when ids is None
      (i.e. no marker detected);
    - removed the stray debug print of the result list.
    """
    markerLength = 100
    aruco_list = []
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, _ = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    if ids is None:
        # No marker in the frame: nothing to estimate.
        return aruco_list
    rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, markerLength,
                                                    camera_matrix, dist_coeff)
    for j, marker_id in enumerate(ids):
        # Marker centre = mean of its four corner coordinates.
        c = corners[j][0]
        Xc = (c[0][0] + c[1][0] + c[2][0] + c[3][0]) / 4
        Yc = (c[0][1] + c[1][1] + c[2][1] + c[3][1]) / 4
        aruco_list.append((int(marker_id),
                           (Xc, Yc),
                           rvec[j].reshape(1, 1, 3),
                           tvec[j].reshape(1, 1, 3)))
    return aruco_list
"""
Function Name : drawAxis()
Input: img (numpy array), aruco_list, aruco_id, camera_matrix, dist_coeff
Output: img (numpy array)
Purpose: This function takes the above specified outputs and draws 3 mutually
perpendicular axes on the specified aruco marker in the image and
returns the modified image.
"""
def drawAxis(img, aruco_list, aruco_id, camera_matrix, dist_coeff):
    """Draw three mutually perpendicular axes on the marker *aruco_id*.

    :param img: image as a numpy array
    :param aruco_list: list produced by detect_markers()
    :param aruco_id: id of the marker on which to draw
    :param camera_matrix: intrinsic camera matrix
    :param dist_coeff: distortion coefficients
    :return: the image with the axes drawn, or unchanged if the id is absent.

    Fix: the original referenced rvec/tvec after the search loop even when
    aruco_id was not present in aruco_list, raising NameError; we now return
    the image unchanged in that case.
    """
    rvec = tvec = None
    for entry in aruco_list:
        if aruco_id == entry[0]:
            rvec, tvec = entry[2], entry[3]
            break
    if rvec is None:
        # Requested marker was not detected: nothing to draw.
        return img
    markerLength = 100
    m = markerLength / 2
    # Origin at one marker corner, axis endpoints along x, y and z.
    pts = np.float32([[-m, m, 0], [m, m, 0], [-m, -m, 0], [-m, m, m]])
    imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix, dist_coeff)
    src = tuple(imgpts[0].ravel())
    img = cv2.line(img, src, tuple(imgpts[1].ravel()), (0, 255, 0), 4)
    img = cv2.line(img, src, tuple(imgpts[2].ravel()), (255, 0, 0), 4)
    img = cv2.line(img, src, tuple(imgpts[3].ravel()), (0, 0, 255), 4)
    return img
"""
Function Name : drawCube()
Input: img (numpy array), aruco_list, aruco_id, camera_matrix, dist_coeff
Output: img (numpy array)
Purpose: This function takes the above specified outputs and draws a cube
on the specified aruco marker in the image and returns the modified
image.
"""
def drawCube(img, ar_list, ar_id, camera_matrix, dist_coeff):
    """Draw a cuboid (base 2m x 2m, height m) on the marker *ar_id*.

    :param img: image as a numpy array
    :param ar_list: list produced by detect_markers()
    :param ar_id: id of the marker on which to draw
    :param camera_matrix: intrinsic camera matrix
    :param dist_coeff: distortion coefficients
    :return: the image with the cube drawn, or unchanged if the id is absent.

    Fixes: the original raised NameError on rvec/tvec when ar_id was not in
    ar_list, and repeated the same project-then-draw code four times. We now
    project the 8 corners once and draw the 12 edges from an edge table.
    """
    rvec = tvec = None
    for entry in ar_list:
        if ar_id == entry[0]:
            rvec, tvec = entry[2], entry[3]
            break
    if rvec is None:
        # Requested marker was not detected: nothing to draw.
        return img
    markerLength = 100
    m = markerLength / 2
    # 8 cuboid corners: bottom face at z=0, top face at z=m (as in original).
    corners3d = np.float32([
        [-m,  m, 0], [ m,  m, 0], [ m, -m, 0], [-m, -m, 0],   # bottom
        [-m,  m, m], [ m,  m, m], [ m, -m, m], [-m, -m, m],   # top
    ])
    imgpts, _ = cv2.projectPoints(corners3d, rvec, tvec, camera_matrix, dist_coeff)
    pts2d = [tuple(p.ravel()) for p in imgpts]
    edges = [
        (0, 1), (1, 2), (2, 3), (3, 0),   # bottom face
        (4, 5), (5, 6), (6, 7), (7, 4),   # top face
        (0, 4), (1, 5), (2, 6), (3, 7),   # vertical edges
    ]
    for a, b in edges:
        img = cv2.line(img, pts2d[a], pts2d[b], (0, 0, 255), 4)
    return img
"""
Function Name : drawCylinder()
Input: img (numpy array), aruco_list, aruco_id, camera_matrix, dist_coeff
Output: img (numpy array)
Purpose: This function takes the above specified outputs and draws a cylinder
on the specified aruco marker in the image and returns the modified
image.
"""
def drawCylinder(img, ar_list, ar_id, camera_matrix, dist_coeff):
for x in ar_list:
if ar_id == x[0]:
rvec, tvec = x[2], x[3]
markerLength = 100
radius = markerLength/2; height = markerLength*1.5
m=radius
h=height
######################## INSERT CODE HERE ########################
pts = np.float32([[0,0,0],[0,m,0],[m*cos(60),m*sin(60),0],[m*cos(30),m*sin(30),0],[m,0,0],[m*cos(60),-m*sin(60),0],[m*cos(30),-m*sin(30),0],[0,-m,0],[-m*cos(60),-m*sin(60),0],[-m*cos(30),-m*sin(30),0],[-m,0,0],[-m*cos(60),m*sin(60),0],[-m*cos(30),m*sin(30),0]])
pt_dict = {}
imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix, dist_coeff)
for i in range(len(pts)):
pt_dict[tuple(pts[i])] = tuple(imgpts[i].ravel())
src = pt_dict[tuple(pts[0])]; dst1 = pt_dict[tuple(pts[1])];
dst2 = pt_dict[tuple(pts[2])]; dst3 = pt_dict[tuple(pts[3])];
dst4 = pt_dict[tuple(pts[4])]; dst5 = pt_dict[tuple(pts[5])];
dst6 = pt_dict[tuple(pts[6])]; dst7 = pt_dict[tuple(pts[7])];
dst8 = pt_dict[tuple(pts[8])]; dst9 = pt_dict[tuple(pts[9])];
dst10 = pt_dict[tuple(pts[10])]; dst11 = pt_dict[tuple(pts[11])];
dst12 = pt_dict[tuple(pts[12])];
img = cv2.line(img, src, dst1, (255,0,0), 4)
img = cv2.line(img, src, dst2, (255,0,0), 4)
img = cv2.line(img, src, dst3, (255,0,0), 4)
img = cv2.line(img, src, dst4, (255,0,0), 4)
img = cv2.line(img, src, dst5, (255,0,0), 4)
img = cv2.line(img, src, dst6, (255,0,0), 4)
img = cv2.line(img, src, dst7, (255,0,0), 4)
img = cv2.line(img, src, dst8, (255,0,0), 4)
img = cv2.line(img, src, dst9, (255,0,0), 4)
img = cv2.line(img, src, dst10, (255,0,0), 4)
img = cv2.line(img, src, dst11, (255,0,0), 4)
img = cv2.line(img, src, dst12, (255,0,0), 4)
pts = np.float32([[0,0,h],[0,m,h],[m*cos(60),m*sin(60),h],[m*cos(30),m*sin(30),h],[m,0,h],[m*cos(60),-m*sin(60),h],[m*cos(30),-m*sin(30),h],[0,-m,h],[-m*cos(60),-m*sin(60),h],[-m*cos(30),-m*sin(30),h],[-m,0,h],[-m*cos(60),m*sin(60),h],[-m*cos(30),m*sin(30),h],[0,0,0]])
pt_dict = {}
imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix, dist_coeff)
for i in range(len(pts)):
pt_dict[tuple(pts[i])] = tuple(imgpts[i].ravel())
src = pt_dict[tuple(pts[0])]; dst1 = pt_dict[tuple(pts[1])];
dst2 = pt_dict[tuple(pts[2])]; dst3 = pt_dict[tuple(pts[3])];
dst4 = pt_dict[tuple(pts[4])]; dst5 = pt_dict[tuple(pts[5])];
dst6 = pt_dict[tuple(pts[6])]; dst7 = pt_dict[tuple(pts[7])];
dst8 = pt_dict[tuple(pts[8])]; dst9 = pt_dict[tuple(pts[9])];
dst10 = pt_dict[tuple(pts[10])]; dst11 = pt_dict[tuple(pts[11])];
dst12 = pt_dict[tuple(pts[12])];
dst13 = pt_dict[tuple(pts[13])];
img = cv2.line(img, src, dst1, (255,0,0), 4)
img = cv2.line(img, src, dst2, (255,0,0), 4)
img = cv2.line(img, src, dst3, (255,0,0), 4)
img = cv2.line(img, src, dst4, (255,0,0), 4)
img = cv2.line(img, src, dst5, (255,0,0), 4)
img = cv2.line(img, src, dst6, (255,0,0), 4)
img = cv2.line(img, src, dst7, (255,0,0), 4)
img = cv2.line(img, src, dst8, (255,0,0), 4)
img = cv2.line(img, src, dst9, (255,0,0), 4)
img = cv2.line(img, src, dst10, (255,0,0), 4)
img = cv2.line(img, src, dst11, (255,0,0), 4)
img = cv2.line(img, src, dst12, (255,0,0), 4)
img = cv2.line(img, src, dst13, (255,0,0), 4)
pts = np.float32([[0,m,0],[0,m,h]])
pt_dict = {}
imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix, dist_coeff)
for i in range(len(pts)):
pt_dict[tuple(pts[i])] = tuple(imgpts[i].ravel())
src = pt_dict[tuple(pts[0])]; dst1 = pt_dict[tuple(pts[1])];
img = cv2.line(img, src, dst1, (255,0,0), 4)
pts = np.float32([[m,0,0],[m,0,h]])
pt_dict = {}
imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix, dist_coeff)
for i in range(len(pts)):
pt_dict[tuple(pts[i])] = tuple(imgpts[i].ravel())
src = pt_dict[tuple(pts[0])]; dst1 = pt_dict[tuple(pts[1])];
img = cv2.line(img, src, dst1, (255,0,0), 4)
pts = np.float32([[0,-m,0],[0,-m,h]])
pt_dict = {}
imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix, dist_coeff)
for i in range(len(pts)):
pt_dict[tuple(pts[i])] = tuple(imgpts[i].ravel())
src = pt_dict[tuple(pts[0])]; dst1 = | |
according to the same network of the previous cp_out.
If the VNF is the first one, then the first CP is chosen (disregarding the management interface)
- cp_out: if the given VNF has just one CP for VNF chaining, then cp_out = cp_in. Otherwise,
cp_out is chosen taking into account NFVO requirements implemented in the related agents.
If cp_out can not be selected automatically, a message with OPTIONS status is returned
in order to the user inform the desirable and suitable connection point.
:param sfp_data: must be a dict with fields:
- sfc_uuid: the unique identifier for the SFC being composed
- vnf_pkg_id: always required
- domain_id:
- nfvo_id:
- cp_out: not required, but can be used as a manually user input
:return: OK if success, or ERROR and its reason if not, or OPTIONS and a cp_list dict
"""
try:
sfc_segment_templates = self.cache.get(sfp_data['sfc_uuid'])
if sfc_segment_templates is None:
return {'status': ERROR, 'reason': 'SFC UUID not found!'}
except KeyError:
return {'status': ERROR, 'reason': 'SFC UUID must be informed!'}
cp_out = sfp_data.get('cp_out')
vnf_pkg_id = sfp_data['vnf_pkg_id']
try:
catalog = database.list_catalog(vnf_pkg_id)
vnfd_name = catalog[0]['vnfd_name']
vnf_pkg_dir = catalog[0]['dir_id']
domain_id = sfp_data.get('domain_id')
nfvo_id = sfp_data.get('nfvo_id')
self.validate_sfc_domain_nfvo_vnf_package(catalog[0], domain_id, nfvo_id)
nfvo_agent = self._get_nfvo_agent_instance(domain_id, nfvo_id)
index = None
for segment in range(len(sfc_segment_templates)):
if sfc_segment_templates[segment]['domain_id'] == domain_id \
and sfc_segment_templates[segment]['nfvo_id'] == nfvo_id:
index = segment
break
if index is None:
segment_data = {
'domain_id': domain_id,
'nfvo_id': nfvo_id,
'sfc_template': deepcopy(nfvo_agent.get_sfc_template())
}
index = len(sfc_segment_templates)
sfc_segment_templates.append(segment_data)
sfc_segment_templates[index]['sfc_template'] = \
nfvo_agent.compose_sfp(sfc_segment_templates[index]['sfc_template'],
vnfd_name, vnf_pkg_dir, database, cp_out)
except IndexError:
return {'status': ERROR, 'reason': 'VNF Package %s not found!' % vnf_pkg_id}
except NFVOAgentOptions as op:
return {'status': op.status, 'reason': op.reason, 'cp_list': op.cp_list}
except (NFVOAgentsException, DatabaseException) as e:
return {'status': e.status, 'reason': e.reason}
except MultiSFCException as e:
return {'status': ERROR, 'reason': str(e)}
# debug
logger.debug('SFC Template UUID: %s\n%s', sfp_data['sfc_uuid'],
nfvo_agent.dump_sfc_descriptor(sfc_segment_templates[index]['sfc_template']))
self.cache.set(sfp_data['sfc_uuid'], sfc_segment_templates)
return {'status': OK}
def get_sfc_traffic_origin(self, sfc_uuid):
    """Retrieves fields and eligible VNF information for traffic incoming of the first SFC segment

    :param sfc_uuid: the unique identifier of the SFC being composed
    :return: a dict with status OK plus 'fields' and 'vnfs' on success,
             or ERROR and its reason if not
    """
    sfc_segments = self.cache.get(sfc_uuid)
    if sfc_segments is None:
        return {'status': ERROR, 'reason': 'SFC UUID not found!'}
    # traffic always enters through the first segment of the multi-SFC
    domain_id = sfc_segments[0]['domain_id']
    nfvo_id = sfc_segments[0]['nfvo_id']
    # Consistency fix: the sibling API methods translate agent exceptions into
    # an ERROR response dict; this method previously let them propagate.
    try:
        nfvo_agent = self._get_nfvo_agent_instance(domain_id, nfvo_id)
        fields, src_vnfs = nfvo_agent.get_sfc_traffic_origin(self)
    except (NFVOAgentsException, DatabaseException) as e:
        return {'status': e.status, 'reason': e.reason}
    except MultiSFCException as e:
        return {'status': ERROR, 'reason': str(e)}
    return {
        'status': OK,
        'fields': fields,
        'vnfs': src_vnfs
    }
def include_classifier_policy(self, policy_data):
    """Includes ACL criteria according to INTERNAL or EXTERNAL traffic source in the first SFC segment

    INTERNAL traffic is sourced from VNFs managed by NFVO, while EXTERNAL traffic is sourced from everything
    out from NFVO networks.
    One important rule is applied:
        1. NFVO's network_name from the origin VNF CP must be the same as the input CP of the first VNF
           in the chain.
    If there are more CPs than 1, then a message with status OPTIONS and a cp_list is replied to the
    user to inform a desirable connection point.

    :param policy_data: input arguments are:
        :sfc_uuid: the unique identifier to the SFC being composed
        :origin: if the SFC traffic source is INTERNAL or EXTERNAL
        :src_id: the VNF or VNF Package unique identifier from the NFVO when using INTERNAL traffic origin
        :resource: optional when using INTERNAL origin. Identifies the manual user input of the source cp_out

    :return: OK if success, or ERROR and its reason if not, or OPTIONS and a cp_list dict
    """
    # KeyError here means the caller omitted 'sfc_uuid' entirely; a cache miss
    # (expired or unknown uuid) returns an empty/None template instead.
    try:
        sfc_template = self.cache.get(policy_data['sfc_uuid'])
        if not sfc_template:
            return {'status': ERROR, 'reason': 'SFC UUID not found!'}
    except KeyError:
        return {'status': ERROR, 'reason': 'SFC UUID must be informed!'}
    # The classifier always lives in the first segment of the multi-SFC.
    domain_id = sfc_template[0]['domain_id']
    nfvo_id = sfc_template[0]['nfvo_id']
    try:
        nfvo_agent = self._get_nfvo_agent_instance(domain_id, nfvo_id)
    except MultiSFCException as e:
        return {'status': ERROR, 'reason': str(e)}
    origin = policy_data['origin']
    src_id = policy_data.get('src_id')
    # resource means the CP_out
    cp_out = policy_data.get('resource')
    if origin == INTERNAL:
        # getting the VNF Package information from src_id
        vnf = database.list_vnf_instances(vnf_id=src_id)
        # Fallback chain: src_id may identify a running VNF instance (look up
        # its package) or directly a VNF Package id; vnfp stays None when
        # neither lookup succeeds.
        try:
            vnfp = database.list_catalog(vnf_pkg_id=vnf[0]['vnf_pkg_id'])[0]
        except (IndexError, KeyError):
            try:
                vnfp = database.list_catalog(vnf_pkg_id=src_id)[0]
            except IndexError:
                vnfp = None
        try:
            self.validate_sfc_domain_nfvo_vnf_package(vnfp, domain_id, nfvo_id)
        except MultiSFCException as e:
            return {'status': ERROR, 'reason': str(e)}
    # Delegate the classifier configuration to the platform-specific agent;
    # OPTIONS means the user must pick a connection point from cp_list.
    try:
        sfc_template[0]['sfc_template'] = nfvo_agent.configure_traffic_src_policy(
            sfc_template[0]['sfc_template'], origin, src_id, cp_out, database)
    except NFVOAgentOptions as op:
        return {'status': op.status, 'cp_list': op.cp_list}
    except (NFVOAgentsException, DatabaseException) as e:
        return {'status': e.status, 'reason': e.reason}
    # debug
    logger.debug('SFC Template UUID: %s\n%s', policy_data['sfc_uuid'],
                 nfvo_agent.dump_sfc_descriptor(sfc_template[0]['sfc_template']))
    # Persist the updated template for the next composition step.
    self.cache.set(policy_data['sfc_uuid'], sfc_template)
    return {'status': OK}
def configure_policies(self, policy_data):
    """Includes ACL criteria on the first SFC segment

    JSON arguments are:
        - sfc_uuid: the unique identifier of the SFC being composed
        - policies: a list of dicts containing the acl criteria to be added into the SFC template

    :return: OK if success, or ERROR and its reason if not
    """
    try:
        sfc_template = self.cache.get(policy_data['sfc_uuid'])
        if not sfc_template:
            return {'status': ERROR, 'reason': 'SFC UUID not found!'}
    except KeyError:
        return {'status': ERROR, 'reason': 'SFC UUID must be informed!'}
    policies = policy_data['policies']
    # configuring policies of SFC incoming traffic (the classifier lives in the first segment)
    domain_id = sfc_template[0]['domain_id']
    nfvo_id = sfc_template[0]['nfvo_id']
    try:
        nfvo_agent = self._get_nfvo_agent_instance(domain_id, nfvo_id)
        sfc_template[0]['sfc_template'] = nfvo_agent.configure_policies(sfc_template[0]['sfc_template'],
                                                                        policies)
    except NFVOAgentsException as e:
        return {'status': e.status, 'reason': e.reason}
    except MultiSFCException as e:
        return {'status': ERROR, 'reason': str(e)}
    # debug
    # Fix: previously read request.json['sfc_uuid'] (a web-framework global that is
    # unavailable/ambiguous here); use the already-validated policy_data instead,
    # matching the sibling methods.
    logger.debug('SFC Template UUID: %s\n%s', policy_data['sfc_uuid'],
                 nfvo_agent.dump_sfc_descriptor(sfc_template[0]['sfc_template']))
    self.cache.set(policy_data['sfc_uuid'], sfc_template)
    return {'status': OK}
def parse_segment_classifier_policy(self, policy, input_platform, output_platform):
    """Maps an ACL criterion from one NFV Orchestrator onto the equivalent one of another

    :param policy: a dict with a single item: the classifier policy (ACL name) as key and its value
    :param input_platform: the platform name of the policy param (e.g. tacker, osm)
    :param output_platform: the platform name of the policy to be parsed
    :return: a dict with the parsed classifier policy of the output platform, or None if the
             policy name is not recognized
    """
    # Column semantics per platform: ['protocol', 'src_ip', 'dst_ip', 'src_port', 'dst_port']
    acl_names = {
        TACKER_NFVO: ['ip_proto', 'ip_src_prefix', 'ip_dst_prefix', 'source_port_range', 'destination_port_range'],
        OSM_NFVO: ['ip-proto', 'source-ip-address', 'destination-ip-address', 'source-port', 'destination-port']
    }
    key, value = next(iter(policy.items()))
    input_names = acl_names[input_platform]
    if key not in input_names:
        return None
    column = input_names.index(key)
    if input_platform == TACKER_NFVO:
        if key in ('ip_src_prefix', 'ip_dst_prefix'):
            # drop the CIDR suffix (Tacker stores prefixes, OSM plain addresses)
            value = value.split(sep='/')[0]
        elif key in ('source_port_range', 'destination_port_range'):
            # keep only the start of the port range
            value = value.split(sep='-')[0]
        elif key == 'ip_proto':
            # converting int from Tacker to str for OSM
            value = str(value)
    else:  # just remained OSM, so far
        if key in ('source-ip-address', 'destination-ip-address'):
            # host address -> /32 prefix for Tacker
            value = ''.join([value, '/32'])
        elif key in ('source-port', 'destination-port'):
            # single port -> degenerate range for Tacker
            value = ''.join([value, '-', value])
        elif key == 'ip-proto':
            # converting string from OSM to int for Tacker
            value = int(value)
    return {acl_names[output_platform][column]: value}
def configure_multi_sfc_segments(self, sfc_segments):
"""Configures all multi-sfc segment tunnels and classifiers according to the configuration of the
first classifier
It configures all classifiers and tunnel vnfs in order to steer the traffic of an SFC through all segments
:param sfc_segments: must contain a list with domains/nfvos and their respective sfc templates
:return: sfc_segments with configured classifiers
Raises
------
MultiSFCException
ReRaises
------
NFVOAgentsException, DatabaseException
"""
tunnel = None
input_platform = None
input_policies = None
for segment in sfc_segments:
domain_id = segment['domain_id']
nfvo_id = segment['nfvo_id']
platform = self._domain_catalog[domain_id][nfvo_id]['platform']
nfvo_agent = self._get_nfvo_agent_instance(domain_id, nfvo_id)
if input_platform is None: # it means that the segment is the first one
input_platform = platform
tunnel = self._domain_catalog[domain_id][nfvo_id]['tunnel']
input_policies = nfvo_agent.get_policies(segment['sfc_template'])
if self._domain_catalog[domain_id][nfvo_id]['tunnel'] != tunnel or tunnel is None:
raise MultiSFCException("Configuration of segment tunnels mismatch!")
tunnel_vnfp = database.list_catalog(platform=platform, tunnel=tunnel)
if not tunnel_vnfp:
raise MultiSFCException(
"No VNF Package found to configure %s tunnel! See domain configuration" % tunnel)
vnfp_dir = tunnel_vnfp[0]['dir_id']
vnfd_name = tunnel_vnfp[0]['vnfd_name']
# if it is not the first segment, then add the tunnel vnf at the beginning of the segment
if sfc_segments.index(segment) > 0:
# Configuring acl policies based on the first segment classifier
segment_policies = []
for policy in input_policies:
policies = {}
for k, v in policy.items():
parsed_criteria = self.parse_segment_classifier_policy({k: v}, input_platform, platform)
if parsed_criteria is not None:
policies.update(parsed_criteria)
segment_policies.append(policies)
segment['sfc_template'] = nfvo_agent.configure_policies(segment['sfc_template'], segment_policies)
# Configuring the segment input VNF (tunnel)
if platform == TACKER_NFVO:
# In case of Tacker, the VNF needs to be instantiated previously to be added on the classifier
vnf_name = str(vnfd_name).replace('vnfd', 'vnf')
vnfp_dir = '/'.join(['repository', vnfp_dir])
response = nfvo_agent.create_vnf(vnfp_dir, vnfd_name, vnf_name)
src_id = response['vnf_id']
database.insert_vnf_instance(tunnel_vnfp[0]['_id'], domain_id, nfvo_id,
response['vnfd_id'], response['vnf_id'])
segment['incoming_vnf'] = response['vnf_id']
else:
src_id = | |
<reponame>whisller/aws-lambda-powertools-python<gh_stars>0
import base64
import json
import logging
import os
import re
import traceback
import warnings
import zlib
from abc import ABC, abstractmethod
from enum import Enum
from functools import partial
from http import HTTPStatus
from typing import Any, Callable, Dict, List, Match, Optional, Pattern, Set, Tuple, Type, Union
from aws_lambda_powertools.event_handler import content_types
from aws_lambda_powertools.event_handler.exceptions import NotFoundError, ServiceError
from aws_lambda_powertools.shared import constants
from aws_lambda_powertools.shared.functions import resolve_truthy_env_var_choice
from aws_lambda_powertools.shared.json_encoder import Encoder
from aws_lambda_powertools.utilities.data_classes import ALBEvent, APIGatewayProxyEvent, APIGatewayProxyEventV2
from aws_lambda_powertools.utilities.data_classes.common import BaseProxyEvent
from aws_lambda_powertools.utilities.typing import LambdaContext
logger = logging.getLogger(__name__)
_DYNAMIC_ROUTE_PATTERN = r"(<\w+>)"
_SAFE_URI = "-._~()'!*:@,;" # https://www.ietf.org/rfc/rfc3986.txt
# API GW/ALB decode non-safe URI chars; we must support them too
_UNSAFE_URI = "%<> \[\]{}|^" # noqa: W605
_NAMED_GROUP_BOUNDARY_PATTERN = fr"(?P\1[{_SAFE_URI}{_UNSAFE_URI}\\w]+)"
class ProxyEventType(Enum):
    """An enumeration of the supported proxy event types."""

    # REST API (API Gateway v1) proxy-integration payload
    APIGatewayProxyEvent = "APIGatewayProxyEvent"
    # HTTP API (API Gateway v2) payload format 2.0
    APIGatewayProxyEventV2 = "APIGatewayProxyEventV2"
    # Application Load Balancer target-group event
    ALBEvent = "ALBEvent"
class CORSConfig:
    """Cross-Origin Resource Sharing (CORS) response-header configuration.

    Note: the default permissive origin ("*") is only appropriate for early
    prototyping; production apps should pin ``allow_origin`` to a real origin.

    Examples
    --------
    Resolver-wide custom configuration, with per-route opt-out:

    ```python
    from aws_lambda_powertools.event_handler.api_gateway import (
        APIGatewayRestResolver, CORSConfig
    )

    cors_config = CORSConfig(
        allow_origin="https://wwww.example.com/",
        expose_headers=["x-exposed-response-header"],
        allow_headers=["x-custom-request-header"],
        max_age=100,
        allow_credentials=True,
    )
    app = APIGatewayRestResolver(cors=cors_config)

    @app.get("/my/path")
    def with_cors():
        return {"message": "Foo"}

    @app.get("/another-one", cors=False)
    def without_cors():
        return {"message": "Foo"}
    ```
    """

    # Headers that are always allowed, regardless of user configuration.
    _REQUIRED_HEADERS = ["Authorization", "Content-Type", "X-Amz-Date", "X-Api-Key", "X-Amz-Security-Token"]

    def __init__(
        self,
        allow_origin: str = "*",
        allow_headers: Optional[List[str]] = None,
        expose_headers: Optional[List[str]] = None,
        max_age: Optional[int] = None,
        allow_credentials: bool = False,
    ):
        """
        Parameters
        ----------
        allow_origin: str
            Value of the ``Access-Control-Allow-Origin`` response header.
            Defaults to "*", which should only be used during development.
        allow_headers: Optional[List[str]]
            Extra allowed request headers, merged with the built-in required
            set (`Authorization`, `Content-Type`, `X-Amz-Date`, `X-Api-Key`,
            `X-Amz-Security-Token`).
        expose_headers: Optional[List[str]]
            Values for ``Access-Control-Expose-Headers``.
        max_age: Optional[int]
            Value for ``Access-Control-Max-Age``; omitted when None.
        allow_credentials: bool
            When True, emits ``Access-Control-Allow-Credentials: true``.
        """
        extra_headers = allow_headers or []
        self.allow_origin = allow_origin
        # The required headers are always allowed, on top of any extras.
        self.allow_headers = set(self._REQUIRED_HEADERS + extra_headers)
        self.expose_headers = expose_headers or []
        self.max_age = max_age
        self.allow_credentials = allow_credentials

    def to_dict(self) -> Dict[str, str]:
        """Build the Access-Control-* headers dict for a response."""
        cors_headers: Dict[str, str] = {
            "Access-Control-Allow-Origin": self.allow_origin,
            # Sorted for a deterministic header value.
            "Access-Control-Allow-Headers": ",".join(sorted(self.allow_headers)),
        }
        if self.expose_headers:
            cors_headers["Access-Control-Expose-Headers"] = ",".join(self.expose_headers)
        if self.max_age is not None:
            cors_headers["Access-Control-Max-Age"] = str(self.max_age)
        if self.allow_credentials is True:
            cors_headers["Access-Control-Allow-Credentials"] = "true"
        return cors_headers
class Response:
    """Container for a fully-specified HTTP response (status, headers, body)."""

    def __init__(
        self,
        status_code: int,
        content_type: Optional[str],
        body: Union[str, bytes, None],
        headers: Optional[Dict] = None,
    ):
        """
        Parameters
        ----------
        status_code: int
            HTTP status code, e.g. 200
        content_type: Optional[str]
            Value for the Content-Type header; merged into `headers` unless a
            Content-Type is already present there
        body: Union[str, bytes, None]
            Response payload; a bytes body is base64 encoded when the response
            is serialized
        headers: Optional[Dict]
            Explicit HTTP headers; an existing "Content-Type" key here wins
            over the `content_type` argument
        """
        merged_headers: Dict = headers or {}
        if content_type:
            merged_headers.setdefault("Content-Type", content_type)
        self.status_code = status_code
        self.body = body
        # Flipped to True once a bytes body has been base64 encoded.
        self.base64_encoded = False
        self.headers = merged_headers
class Route:
    """Internal record of a single registered route and its options."""

    def __init__(
        self, method: str, rule: Pattern, func: Callable, cors: bool, compress: bool, cache_control: Optional[str]
    ):
        # HTTP methods are matched case-insensitively; store them uppercased.
        self.method = method.upper()
        self.rule = rule  # compiled pattern the request path is matched against
        self.func = func  # user handler invoked on a match
        self.cors = cors  # whether CORS headers are added to the response
        self.compress = compress  # whether the body may be gzip-compressed
        self.cache_control = cache_control  # Cache-Control value for 200 responses
class ResponseBuilder:
    """Turns a Response (plus optional Route settings) into the Lambda proxy dict."""

    def __init__(self, response: Response, route: Optional[Route] = None):
        self.response = response
        self.route = route

    def _add_cors(self, cors: CORSConfig):
        """Merge the configured Access-Control headers into the response."""
        self.response.headers.update(cors.to_dict())

    def _add_cache_control(self, cache_control: str):
        """Apply the route's Cache-Control on 200 responses; force `no-cache` otherwise."""
        is_ok = self.response.status_code == 200
        self.response.headers["Cache-Control"] = cache_control if is_ok else "no-cache"

    def _compress(self):
        """Gzip the body (the caller has already checked the client accepts gzip)."""
        self.response.headers["Content-Encoding"] = "gzip"
        payload = self.response.body
        if isinstance(payload, str):
            logger.debug("Converting string response to bytes before compressing it")
            payload = bytes(payload, "utf-8")
        # wbits = MAX_WBITS | 16 selects the gzip container format.
        compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
        self.response.body = compressor.compress(payload) + compressor.flush()

    def _route(self, event: BaseProxyEvent, cors: Optional[CORSConfig]):
        """Apply the matched route's cors/cache/compress settings, if any."""
        route = self.route
        if route is None:
            return
        if route.cors:
            # Fall back to the permissive default config when none was supplied.
            self._add_cors(cors or CORSConfig())
        if route.cache_control:
            self._add_cache_control(route.cache_control)
        if route.compress and "gzip" in (event.get_header_value("accept-encoding", "") or ""):
            self._compress()

    def build(self, event: BaseProxyEvent, cors: Optional[CORSConfig] = None) -> Dict[str, Any]:
        """Return the proxy-integration response dict expected by Lambda."""
        self._route(event, cors)

        if isinstance(self.response.body, bytes):
            logger.debug("Encoding bytes response with base64")
            self.response.base64_encoded = True
            self.response.body = base64.b64encode(self.response.body).decode()
        return {
            "statusCode": self.response.status_code,
            "headers": self.response.headers,
            "body": self.response.body,
            "isBase64Encoded": self.response.base64_encoded,
        }
class BaseRouter(ABC):
    """Shared route-registration interface for all resolver implementations.

    Subclasses implement :meth:`route`; the HTTP-verb helpers below simply
    delegate to it with the method name filled in.
    """

    # Populated by the concrete resolver while handling an invocation.
    current_event: BaseProxyEvent
    lambda_context: LambdaContext

    @abstractmethod
    def route(
        self,
        rule: str,
        method: Any,
        cors: Optional[bool] = None,
        compress: bool = False,
        cache_control: Optional[str] = None,
    ):
        """Register ``rule`` for ``method``; must be provided by subclasses."""
        raise NotImplementedError()

    def get(self, rule: str, cors: Optional[bool] = None, compress: bool = False, cache_control: Optional[str] = None):
        """Register a handler for GET requests on ``rule``.

        Example
        -------
        ```python
        from aws_lambda_powertools import Tracer
        from aws_lambda_powertools.event_handler import APIGatewayRestResolver

        tracer = Tracer()
        app = APIGatewayRestResolver()

        @app.get("/get-call")
        def simple_get():
            return {"message": "Foo"}

        @tracer.capture_lambda_handler
        def lambda_handler(event, context):
            return app.resolve(event, context)
        ```
        """
        return self.route(rule, "GET", cors, compress, cache_control)

    def post(self, rule: str, cors: Optional[bool] = None, compress: bool = False, cache_control: Optional[str] = None):
        """Register a handler for POST requests on ``rule``.

        Example
        -------
        ```python
        app = APIGatewayRestResolver()

        @app.post("/post-call")
        def simple_post():
            post_data: dict = app.current_event.json_body
            return {"message": post_data["value"]}
        ```
        """
        return self.route(rule, "POST", cors, compress, cache_control)

    def put(self, rule: str, cors: Optional[bool] = None, compress: bool = False, cache_control: Optional[str] = None):
        """Register a handler for PUT requests on ``rule``.

        Example
        -------
        ```python
        app = APIGatewayRestResolver()

        @app.put("/put-call")
        def simple_put():
            put_data: dict = app.current_event.json_body
            return {"message": put_data["value"]}
        ```
        """
        return self.route(rule, "PUT", cors, compress, cache_control)

    def delete(
        self, rule: str, cors: Optional[bool] = None, compress: bool = False, cache_control: Optional[str] = None
    ):
        """Register a handler for DELETE requests on ``rule``.

        Example
        -------
        ```python
        app = APIGatewayRestResolver()

        @app.delete("/delete-call")
        def simple_delete():
            return {"message": "deleted"}
        ```
        """
        return self.route(rule, "DELETE", cors, compress, cache_control)

    def patch(
        self, rule: str, cors: Optional[bool] = None, compress: bool = False, cache_control: Optional[str] = None
    ):
        """Register a handler for PATCH requests on ``rule``.

        Example
        -------
        ```python
        app = APIGatewayRestResolver()

        @app.patch("/patch-call")
        def simple_patch():
            patch_data: dict = app.current_event.json_body
            return {"message": patch_data}
        ```
        """
        return self.route(rule, "PATCH", cors, compress, cache_control)
class ApiGatewayResolver(BaseRouter):
"""API Gateway and ALB proxy resolver
Examples
--------
Simple example with a custom lambda handler using the Tracer capture_lambda_handler decorator
```python
from aws_lambda_powertools import Tracer
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
tracer = Tracer()
app = APIGatewayRestResolver()
@app.get("/get-call")
def simple_get():
return {"message": "Foo"}
@app.post("/post-call")
def simple_post():
post_data: dict = app.current_event.json_body
return {"message": post_data["value"]}
@tracer.capture_lambda_handler
def lambda_handler(event, context):
return app.resolve(event, context)
```
"""
def __init__(
self,
proxy_type: Enum = ProxyEventType.APIGatewayProxyEvent,
cors: Optional[CORSConfig] = None,
debug: Optional[bool] = None,
serializer: Optional[Callable[[Dict], str]] = None,
strip_prefixes: Optional[List[str]] = None,
):
"""
Parameters
----------
proxy_type: ProxyEventType
Proxy request type, defaults to API Gateway V1
cors: CORSConfig
Optionally configure and enabled CORS. Not each route will need to have to cors=True
debug: Optional[bool]
Enables debug mode, by default False. Can be also be enabled by "POWERTOOLS_EVENT_HANDLER_DEBUG"
environment variable
serializer : Callable, optional
function to serialize `obj` to a JSON formatted `str`, by default json.dumps
strip_prefixes: List[str], optional
optional list of prefixes to be removed from the request | |
= 78
kNurbsToSubdiv = 752
kObjectAttrFilter = 672
kObjectBinFilter = 933
kObjectFilter = 668
kObjectMultiFilter = 669
kObjectNameFilter = 670
kObjectRenderFilter = 673
kObjectScriptFilter = 674
kObjectTypeFilter = 671
kOcean = 866
kOceanShader = 889
kOffsetCos = 81
kOffsetCosManip = 171
kOffsetCurve = 82
kOffsetCurveManip = 172
kOffsetSurface = 636
kOffsetSurfaceManip = 644
kOldGeometryConstraint = 442
kOpticalFX = 443
kOrientConstraint = 239
kOrientationComponent = 549
kOrientationLocator = 286
kOrientationMarker = 284
kOrthoGrid = 291
kPASolver = 360
kPairBlend = 917
kParamDimension = 275
kParentConstraint = 242
kParticle = 311
kParticleAgeMapper = 444
kParticleCloud = 445
kParticleColorMapper = 446
kParticleIncandecenceMapper = 447
kParticleSamplerInfo = 798
kParticleTransparencyMapper = 448
kPartition = 449
kPassContributionMap = 779
kPfxGeometry = 935
kPfxHair = 936
kPfxToon = 961
kPhong = 369
kPhongExplorer = 370
kPhongMaterial = 384
kPivotComponent = 534
kPivotManip2D = 191
kPlace2dTexture = 450
kPlace3dTexture = 451
kPlanarProjectionManip = 207
kPlanarTrimSrf = 83
kPlane = 288
kPlugin = 574
kPluginBlendShape = 1106
kPluginCameraSet = 1000
kPluginClientDevice = 1066
kPluginConstraintNode = 1005
kPluginData = 593
kPluginDeformerNode = 607
kPluginDependNode = 452
kPluginEmitterNode = 723
kPluginFieldNode = 722
kPluginGeometryData = 759
kPluginGeometryFilter = 1105
kPluginHardwareShader = 881
kPluginHwShaderNode = 882
kPluginIkSolver = 753
kPluginImagePlaneNode = 994
kPluginLocatorNode = 453
kPluginManipContainer = 688
kPluginManipulatorNode = 1022
kPluginMotionPathNode = 439
kPluginObjectSet = 914
kPluginParticleAttributeMapperNode = 998
kPluginShape = 703
kPluginSkinCluster = 1104
kPluginSpringNode = 724
kPluginThreadedDevice = 1067
kPluginTransformNode = 903
kPlusMinusAverage = 454
kPointArrayData = 594
kPointConstraint = 240
kPointLight = 309
kPointManip = 236
kPointMatrixMult = 455
kPointOnCurveInfo = 84
kPointOnCurveManip = 208
kPointOnLineManip = 211
kPointOnPolyConstraint = 1048
kPointOnSurfaceInfo = 85
kPointOnSurfaceManip = 212
kPoleVectorConstraint = 243
kPolyAppend = 396
kPolyAppendVertex = 788
kPolyArrow = 969
kPolyAutoProj = 842
kPolyAutoProjManip = 957
kPolyAverageVertex = 841
kPolyBevel = 394
kPolyBevel2 = 1086
kPolyBevel3 = 1090
kPolyBlindData = 750
kPolyBoolOp = 609
kPolyBridgeEdge = 983
kPolyCBoolOp = 1087
kPolyCaddyManip = 1096
kPolyChipOff = 397
kPolyCloseBorder = 398
kPolyCollapseEdge = 399
kPolyCollapseF = 400
kPolyColorDel = 733
kPolyColorMod = 732
kPolyColorPerVertex = 727
kPolyComponentData = 975
kPolyCone = 430
kPolyConnectComponents = 1049
kPolyContourProj = 1099
kPolyCreaseEdge = 949
kPolyCreateFacet = 436
kPolyCreateToolManip = 140
kPolyCreator = 428
kPolyCube = 431
kPolyCut = 892
kPolyCutManip = 896
kPolyCutManipContainer = 895
kPolyCylProj = 401
kPolyCylinder = 432
kPolyDelEdge = 402
kPolyDelFacet = 403
kPolyDelVertex = 404
kPolyDuplicateEdge = 963
kPolyEdgeToCurve = 1007
kPolyEditEdgeFlow = 1079
kPolyExtrudeEdge = 785
kPolyExtrudeFacet = 405
kPolyExtrudeManip = 1062
kPolyExtrudeManipContainer = 1063
kPolyExtrudeVertex = 916
kPolyFlipEdge = 784
kPolyFlipUV = 879
kPolyHelix = 976
kPolyHoleFace = 1047
kPolyLayoutUV = 843
kPolyMapCut = 406
kPolyMapDel = 407
kPolyMapSew = 408
kPolyMapSewMove = 844
kPolyMappingManip = 194
kPolyMergeEdge = 409
kPolyMergeFacet = 410
kPolyMergeUV = 900
kPolyMergeVert = 690
kPolyMesh = 433
kPolyMirror = 948
kPolyModifierManip = 195
kPolyModifierManipContainer = 1097
kPolyMoveEdge = 411
kPolyMoveFacet = 412
kPolyMoveFacetUV = 413
kPolyMoveUV = 414
kPolyMoveUVManip = 193
kPolyMoveVertex = 415
kPolyMoveVertexManip = 196
kPolyMoveVertexUV = 416
kPolyNormal = 417
kPolyNormalPerVertex = 751
kPolyNormalizeUV = 878
kPolyPinUV = 950
kPolyPipe = 972
kPolyPlanProj = 418
kPolyPlatonicSolid = 971
kPolyPoke = 893
kPolyPokeManip = 897
kPolyPrimitive = 429
kPolyPrimitiveMisc = 970
kPolyPrism = 958
kPolyProj = 419
kPolyProjectCurve = 1060
kPolyProjectionManip = 174
kPolyPyramid = 959
kPolyQuad = 420
kPolyReduce = 762
kPolyRemesh = 1098
kPolySelectEditFeedbackManip = 1030
kPolySeparate = 456
kPolySewEdge = 689
kPolySmooth = 421
kPolySmoothFacet = 691
kPolySmoothProxy = 934
kPolySoftEdge = 422
kPolySphProj = 423
kPolySphere = 434
kPolySpinEdge = 1046
kPolySplit = 424
kPolySplitEdge = 806
kPolySplitRing = 960
kPolySplitToolManip = 141
kPolySplitVert = 802
kPolyStraightenUVBorder = 901
kPolySubdEdge = 425
kPolySubdFacet = 426
kPolyToSubdiv = 677
kPolyToolFeedbackManip = 1029
kPolyToolFeedbackShape = 312
kPolyTorus = 435
kPolyTransfer = 840
kPolyTriangulate = 427
kPolyTweak = 395
kPolyTweakUV = 701
kPolyUVRectangle = 1058
kPolyUnite = 437
kPolyVertexNormalManip = 197
kPolyWedgeFace = 894
kPositionMarker = 285
kPostProcessList = 457
kPrecompExport = 780
kPrimitive = 86
kProjectCurve = 87
kProjectTangent = 88
kProjectTangentManip = 177
kProjection = 458
kProjectionManip = 173
kProjectionMultiManip = 176
kProjectionUVManip = 175
kPropModManip = 178
kPropMoveTriadManip = 138
kProxy = 108
kProxyManager = 956
kPsdFileTexture = 938
kQuadPtOnLineManip = 179
kQuadShadingSwitch = 915
kRBFsurface = 89
kRPsolver = 362
kRadial = 261
kRadius = 274
kRamp = 497
kRampBackground = 26
kRampShader = 887
kRbfSrfManip = 180
kRebuildCurve = 90
kRebuildSurface = 91
kRecord = 459
kReference = 747
kReflect = 367
kRemapColor = 928
kRemapHsv = 929
kRemapValue = 927
kRenderBox = 859
kRenderCone = 97
kRenderGlobals = 516
kRenderGlobalsList = 517
kRenderLayer = 777
kRenderLayerManager = 778
kRenderPass = 775
kRenderPassSet = 776
kRenderQuality = 518
kRenderRect = 277
kRenderSetup | |
for import-like statements.
Arguments:
src: The Python source code to analyze for dependencies.
module_name: The name of the module that ``src`` corresponds to.
is_package: Whether this module should be treated as a package.
See :py:meth:`save_source_string` for more info.
Returns:
A list containing modules detected as direct dependencies in
``src``. The items in the list are guaranteed to be unique.
"""
package_name = (
module_name if is_package else module_name.rsplit(".", maxsplit=1)[0]
)
try:
dep_pairs = find_files_source_depends_on(src, package_name)
except Exception as e:
self.dependency_graph.add_node(
module_name,
error=PackagingErrorReason.DEPENDENCY_RESOLUTION_FAILED,
error_context=str(e),
)
return []
# Use a dict to get uniquing but also deterministic order
dependencies = {}
for dep_module_name, dep_module_obj in dep_pairs:
# handle the case where someone did something like `from pack import sub`
# where `sub` is a submodule. In this case we don't have to save pack, just sub.
# this ensures we don't pick up additional dependencies on pack.
# However, in the case where `sub` is not a submodule but an object, then we do have
# to save pack.
if dep_module_obj is not None:
possible_submodule = f"{dep_module_name}.{dep_module_obj}"
if self._module_exists(possible_submodule):
dependencies[possible_submodule] = True
# we don't need to save `pack`
continue
if self._module_exists(dep_module_name):
dependencies[dep_module_name] = True
return list(dependencies.keys())
def save_source_string(
    self,
    module_name: str,
    src: str,
    is_package: bool = False,
    dependencies: bool = True,
):
    """Adds ``src`` as the source code for ``module_name`` in the exported package.

    Args:
        module_name (str): e.g. ``my_package.my_subpackage``; code will be saved
            to provide code for this package.
        src (str): The Python source code to save for this package.
        is_package (bool, optional): If ``True``, this module is treated as a
            package. Packages are allowed to have submodules and resources can
            be saved inside them. Defaults to ``False``.
        dependencies (bool, optional): If ``True``, we scan the source for
            dependencies.
    """
    # Register the source itself as an explicitly-provided interned module.
    self.dependency_graph.add_node(
        module_name,
        source=src,
        is_package=is_package,
        provided=True,
        action=_ModuleProviderAction.INTERN,
    )

    if not dependencies:
        return
    # Pull in everything the source imports as well.
    for dep in self._get_dependencies(src, module_name, is_package):
        self.dependency_graph.add_edge(module_name, dep)
        self.add_dependency(dep)
def _write_source_string(
    self,
    module_name: str,
    src: str,
    is_package: bool = False,
):
    """Write ``src`` into the zip archive as ``module_name``'s source file.

    Arguments mirror :meth:`save_source_string`.
    """
    # Packages materialize as pkg/__init__.py; plain modules as mod.py.
    suffix = "/__init__.py" if is_package else ".py"
    self._write(module_name.replace(".", "/") + suffix, src)
def _import_module(self, module_name: str):
    """Import ``module_name`` through the configured importer chain.

    Raises:
        ModuleNotFoundError: if the module cannot be found. For mangled
            (previously-packaged) module names, the message explains that
            such modules cannot be re-exported directly.
    """
    try:
        return self.importer.import_module(module_name)
    except ModuleNotFoundError:
        # Ordinary module names: propagate the original error untouched.
        # (The previous `as e` binding was never used.)
        if not is_mangled(module_name):
            raise
        msg = (
            f"Module not found: '{module_name}'. Modules imported "
            "from a torch.package cannot be re-exported directly."
        )
        # `from None` deliberately suppresses the noisy original traceback.
        raise ModuleNotFoundError(msg) from None
def _module_exists(self, module_name: str) -> bool:
    """Return True when ``module_name`` is importable via our importers."""
    try:
        self._import_module(module_name)
    except Exception:
        # Any import failure counts as "does not exist".
        return False
    return True
def _get_source_of_module(self, module: types.ModuleType) -> Optional[str]:
filename = getattr(module, "__file__", None)
result = (
None
if filename is None or not filename.endswith(".py")
else linecache.getlines(filename, module.__dict__)
)
if result is None:
return None
return "".join(result)
def add_dependency(self, module_name: str, dependencies=True):
    """Given a module, add it to the dependency graph according to patterns
    specified by the user.
    """
    # Already explicitly provided (e.g. via save_source_string): nothing to do.
    if (
        module_name in self.dependency_graph
        and self.dependency_graph.nodes[module_name].get("provided") is True
    ):
        return

    # "_mock" is the helper module backing mocked-out dependencies; it is
    # always repackaged as-is.
    if module_name == "_mock":
        self.dependency_graph.add_node(
            module_name,
            action=_ModuleProviderAction.REPACKAGED_MOCK_MODULE,
            provided=True,
        )
        return

    # Some modules (e.g. ones known to ship with the runtime) may be externed
    # without the user asking for it.
    if self._can_implicitly_extern(module_name):
        self.dependency_graph.add_node(
            module_name, action=_ModuleProviderAction.EXTERN, provided=True
        )
        return

    # User-specified patterns: first match wins, then we return immediately.
    for pattern, pattern_info in self.patterns.items():
        if pattern.matches(module_name):
            pattern_info.was_matched = True
            self.dependency_graph.add_node(
                module_name, action=pattern_info.action, provided=True
            )

            if pattern_info.action == _ModuleProviderAction.DENY:
                # Requiring a denied module just adds an error to the graph.
                self.dependency_graph.add_node(
                    module_name, error=PackagingErrorReason.DENIED
                )

            # If we are interning this module, we need to retrieve its
            # dependencies and package those as well.
            if pattern_info.action == _ModuleProviderAction.INTERN:
                self._intern_module(module_name, dependencies)
            return

    # No patterns have matched. Explicitly add this as an error.
    self.dependency_graph.add_node(
        module_name, error=PackagingErrorReason.NO_ACTION
    )
def save_module(self, module_name: str, dependencies=True):
    """Save the code for ``module_name`` into the package.

    The module object is located via the ``importers`` path and its
    ``__file__`` attribute is then used to find the source code.

    Args:
        module_name (str): e.g. ``my_package.my_subpackage``; code will be
            saved to provide code for this package.
        dependencies (bool, optional): If ``True``, we scan the source for
            dependencies.
    """
    # A common mistake is passing the module object itself; insist on its name.
    if not isinstance(module_name, str):
        raise TypeError(
            "save_module() expects a string input, did you perhaps mean to pass `__name__`?"
        )

    self.dependency_graph.add_node(module_name, provided=True)
    self._intern_module(module_name, dependencies)
def _intern_module(
    self,
    module_name: str,
    dependencies: bool,
):
    """Adds the module to the dependency graph as an interned module,
    along with any metadata needed to write it out to the zipfile at
    serialization time.

    Args:
        module_name: fully qualified name of the module to intern.
        dependencies: if True, also resolve and require the module's
            dependencies.
    """
    module_obj = self._import_module(module_name)
    # Packages expose __path__; plain modules do not.
    is_package = hasattr(module_obj, "__path__")
    source = self._get_source_of_module(module_obj)
    if source is None:
        # Couldn't find a source! Add it to our dependency graph as broken
        # and continue.
        filename = getattr(module_obj, "__file__", None)
        error_context = None
        if filename is None:
            packaging_error = PackagingErrorReason.NO_DUNDER_FILE
        elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):
            packaging_error = PackagingErrorReason.IS_EXTENSION_MODULE
        else:
            packaging_error = PackagingErrorReason.SOURCE_FILE_NOT_FOUND
            # Bug fix: the previous f-string had no placeholder, so the
            # offending filename never made it into the error report.
            error_context = f"filename: {filename}"
        self.dependency_graph.add_node(
            module_name,
            action=_ModuleProviderAction.INTERN,
            is_package=is_package,
            error=packaging_error,
            error_context=error_context,
        )
        return

    self.dependency_graph.add_node(
        module_name,
        action=_ModuleProviderAction.INTERN,
        is_package=is_package,
        source=source,
        provided=True,
    )

    # Find dependencies of this module and require them as well.
    if dependencies:
        deps = self._get_dependencies(source, module_name, is_package)
        for dep in deps:
            self.dependency_graph.add_edge(module_name, dep)
            self.add_dependency(dep)
def save_pickle(
    self, package: str, resource: str, obj: Any, dependencies: bool = True
):
    """Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into
    the archive rather than a stand-alone file. Standard pickle does not save the code, only the objects.
    If ``dependencies`` is true, this method will also scan the pickled objects for which modules are required
    to reconstruct them and save the relevant code.

    To be able to save an object where ``type(obj).__name__`` is ``my_module.MyObject``,
    ``my_module.MyObject`` must resolve to the class of the object according to the ``importer`` order. When saving objects that
    have previously been packaged, the importer's ``import_module`` method will need to be present in the ``importer`` list
    for this to work.

    Args:
        package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``).
        resource (str): A unique name for the resource, used to identify it to load.
        obj (Any): The object to save, must be picklable.
        dependencies (bool, optional): If ``True``, we scan the source for dependencies.
    """
    filename = self._filename(package, resource)
    # Write the pickle data for `obj`
    data_buf = io.BytesIO()
    pickler = create_pickler(data_buf, self.importer)
    pickler.persistent_id = self._persistent_id
    pickler.dump(obj)
    data_value = data_buf.getvalue()

    # The pickle participates in the dependency graph under a synthetic
    # "<package.resource>" node name so its module references get packaged.
    name_in_dependency_graph = f"<{package}.{resource}>"
    self.dependency_graph.add_node(
        name_in_dependency_graph,
        action=_ModuleProviderAction.INTERN,
        provided=True,
        is_pickle=True,
    )

    if dependencies:
        all_dependencies = []
        # Walk the raw pickle bytecode looking for module references.
        # NOTE(review): only GLOBAL opcodes are scanned; a pickler emitting
        # protocol-4 STACK_GLOBAL would be missed — confirm create_pickler's
        # protocol guarantees GLOBAL is used.
        for opcode, arg, pos in pickletools.genops(data_value):
            if opcode.name == "GLOBAL":  # a global reference
                assert isinstance(arg, str)
                module, field = arg.split(" ")
                if module not in all_dependencies:
                    all_dependencies.append(module)

        for module_name in all_dependencies:
            self.dependency_graph.add_edge(name_in_dependency_graph, module_name)
            self.add_dependency(module_name)

    self._write(filename, data_value)
def save_text(self, package: str, resource: str, text: str):
    """Save text data to the package.

    Args:
        package (str): The name of module package this resource should go in
            (e.g. ``"my_package.my_subpackage"``).
        resource (str): A unique name for the resource, used to identify it to load.
        text (str): The contents to save.
    """
    # Text resources are stored as UTF-8 encoded binary data.
    return self.save_binary(package, resource, text.encode("utf-8"))
def save_binary(self, package, resource, binary: bytes):
    """Save raw bytes to the package.

    Args:
        package (str): The name of module package this resource should go in
            (e.g. ``"my_package.my_subpackage"``).
        resource (str): A unique name for the resource, used to identify it to load.
        binary (bytes): The data to save.
    """
    self._write(self._filename(package, resource), binary)
def register_extern_hook(self, hook: ActionHook) -> RemovableHandle:
    """Registers an extern hook on the exporter.

    The hook runs every time a module matches an :meth:`extern` pattern, with
    the signature ``hook(exporter: PackageExporter, module_name: str) -> None``.
    Hooks fire in registration order.

    Returns:
        :class:`torch.utils.hooks.RemovableHandle`:
            Call ``handle.remove()`` to unregister the hook.
    """
    registration = RemovableHandle(self._extern_hooks)
    self._extern_hooks[registration.id] = hook
    return registration
def register_mock_hook(self, hook: ActionHook) -> RemovableHandle:
"""Registers a mock hook on the exporter.
The hook will be called each time a module matches against a :meth:`mock` pattern.
It should have the following signature::
hook(exporter: PackageExporter, module_name: str) -> None
Hooks will be called in order of registration.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
A handle that can be used to | |
<filename>mechanistic/ssn.py
#!/usr/bin/env python
###
# A mini library containing the functions typically used when running
# simulations using the supralinear stabilized network (Rubin et al., 2015).
#
# <NAME>, September 2015
import numpy as np
import scipy.io
import matplotlib.image as mpimg
class SSNetwork:
    """Supralinear stabilized network (SSN) layer (Rubin et al., 2015).

    A grid of N_pairs x N_pairs excitatory/inhibitory unit pairs with
    distance- and orientation-dependent recurrent connectivity.
    """

    # Default constructor - use the Rubin et al. parameters to produce a SSN layer:
    def __init__(self, sig_EE=8, sig_IE=12, sig_EI=4, sig_II=4, J_EE=0.1, J_IE=0.38, J_EI=0.089, J_II=0.096, ori_map=0, ocd_map=0, N_pairs=75, field_size=16.):
        self.N_pairs = N_pairs  # no. of E/I pairs to a side of a grid
        self.field_size = field_size  # size of field to a side (degrees)
        self.dx = field_size / N_pairs
        self.sig_FF = 32.
        self.sig_RF = self.dx
        # Heterogeneous unit parameters: 5% gaussian jitter around the paper's values.
        self.k = np.random.normal(0.012, 0.05 * 0.012, (N_pairs, N_pairs))
        self.n_E = np.random.normal(2.0, 0.05 * 2.0, (N_pairs, N_pairs))
        self.n_I = np.random.normal(2.2, 0.05 * 2.2, (N_pairs, N_pairs))
        self.tau_E = np.random.normal(0.02, 0.05 * 0.02, (N_pairs, N_pairs))
        self.tau_I = np.random.normal(0.01, 0.05 * 0.01, (N_pairs, N_pairs))
        # Connection weight parameters (from supp. materials S1.1.2):
        self.kappa_E = 0.1
        self.kappa_I = 0.5
        self.J_EE = J_EE
        self.J_IE = J_IE
        self.J_EI = J_EI
        self.J_II = J_II
        self.sig_EE = sig_EE * self.dx
        self.sig_IE = sig_IE * self.dx
        self.sig_EI = sig_EI * self.dx
        self.sig_II = sig_II * self.dx
        self.sig_ori = 45.
        self.OP_map = ori_map
        if np.all(self.OP_map == 0):
            try:
                # load OP map from Bryan's extracted Kaschube map
                data = scipy.io.loadmat('orientation-map.mat')
            except (FileNotFoundError, ValueError) as err:
                # BUG FIX: loadmat raises FileNotFoundError for a missing file;
                # the old `except ValueError` never caught that case.
                raise ValueError("Could not find orientation-map.mat!") from err
            self.OP_map = data['map']
        self.OD_map = ocd_map
        if np.all(self.OD_map == 0):
            self.OD_map = load_OD_map()
        [self.W_EE, self.W_IE, self.W_EI, self.W_II] = generate_connetion_weights(
            self.N_pairs, self.field_size, self.OP_map, self.kappa_E, self.kappa_I,
            self.J_EE, self.J_IE, self.J_EI, self.J_II,
            self.sig_EE, self.sig_IE, self.sig_EI, self.sig_II, self.sig_ori, quiet=True)
        self.sum_field_sizes_E = np.zeros((self.N_pairs, self.N_pairs))
        self.sum_field_sizes_I = np.zeros((self.N_pairs, self.N_pairs))

    def run_simulation(self, dt, timesteps, c, h, init_cond=None):
        """Integrate the rate equations with forward Euler.

        Args:
            dt: integration time step (seconds).
            timesteps: number of steps (>= 1).
            c: stimulus contrast (scalar multiplier on h).
            h: external drive, shape (N_pairs, N_pairs).
            init_cond: optional (2, N_pairs, N_pairs) array with the initial E
                and I rates; defaults to zeros. (The old signature used a
                mutable ``np.zeros((2, 75, 75))`` default, which broke for
                N_pairs != 75.)

        Returns:
            [r_E, r_I, I_E, I_I], each of shape (timesteps, N_pairs, N_pairs).
        """
        if init_cond is None:
            init_cond = np.zeros((2, self.N_pairs, self.N_pairs))
        r_E = np.zeros((timesteps, self.N_pairs, self.N_pairs))
        r_I = np.copy(r_E)
        # add initial conditions:
        r_E[0, :, :] = init_cond[0]
        r_I[0, :, :] = init_cond[1]
        I_E = np.zeros((timesteps, self.N_pairs, self.N_pairs))
        I_I = np.copy(I_E)
        for t in range(1, timesteps):
            # Input drive from external input and recurrent network.
            I_E[t, :, :] = c*h + np.sum(np.sum(self.W_EE * r_E[t-1, :, :], 1), 1).reshape(self.N_pairs, self.N_pairs).T - np.sum(np.sum(self.W_EI * r_I[t-1, :, :], 1), 1).reshape(self.N_pairs, self.N_pairs).T
            I_I[t, :, :] = c*h + np.sum(np.sum(self.W_IE * r_E[t-1, :, :], 1), 1).reshape(self.N_pairs, self.N_pairs).T - np.sum(np.sum(self.W_II * r_I[t-1, :, :], 1), 1).reshape(self.N_pairs, self.N_pairs).T
            # Steady-state rates: supralinear (power-law) I/O on rectified drive.
            # NOTE(review): self.k is shared between E and I populations here, as in
            # the original code — confirm against the paper if k_E/k_I should differ.
            rSS_E = np.multiply(self.k, np.power(np.fmax(0, I_E[t, :, :]), self.n_E))
            rSS_I = np.multiply(self.k, np.power(np.fmax(0, I_I[t, :, :]), self.n_I))
            # k is drawn from a normal and can (rarely) be negative; clamp to zero.
            rSS_E[rSS_E < 0] = 0
            rSS_I[rSS_I < 0] = 0
            # Euler step towards the steady state.
            r_E[t, :, :] = r_E[t-1, :, :] + dt*(np.divide(-r_E[t-1, :, :]+rSS_E, self.tau_E))
            r_I[t, :, :] = r_I[t-1, :, :] + dt*(np.divide(-r_I[t-1, :, :]+rSS_I, self.tau_I))
        return [r_E, r_I, I_E, I_I]

    # function to reproduce figure 6B of Rubin et al, 2015, showing the SSN
    # transition from external to network drive with dominant inhibition
    def plot_network_contrast_response(self, r_units=None, c_range=None):
        """Not yet implemented.

        Defaults were previously evaluated once at class-definition time
        (including an import-time np.random call); they are now built lazily.
        """
        if r_units is None:
            r_units = np.floor(75 * np.random.rand(25, 2))
        if c_range is None:
            c_range = np.linspace(3, 50, 12)
        pass
def diff(x, y):
    """Absolute orientation difference on a 180-degree circle, in [0, 90]."""
    wrapped = np.mod(x - y, 180)
    return np.minimum(wrapped, 180 - wrapped)
def G(x, y, sigma):
    """Gaussian falloff over the circular orientation difference (degrees)."""
    # Inlined circular difference (same computation as diff()): wraps at 180 deg.
    delta = np.abs(np.mod(x - y + 90, 180) - 90)
    return np.exp(-(delta ** 2) / (2 * sigma ** 2))
def G2D(x_range, y_range, mean, sigma):
    """Isotropic 2-D Gaussian centred at `mean`, evaluated at (x_range, y_range)."""
    dx = x_range - mean[0]
    dy = y_range - mean[1]
    return np.exp(-(dx ** 2 + dy ** 2) / (2 * sigma ** 2))
def mean_connections(W_ab):
    """Average number of non-zero connections per source unit.

    W_ab is a (N, n, n) stack of outgoing-weight matrices; returns the mean
    count of non-zero entries over the first axis.
    """
    counts = [np.count_nonzero(W_ab[idx, :, :]) for idx in range(W_ab.shape[0])]
    return float(sum(counts)) / W_ab.shape[0]
def stimulus_mask(x, length, sig_RF):
    """Smooth top-hat window of width `length` centred at x = 0.

    Product of a rising and a falling logistic with slope set by sig_RF.
    """
    rising = 1. / (1. + np.exp(-(x + length / 2.) / sig_RF))
    falling = 1. / (1. + np.exp(-(x - length / 2.) / sig_RF))
    return rising * (1. - falling)
# generate external drive for an oriented grating stimulus (circular or full frame)
# ori- orientation (degrees)
# size - diameter (degrees)
# centre - position in field of centre of stimulus (degrees, fsize/2 being the centre of the stimulus)
# ocularity - a scaling factor for which eye the stimulus is presented to (1 = contralateral, 0 = ipsilateral)
# sig_RF - sigma for the stimulus mask
# sig_FF - sigma for full field
# fsize - size of field (degrees, square field)
# full_frame - bool for using the full frame instead of a mask
def generate_ext_stimulus(ori, size, centre, OP_map, OD_map, ocularity, sig_RF=16./75, sig_FF = 32., fsize=16., full_frame=False):
    """External drive for an oriented grating shown to one eye.

    ori/size/centre describe the grating (degrees); ocularity selects the eye
    (1 = contralateral, 0 = ipsilateral) and scales the drive by the ocular
    dominance map; full_frame skips the circular mask.
    """
    if ocularity != 0 and ocularity != 1:
        raise ValueError('Ocularity must be either 0 (ipsilateral) or 1 (contralateral).')
    if centre[0] > fsize or centre[1] > fsize:
        raise ValueError('Centre of stimulus is off the grid of neurons!')
    ori_tuning = G(ori, OP_map, sig_FF)
    n = OP_map.shape[0]
    axis = np.linspace(0, fsize, n, False)
    xv, yv = np.meshgrid(axis, axis)
    if full_frame == True:
        h = ori_tuning
    else:
        radial = np.hypot(xv - centre[0], yv - centre[1])
        h = stimulus_mask(radial, size, sig_RF) * ori_tuning
    if ocularity == 1:
        return h * OD_map
    return h * np.abs(OD_map - 1)
def generate_mono_stimulus(ori, size, centre, OP_map, sig_RF=16./75, sig_FF=32., fsize=16., full_frame=False):
    """External drive for a monocular oriented grating (no ocular-dominance scaling)."""
    if centre[0] > fsize or centre[1] > fsize:
        raise ValueError('Centre of stimulus is off the grid of neurons!')
    ori_tuning = G(ori, OP_map, sig_FF)
    n = OP_map.shape[0]
    axis = np.linspace(0, fsize, n, False)
    xv, yv = np.meshgrid(axis, axis)
    if full_frame == True:
        return ori_tuning
    radial = np.hypot(xv - centre[0], yv - centre[1])
    return stimulus_mask(radial, size, sig_RF) * ori_tuning
# generate external drive for an annular stimulus for surround suppression experiments
# orientation - of the stimulus (degrees)
# inner_d - inner diameter of the stimulus (degrees)
# outer_d - outer diameter of the stimulus (degrees)
# ocularity - a scaling factor for which eye the stimulus is presented to (1 = contralateral, 0 = ipsilateral)
# mono - boolean for generating a monocular stimulus
# centre - about which the ring is placed
def generate_ring_stimulus(orientation, inner_d, outer_d, centre, ocularity, OP_map, OD_map=0, mono=False, sig_RF=16./75, sig_FF = 32., fsize=16.):
    """External drive for an annulus (surround-suppression stimulus).

    The ring spans inner_d..outer_d (degrees) around `centre`. With mono=True
    the ocular dominance map is ignored; otherwise ocularity (0 or 1) selects
    which eye's OD weighting is applied.
    """
    if centre[0] > fsize or centre[1] > fsize:
        raise ValueError('Centre of stimulus is off the grid of neurons!')
    if ocularity != 0 and ocularity != 1 and mono == False:
        raise ValueError('Ocularity must be either 0 (ipsilateral) or 1 (contralateral).')
    if inner_d >= outer_d:
        raise ValueError('Inner diameter must be less than the outer diameter (duh).')
    ori_tuning = G(orientation, OP_map, sig_FF)
    n = OP_map.shape[0]
    axis = np.linspace(0, fsize, n, False)
    xv, yv = np.meshgrid(axis, axis)
    radial = np.hypot(xv - centre[0], yv - centre[1])
    # Annulus = (disc of outer_d) minus (disc of inner_d).
    ring_mask = stimulus_mask(radial, outer_d, sig_RF) - stimulus_mask(radial, inner_d, sig_RF)
    if mono == False:
        if ocularity == 1:
            return ring_mask * ori_tuning * OD_map
        return ring_mask * ori_tuning * np.abs(OD_map - 1)
    return ring_mask * ori_tuning
# randomly generate connection weights for all the units in a square grid
def generate_connetion_weights( N_pairs, field_size, OP_map, kappa_E, kappa_I, J_EE, J_IE, J_EI, J_II, sig_EE, sig_IE, sig_EI, sig_II, sig_ori , quiet=False):
xy_range = np.linspace(0, field_size, N_pairs, False)
xv, yv = np.meshgrid(xy_range, xy_range) # x and y grid values (degrees)
G_EE = np.zeros((N_pairs**2, N_pairs, N_pairs))
G_IE = np.copy(G_EE)
# may not need these
G_EI = np.copy(G_EE)
G_II = np.copy(G_EE)
G_ori = np.copy(G_EE)
G_OD = np.copy(G_EE)
pW_EE = np.copy(G_EE)
pW_IE = np.copy(G_EE)
pW_EI = np.copy(G_EE)
pW_II = np.copy(G_EE)
rnd_EE = np.copy(G_EE)
rnd_IE = np.copy(G_EE)
rnd_EI = np.copy(G_EE)
rnd_II = np.copy(G_EE)
np.random.seed(1)
# iterate through each E/I pair:
for i in range(N_pairs):
for j in range(N_pairs):
G_EE[N_pairs*i+j, :, :] = G2D( xv, yv, (xv[0,i] , yv[j,0]), sig_EE)
G_IE[N_pairs*i+j, :, :] = G2D( xv, yv, (xv[0,i] , yv[j,0]), sig_IE)
G_EI[N_pairs*i+j, :, :] = G2D( xv, yv, (xv[0,i] , yv[j,0]), sig_EI)
G_II[N_pairs*i+j, :, :] = G2D( xv, yv, (xv[0,i] , yv[j,0]), sig_II)
G_ori[N_pairs*i+j,:,:] = G(OP_map[j,i], OP_map, sig_ori)
# Does ocular dominance affect connectivity?
# Not according to <NAME> Singer, 1992, pg. 210-11:
# "Analyses... provided no evidence for eye-specific selectivity of tangential connections"
# Leaving this commented for future experiments though:
# G_OD[N_pairs*i+j,:,:] = G(OD_map[j,i], OD_map, sig_OD)
rnd_EE[N_pairs*i+j, :, :] = np.random.rand(N_pairs, N_pairs)
rnd_IE[N_pairs*i+j, :, :] = np.random.rand(N_pairs, N_pairs)
rnd_EI[N_pairs*i+j, :, :] = np.random.rand(N_pairs, N_pairs)
rnd_II[N_pairs*i+j, :, :] = np.random.rand(N_pairs, N_pairs)
for i in range(N_pairs**2):
pW_EE[i,:,:] = kappa_E * np.multiply(G_EE[i,:,:], G_ori[i,:,:])
pW_IE[i,:,:] = kappa_E * np.multiply(G_IE[i,:,:], G_ori[i,:,:])
pW_EI[i,:,:] = kappa_I * np.multiply(G_EI[i,:,:], G_ori[i,:,:])
pW_II[i,:,:] = kappa_I * np.multiply(G_II[i,:,:], G_ori[i,:,:])
# for OD | |
headers = {}
return await self.get_loadbalance_health_ex_async(request, headers, runtime)
def get_loadbalance_health_ex(
    self,
    request: cas_models.GetLoadbalanceHealthRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.GetLoadbalanceHealthResponse:
    """Get the health status of a single load balancer (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.loadbalance.health.get', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.GetLoadbalanceHealthResponse().from_map(raw)
async def get_loadbalance_health_ex_async(
    self,
    request: cas_models.GetLoadbalanceHealthRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.GetLoadbalanceHealthResponse:
    """Async variant: get the health status of a single load balancer."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.loadbalance.health.get', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.GetLoadbalanceHealthResponse().from_map(raw)
def all_loadbalance_viptype(
    self,
    request: cas_models.AllLoadbalanceViptypeRequest,
) -> cas_models.AllLoadbalanceViptypeResponse:
    """List all VIP types, using default headers and runtime options."""
    return self.all_loadbalance_viptype_ex(request, {}, util_models.RuntimeOptions())
async def all_loadbalance_viptype_async(
    self,
    request: cas_models.AllLoadbalanceViptypeRequest,
) -> cas_models.AllLoadbalanceViptypeResponse:
    """Async variant: list all VIP types with default headers and runtime options."""
    return await self.all_loadbalance_viptype_ex_async(request, {}, util_models.RuntimeOptions())
def all_loadbalance_viptype_ex(
    self,
    request: cas_models.AllLoadbalanceViptypeRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllLoadbalanceViptypeResponse:
    """List all VIP types (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.loadbalance.viptype.all', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.AllLoadbalanceViptypeResponse().from_map(raw)
async def all_loadbalance_viptype_ex_async(
    self,
    request: cas_models.AllLoadbalanceViptypeRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllLoadbalanceViptypeResponse:
    """Async variant: list all VIP types (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.loadbalance.viptype.all', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.AllLoadbalanceViptypeResponse().from_map(raw)
def all_loadbalance_cluster(
    self,
    request: cas_models.AllLoadbalanceClusterRequest,
) -> cas_models.AllLoadbalanceClusterResponse:
    """List all load-balancer clusters, using default headers and runtime options."""
    return self.all_loadbalance_cluster_ex(request, {}, util_models.RuntimeOptions())
async def all_loadbalance_cluster_async(
    self,
    request: cas_models.AllLoadbalanceClusterRequest,
) -> cas_models.AllLoadbalanceClusterResponse:
    """Async variant: list all load-balancer clusters."""
    return await self.all_loadbalance_cluster_ex_async(request, {}, util_models.RuntimeOptions())
def all_loadbalance_cluster_ex(
    self,
    request: cas_models.AllLoadbalanceClusterRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllLoadbalanceClusterResponse:
    """List all load-balancer clusters (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.loadbalance.cluster.all', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.AllLoadbalanceClusterResponse().from_map(raw)
async def all_loadbalance_cluster_ex_async(
    self,
    request: cas_models.AllLoadbalanceClusterRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllLoadbalanceClusterResponse:
    """Async variant: list all load-balancer clusters (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.loadbalance.cluster.all', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.AllLoadbalanceClusterResponse().from_map(raw)
def query_vpc(
    self,
    request: cas_models.QueryVpcRequest,
) -> cas_models.QueryVpcResponse:
    """Query VPC details, using default headers and runtime options."""
    return self.query_vpc_ex(request, {}, util_models.RuntimeOptions())
async def query_vpc_async(
    self,
    request: cas_models.QueryVpcRequest,
) -> cas_models.QueryVpcResponse:
    """Async variant: query VPC details."""
    return await self.query_vpc_ex_async(request, {}, util_models.RuntimeOptions())
def query_vpc_ex(
    self,
    request: cas_models.QueryVpcRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryVpcResponse:
    """Query VPC details (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.vpc.query', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.QueryVpcResponse().from_map(raw)
async def query_vpc_ex_async(
    self,
    request: cas_models.QueryVpcRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryVpcResponse:
    """Async variant: query VPC details (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.vpc.query', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.QueryVpcResponse().from_map(raw)
def list_vpc_import(
    self,
    request: cas_models.ListVpcImportRequest,
) -> cas_models.ListVpcImportResponse:
    """List VPCs already imported into the workspace, with default headers/runtime."""
    return self.list_vpc_import_ex(request, {}, util_models.RuntimeOptions())
async def list_vpc_import_async(
    self,
    request: cas_models.ListVpcImportRequest,
) -> cas_models.ListVpcImportResponse:
    """Async variant: list VPCs already imported into the workspace."""
    return await self.list_vpc_import_ex_async(request, {}, util_models.RuntimeOptions())
def list_vpc_import_ex(
    self,
    request: cas_models.ListVpcImportRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.ListVpcImportResponse:
    """List VPCs already imported into the workspace (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.vpc.import.list', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.ListVpcImportResponse().from_map(raw)
async def list_vpc_import_ex_async(
    self,
    request: cas_models.ListVpcImportRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.ListVpcImportResponse:
    """Async variant: list VPCs already imported into the workspace."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.vpc.import.list', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.ListVpcImportResponse().from_map(raw)
def list_vpc_cidrblock(
    self,
    request: cas_models.ListVpcCidrblockRequest,
) -> cas_models.ListVpcCidrblockResponse:
    """List VPC CIDR-block information, with default headers and runtime options."""
    return self.list_vpc_cidrblock_ex(request, {}, util_models.RuntimeOptions())
async def list_vpc_cidrblock_async(
    self,
    request: cas_models.ListVpcCidrblockRequest,
) -> cas_models.ListVpcCidrblockResponse:
    """Async variant: list VPC CIDR-block information."""
    return await self.list_vpc_cidrblock_ex_async(request, {}, util_models.RuntimeOptions())
def list_vpc_cidrblock_ex(
    self,
    request: cas_models.ListVpcCidrblockRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.ListVpcCidrblockResponse:
    """List VPC CIDR-block information (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.vpc.cidrblock.list', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.ListVpcCidrblockResponse().from_map(raw)
async def list_vpc_cidrblock_ex_async(
    self,
    request: cas_models.ListVpcCidrblockRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.ListVpcCidrblockResponse:
    """Async variant: list VPC CIDR-block information (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.vpc.cidrblock.list', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.ListVpcCidrblockResponse().from_map(raw)
def query_vpc_vswitch(
    self,
    request: cas_models.QueryVpcVswitchRequest,
) -> cas_models.QueryVpcVswitchResponse:
    """Query vswitch information, using default headers and runtime options."""
    return self.query_vpc_vswitch_ex(request, {}, util_models.RuntimeOptions())
async def query_vpc_vswitch_async(
    self,
    request: cas_models.QueryVpcVswitchRequest,
) -> cas_models.QueryVpcVswitchResponse:
    """Async variant: query vswitch information."""
    return await self.query_vpc_vswitch_ex_async(request, {}, util_models.RuntimeOptions())
def query_vpc_vswitch_ex(
    self,
    request: cas_models.QueryVpcVswitchRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryVpcVswitchResponse:
    """Query vswitch information (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.vpc.vswitch.query', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.QueryVpcVswitchResponse().from_map(raw)
async def query_vpc_vswitch_ex_async(
    self,
    request: cas_models.QueryVpcVswitchRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryVpcVswitchResponse:
    """Async variant: query vswitch information (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.vpc.vswitch.query', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.QueryVpcVswitchResponse().from_map(raw)
def query_vpc_vroutertable(
    self,
    request: cas_models.QueryVpcVroutertableRequest,
) -> cas_models.QueryVpcVroutertableResponse:
    """Query route-table information, using default headers and runtime options."""
    return self.query_vpc_vroutertable_ex(request, {}, util_models.RuntimeOptions())
async def query_vpc_vroutertable_async(
    self,
    request: cas_models.QueryVpcVroutertableRequest,
) -> cas_models.QueryVpcVroutertableResponse:
    """Async variant: query route-table information."""
    return await self.query_vpc_vroutertable_ex_async(request, {}, util_models.RuntimeOptions())
def query_vpc_vroutertable_ex(
    self,
    request: cas_models.QueryVpcVroutertableRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryVpcVroutertableResponse:
    """Query route-table information (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.vpc.vroutertable.query', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.QueryVpcVroutertableResponse().from_map(raw)
async def query_vpc_vroutertable_ex_async(
    self,
    request: cas_models.QueryVpcVroutertableRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryVpcVroutertableResponse:
    """Async variant: query route-table information (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.vpc.vroutertable.query', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.QueryVpcVroutertableResponse().from_map(raw)
def get_loadbalance_rule(
    self,
    request: cas_models.GetLoadbalanceRuleRequest,
) -> cas_models.GetLoadbalanceRuleResponse:
    """Get a single LB rule, using default headers and runtime options."""
    return self.get_loadbalance_rule_ex(request, {}, util_models.RuntimeOptions())
async def get_loadbalance_rule_async(
    self,
    request: cas_models.GetLoadbalanceRuleRequest,
) -> cas_models.GetLoadbalanceRuleResponse:
    """Async variant: get a single LB rule."""
    return await self.get_loadbalance_rule_ex_async(request, {}, util_models.RuntimeOptions())
def get_loadbalance_rule_ex(
    self,
    request: cas_models.GetLoadbalanceRuleRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.GetLoadbalanceRuleResponse:
    """Get a single LB rule (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.loadbalance.rule.get', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.GetLoadbalanceRuleResponse().from_map(raw)
async def get_loadbalance_rule_ex_async(
    self,
    request: cas_models.GetLoadbalanceRuleRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.GetLoadbalanceRuleResponse:
    """Async variant: get a single LB rule (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.loadbalance.rule.get', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.GetLoadbalanceRuleResponse().from_map(raw)
def query_loadbalance_rule(
    self,
    request: cas_models.QueryLoadbalanceRuleRequest,
) -> cas_models.QueryLoadbalanceRuleResponse:
    """Batch-query LB rules, using default headers and runtime options."""
    return self.query_loadbalance_rule_ex(request, {}, util_models.RuntimeOptions())
async def query_loadbalance_rule_async(
    self,
    request: cas_models.QueryLoadbalanceRuleRequest,
) -> cas_models.QueryLoadbalanceRuleResponse:
    """Async variant: batch-query LB rules."""
    return await self.query_loadbalance_rule_ex_async(request, {}, util_models.RuntimeOptions())
def query_loadbalance_rule_ex(
    self,
    request: cas_models.QueryLoadbalanceRuleRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryLoadbalanceRuleResponse:
    """Batch-query LB rules (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.loadbalance.rule.query', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.QueryLoadbalanceRuleResponse().from_map(raw)
async def query_loadbalance_rule_ex_async(
    self,
    request: cas_models.QueryLoadbalanceRuleRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryLoadbalanceRuleResponse:
    """Async variant: batch-query LB rules (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.loadbalance.rule.query', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.QueryLoadbalanceRuleResponse().from_map(raw)
def get_loadbalance(
    self,
    request: cas_models.GetLoadbalanceRequest,
) -> cas_models.GetLoadbalanceResponse:
    """Get a single LB by id, using default headers and runtime options."""
    return self.get_loadbalance_ex(request, {}, util_models.RuntimeOptions())
async def get_loadbalance_async(
    self,
    request: cas_models.GetLoadbalanceRequest,
) -> cas_models.GetLoadbalanceResponse:
    """Async variant: get a single LB by id."""
    return await self.get_loadbalance_ex_async(request, {}, util_models.RuntimeOptions())
def get_loadbalance_ex(
    self,
    request: cas_models.GetLoadbalanceRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.GetLoadbalanceResponse:
    """Get a single LB by id (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.cas.loadbalance.get', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.GetLoadbalanceResponse().from_map(raw)
async def get_loadbalance_ex_async(
    self,
    request: cas_models.GetLoadbalanceRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.GetLoadbalanceResponse:
    """Async variant: get a single LB by id (explicit headers/runtime)."""
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.cas.loadbalance.get', 'HTTPS', 'POST', '/gateway.do', TeaCore.to_map(request), headers, runtime)
    return cas_models.GetLoadbalanceResponse().from_map(raw)
def all_loadbalance_mount(
    self,
    request: cas_models.AllLoadbalanceMountRequest,
) -> cas_models.AllLoadbalanceMountResponse:
    """List all backend hosts mounted on an LB, with default headers/runtime."""
    return self.all_loadbalance_mount_ex(request, {}, util_models.RuntimeOptions())
async def all_loadbalance_mount_async(
self,
request: cas_models.AllLoadbalanceMountRequest,
) -> cas_models.AllLoadbalanceMountResponse:
"""
Description: 获取LB后端所有主机
Summary: 获取LB后端所有主机
"""
runtime = util_models.RuntimeOptions()
headers = | |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from collections import Counter
from pyspark.rdd import _load_from_socket
from pyspark.sql.pandas.serializers import ArrowCollectSerializer
from pyspark.sql.types import IntegralType
from pyspark.sql.types import ByteType, ShortType, IntegerType, LongType, FloatType, \
DoubleType, BooleanType, MapType, TimestampType, StructType, DataType
from pyspark.traceback_utils import SCCallSiteSync
class PandasConversionMixin(object):
"""
Min-in for the conversion from Spark to pandas. Currently, only :class:`DataFrame`
can use this class.
"""
def toPandas(self):
    """
    Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.

    This is only available if Pandas is installed and available.

    .. versionadded:: 1.3.0

    Notes
    -----
    This method should only be used if the resulting Pandas's :class:`DataFrame` is
    expected to be small, as all the data is loaded into the driver's memory.

    Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.

    Examples
    --------
    >>> df.toPandas() # doctest: +SKIP
       age   name
    0    2  Alice
    1    5    Bob
    """
    from pyspark.sql.dataframe import DataFrame

    assert isinstance(self, DataFrame)

    from pyspark.sql.pandas.utils import require_minimum_pandas_version
    require_minimum_pandas_version()

    import numpy as np
    import pandas as pd

    timezone = self.sql_ctx._conf.sessionLocalTimeZone()

    if self.sql_ctx._conf.arrowPySparkEnabled():
        use_arrow = True
        try:
            from pyspark.sql.pandas.types import to_arrow_schema
            from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
            require_minimum_pyarrow_version()
            to_arrow_schema(self.schema)
        except Exception as e:
            if self.sql_ctx._conf.arrowPySparkFallbackEnabled():
                msg = (
                    "toPandas attempted Arrow optimization because "
                    "'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
                    "failed by the reason below:\n %s\n"
                    "Attempting non-optimization as "
                    "'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
                    "true." % str(e))
                warnings.warn(msg)
                use_arrow = False
            else:
                msg = (
                    "toPandas attempted Arrow optimization because "
                    "'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
                    "reached the error below and will not continue because automatic fallback "
                    "with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to "
                    "false.\n %s" % str(e))
                warnings.warn(msg)
                raise

        # Try to use Arrow optimization when the schema is supported and the required version
        # of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled.
        if use_arrow:
            try:
                from pyspark.sql.pandas.types import _check_series_localize_timestamps, \
                    _convert_map_items_to_dict
                import pyarrow
                # Rename columns to avoid duplicated column names.
                tmp_column_names = ['col_{}'.format(i) for i in range(len(self.columns))]
                self_destruct = self.sql_ctx._conf.arrowPySparkSelfDestructEnabled()
                batches = self.toDF(*tmp_column_names)._collect_as_arrow(
                    split_batches=self_destruct)
                if len(batches) > 0:
                    table = pyarrow.Table.from_batches(batches)
                    # Ensure only the table has a reference to the batches, so that
                    # self_destruct (if enabled) is effective
                    del batches
                    # Pandas DataFrame created from PyArrow uses datetime64[ns] for date type
                    # values, but we should use datetime.date to match the behavior with when
                    # Arrow optimization is disabled.
                    pandas_options = {'date_as_object': True}
                    if self_destruct:
                        # Configure PyArrow to use as little memory as possible:
                        # self_destruct - free columns as they are converted
                        # split_blocks - create a separate Pandas block for each column
                        # use_threads - convert one column at a time
                        pandas_options.update({
                            'self_destruct': True,
                            'split_blocks': True,
                            'use_threads': False,
                        })
                    pdf = table.to_pandas(**pandas_options)
                    # Rename back to the original column names.
                    pdf.columns = self.columns
                    for field in self.schema:
                        if isinstance(field.dataType, TimestampType):
                            pdf[field.name] = \
                                _check_series_localize_timestamps(pdf[field.name], timezone)
                        elif isinstance(field.dataType, MapType):
                            pdf[field.name] = \
                                _convert_map_items_to_dict(pdf[field.name])
                    return pdf
                else:
                    return pd.DataFrame.from_records([], columns=self.columns)
            except Exception as e:
                # We might have to allow fallback here as well but multiple Spark jobs can
                # be executed. So, simply fail in this case for now.
                msg = (
                    "toPandas attempted Arrow optimization because "
                    "'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
                    "reached the error below and can not continue. Note that "
                    "'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an "
                    "effect on failures in the middle of "
                    "computation.\n %s" % str(e))
                warnings.warn(msg)
                raise

    # Below is toPandas without Arrow optimization.
    pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
    column_counter = Counter(self.columns)

    dtype = [None] * len(self.schema)
    for fieldIdx, field in enumerate(self.schema):
        # For duplicate column name, we use `iloc` to access it.
        if column_counter[field.name] > 1:
            pandas_col = pdf.iloc[:, fieldIdx]
        else:
            pandas_col = pdf[field.name]

        pandas_type = PandasConversionMixin._to_corrected_pandas_type(field.dataType)
        # SPARK-21766: if an integer field is nullable and has null values, it can be
        # inferred by pandas as float column. Once we convert the column with NaN back
        # to integer type e.g., np.int16, we will hit exception. So we use the inferred
        # float type, not the corrected type from the schema in this case.
        if pandas_type is not None and \
                not(isinstance(field.dataType, IntegralType) and field.nullable and
                    pandas_col.isnull().any()):
            dtype[fieldIdx] = pandas_type
        # Ensure we fall back to nullable numpy types, even when whole column is null:
        if isinstance(field.dataType, IntegralType) and pandas_col.isnull().any():
            dtype[fieldIdx] = np.float64
        if isinstance(field.dataType, BooleanType) and pandas_col.isnull().any():
            # BUG FIX: `np.object` was a deprecated alias of the builtin `object`
            # and was removed in NumPy 1.24; the builtin is behaviorally identical
            # and avoids an AttributeError on modern NumPy.
            dtype[fieldIdx] = object

    df = pd.DataFrame()
    for index, t in enumerate(dtype):
        column_name = self.schema[index].name

        # For duplicate column name, we use `iloc` to access it.
        if column_counter[column_name] > 1:
            series = pdf.iloc[:, index]
        else:
            series = pdf[column_name]

        if t is not None:
            series = series.astype(t, copy=False)

        # `insert` API makes copy of data, we only do it for Series of duplicate column names.
        # `pdf.iloc[:, index] = pdf.iloc[:, index]...` doesn't always work because `iloc` could
        # return a view or a copy depending by context.
        if column_counter[column_name] > 1:
            df.insert(index, column_name, series, allow_duplicates=True)
        else:
            df[column_name] = series

    pdf = df

    if timezone is None:
        return pdf
    else:
        from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz
        for field in self.schema:
            # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
            if isinstance(field.dataType, TimestampType):
                pdf[field.name] = \
                    _check_series_convert_timestamps_local_tz(pdf[field.name], timezone)
        return pdf
@staticmethod
def _to_corrected_pandas_type(dt):
    """
    When converting Spark SQL records to Pandas :class:`DataFrame`, the inferred data type
    may be wrong. This method gets the corrected data type for Pandas if that type may be
    inferred incorrectly.

    :param dt: a Spark SQL ``DataType`` instance from the schema.
    :return: the numpy dtype to coerce the pandas column to, or None when the
        pandas-inferred dtype can be trusted for this Spark type.
    """
    import numpy as np

    # Exact type match is intended (no subclass dispatch), hence keying on
    # type(dt) instead of using isinstance().
    corrections = {
        ByteType: np.int8,
        ShortType: np.int16,
        IntegerType: np.int32,
        LongType: np.int64,
        FloatType: np.float32,
        DoubleType: np.float64,
        # np.bool and np.object were deprecated in numpy 1.20 and removed in
        # 1.24; np.bool_ is the supported spelling and behaves identically
        # for Series.astype().
        BooleanType: np.bool_,
        TimestampType: np.datetime64,
    }
    return corrections.get(type(dt))
def _collect_as_arrow(self, split_batches=False):
    """
    Returns all records as a list of ArrowRecordBatches, pyarrow must be installed
    and available on driver and worker Python environments.
    This is an experimental feature.

    :param split_batches: split batches such that each column is in its own allocation, so
        that the selfDestruct optimization is effective; default False.

    .. note:: Experimental.
    """
    from pyspark.sql.dataframe import DataFrame
    assert isinstance(self, DataFrame)
    # Record the call site so the JVM job shows a meaningful description.
    with SCCallSiteSync(self._sc):
        port, auth_secret, jsocket_auth_server = self._jdf.collectAsArrowToPython()
    # Collect list of un-ordered batches where last element is a list of correct order indices
    try:
        batch_stream = _load_from_socket((port, auth_secret), ArrowCollectSerializer())
        if split_batches:
            # When spark.sql.execution.arrow.pyspark.selfDestruct.enabled, ensure
            # each column in each record batch is contained in its own allocation.
            # Otherwise, selfDestruct does nothing; it frees each column as its
            # converted, but each column will actually be a list of slices of record
            # batches, and so no memory is actually freed until all columns are
            # converted.
            import pyarrow as pa
            results = []
            for batch_or_indices in batch_stream:
                if isinstance(batch_or_indices, pa.RecordBatch):
                    batch_or_indices = pa.RecordBatch.from_arrays([
                        # This call actually reallocates the array
                        pa.concat_arrays([array])
                        for array in batch_or_indices
                    ], schema=batch_or_indices.schema)
                results.append(batch_or_indices)
        else:
            results = list(batch_stream)
    finally:
        # Join serving thread and raise any exceptions from collectAsArrowToPython
        # (runs even if reading from the socket failed above).
        jsocket_auth_server.getResult()
    # Separate RecordBatches from batch order indices in results
    batches = results[:-1]
    batch_order = results[-1]
    # Re-order the batch list using the correct order
    return [batches[i] for i in batch_order]
class SparkConversionMixin(object):
"""
Min-in for the conversion from pandas to Spark. Currently, only :class:`SparkSession`
can use this class.
"""
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
from pyspark.sql import SparkSession
assert isinstance(self, | |
v1 API"""
if "Token" in www_authenticate:
return self.v1_auth_header
return ""
def get_v1_image_tags(self, endpoint, imagerepo):
    """Query the v1 API for all tags defined in an image repository."""
    url = "%s/v1/repositories/%s/tags" % (endpoint, imagerepo)
    Msg().err("tags url:", url, l=Msg.DBG)
    (hdr, buf) = self._get_url(url)
    try:
        tags = json.loads(buf.getvalue())
    except (IOError, OSError, AttributeError, ValueError, TypeError):
        tags = []
    return (hdr.data, tags)
def get_v1_image_tag(self, endpoint, imagerepo, tag):
    """Resolve one specific tag of a repository via the v1 API."""
    url = "%s/v1/repositories/%s/tags/%s" % (endpoint, imagerepo, tag)
    Msg().err("tags url:", url, l=Msg.DBG)
    (hdr, buf) = self._get_url(url)
    try:
        tag_info = json.loads(buf.getvalue())
    except (IOError, OSError, AttributeError, ValueError, TypeError):
        tag_info = []
    return (hdr.data, tag_info)
def get_v1_image_ancestry(self, endpoint, image_id):
    """Fetch the ancestry of an image: an ordered list of layer ids."""
    url = "%s/v1/images/%s/ancestry" % (endpoint, image_id)
    Msg().err("ancestry url:", url, l=Msg.DBG)
    (hdr, buf) = self._get_url(url)
    try:
        ancestry = json.loads(buf.getvalue())
    except (IOError, OSError, AttributeError, ValueError, TypeError):
        ancestry = []
    return (hdr.data, ancestry)
def get_v1_image_json(self, endpoint, layer_id):
    """Download the JSON metadata file of one layer into the local repo.

    Returns True on success, False otherwise.
    """
    url = "%s/v1/images/%s/json" % (endpoint, layer_id)
    Msg().err("json url:", url, l=Msg.DBG)
    filename = "%s/%s.json" % (self.localrepo.layersdir, layer_id)
    if not self._get_file(url, filename, 0):
        return False
    self.localrepo.add_image_layer(filename)
    return True
def get_v1_image_layer(self, endpoint, layer_id):
    """Download the tarball data file of one layer into the local repo.

    Returns True on success, False otherwise.
    """
    url = "%s/v1/images/%s/layer" % (endpoint, layer_id)
    Msg().err("layer url:", url, l=Msg.DBG)
    filename = "%s/%s.layer" % (self.localrepo.layersdir, layer_id)
    if not self._get_file(url, filename, 3):
        return False
    self.localrepo.add_image_layer(filename)
    return True
def get_v1_layers_all(self, endpoint, layer_list):
    """Download data plus metadata files for every layer in layer_list.

    Layers are fetched base-first (reversed order). Returns the list of
    downloaded file names, or [] as soon as any download fails.
    """
    files = []
    if not layer_list:
        return files
    for layer_id in reversed(layer_list):
        Msg().err("Downloading layer:", layer_id, l=Msg.INF)
        if not self.get_v1_image_json(endpoint, layer_id):
            return []
        files.append(layer_id + ".json")
        if not self.get_v1_image_layer(endpoint, layer_id):
            return []
        files.append(layer_id + ".layer")
    return files
def _get_v2_auth(self, www_authenticate, retry):
    """Authentication for v2 API.

    Parses a WWW-Authenticate challenge. For "Bearer" challenges the realm
    URL is contacted (with the stored Basic token, if any) to obtain a
    bearer token; for "Basic" challenges the stored base64 token is reused.

    :param www_authenticate: value of the WWW-Authenticate response header
    :param retry: retry count forwarded to the token request
    :return: full "Authorization: ..." header line, or "" on failure
    """
    auth_header = ""
    (bearer, auth_data) = www_authenticate.rsplit(" ", 1)
    if bearer == "Bearer":
        auth_fields = self._split_fields(auth_data)
        if "realm" in auth_fields:
            # Build the token endpoint URL from the remaining challenge
            # fields (service, scope, ...), excluding "realm" itself.
            auth_url = auth_fields["realm"] + "?"
            for field in auth_fields:
                if field != "realm":
                    auth_url += field + "=" + auth_fields[field] + "&"
            header = []
            if self.v2_auth_token:
                header = ["Authorization: Basic %s" % (self.v2_auth_token)]
            (dummy, auth_buf) = self._get_url(auth_url, header=header, RETRY=retry)
            token_buf = auth_buf.getvalue()
            if token_buf and "token" in token_buf:
                try:
                    auth_token = json.loads(token_buf)
                except (IOError, OSError, AttributeError,
                        ValueError, TypeError):
                    return auth_header
                auth_header = "Authorization: Bearer " + \
                    auth_token["token"]
                # Cache for subsequent requests.
                self.v2_auth_header = auth_header
    # PR #126
    elif 'BASIC' in bearer or 'Basic' in bearer:
        auth_header = "Authorization: Basic %s" %(self.v2_auth_token)
        self.v2_auth_header = auth_header
    return auth_header
def get_v2_login_token(self, username, password):
    """Build and store a v2 registry login token from credentials.

    The token is the base64 encoding of "username:password" as used in
    HTTP Basic authentication.

    :return: the token string, or "" when credentials are missing or
        encoding fails.
    """
    if not (username and password):
        return ""
    try:
        # base64.b64encode() requires bytes on Python 3; the previous
        # str-only call raised TypeError there (silently caught), so a
        # token was never produced. Encode first, then decode the result
        # back to str for use in header strings.
        userpass = ("%s:%s" % (username, password)).encode("utf-8")
        self.v2_auth_token = base64.b64encode(userpass).decode("ascii")
    except (KeyError, AttributeError, TypeError, ValueError, NameError):
        self.v2_auth_token = ""
    return self.v2_auth_token
def set_v2_login_token(self, v2_auth_token):
    """Load previously created login token (see get_v2_login_token)."""
    # Stored verbatim; later used to build Authorization headers.
    self.v2_auth_token = v2_auth_token
def is_v2(self):
    """Probe the registry root URL to find out whether it speaks the v2 API.

    Both 200 (open registry) and 401 (auth required) indicate v2 support.
    """
    (hdr, dummy) = self._get_url(self.registry_url + "/v2/")
    try:
        status = hdr.data["X-ND-HTTPSTATUS"]
        return "200" in status or "401" in status
    except (KeyError, AttributeError, TypeError):
        pass
    return False
def get_v2_image_manifest(self, imagerepo, tag):
    """Get the image manifest which contains JSON metadata
    that is common to all layers in this image tag
    """
    # Official Docker Hub images without a namespace live under "library/".
    if self._is_docker_registry() and "/" not in imagerepo:
        path = "/v2/library/" + imagerepo + "/manifests/" + tag
    else:
        path = "/v2/" + imagerepo + "/manifests/" + tag
    url = self.registry_url + path
    Msg().err("manifest url:", url, l=Msg.DBG)
    (hdr, buf) = self._get_url(url)
    try:
        manifest = json.loads(buf.getvalue())
    except (IOError, OSError, AttributeError, ValueError, TypeError):
        manifest = []
    return (hdr.data, manifest)
def get_v2_image_layer(self, imagerepo, layer_id):
    """Download one v2 layer blob (tarball) into the local repository.

    Returns True on success, False otherwise.
    """
    # Official Docker Hub images without a namespace live under "library/".
    if self._is_docker_registry() and "/" not in imagerepo:
        path = "/v2/library/" + imagerepo + "/blobs/" + layer_id
    else:
        path = "/v2/" + imagerepo + "/blobs/" + layer_id
    url = self.registry_url + path
    Msg().err("layer url:", url, l=Msg.DBG)
    filename = self.localrepo.layersdir + "/" + layer_id
    if not self._get_file(url, filename, 3):
        return False
    self.localrepo.add_image_layer(filename)
    return True
def get_v2_layers_all(self, imagerepo, fslayers):
    """Download every layer blob referenced by a v2 manifest.

    Layers are fetched base-first (reversed order). Returns the list of
    downloaded blob names, or [] as soon as any download fails.
    """
    files = []
    if not fslayers:
        return files
    for layer in reversed(fslayers):
        blob = layer["blobSum"]
        Msg().err("Downloading layer:", blob,
                  l=Msg.INF)
        if not self.get_v2_image_layer(imagerepo, blob):
            return []
        files.append(blob)
    return files
def get_v2(self, imagerepo, tag):
    """Pull one tagged image through the v2 API.

    Returns the list of downloaded files, or [] on any failure.
    """
    files = []
    (dummy, manifest) = self.get_v2_image_manifest(imagerepo, tag)
    try:
        ok = (self.localrepo.setup_tag(tag) and
              self.localrepo.set_version("v2"))
        if not ok:
            Msg().err("Error: setting localrepo v2 tag and version")
            return []
        self.localrepo.save_json("manifest", manifest)
        Msg().err("v2 layers: %s" % (imagerepo), l=Msg.DBG)
        files = self.get_v2_layers_all(imagerepo,
                                       manifest["fsLayers"])
    except (KeyError, AttributeError, IndexError, ValueError, TypeError):
        # Malformed manifest: return whatever was collected so far ([]).
        pass
    return files
def _get_v1_id_from_tags(self, tags_obj, tag):
"""Get image id from array of tags"""
if isinstance(tags_obj, dict):
try:
return tags_obj[tag]
except KeyError:
pass
elif isinstance(tags_obj, []):
try:
for tag_dict in tags_obj:
if tag_dict["name"] == tag:
return tag_dict["layer"]
except KeyError:
pass
return ""
def _get_v1_id_from_images(self, images_array, short_id):
"""Get long image id from array of images using the short id"""
try:
for image_dict in images_array:
if image_dict["id"][0:8] == short_id:
return image_dict["id"]
except KeyError:
pass
return ""
def get_v1(self, imagerepo, tag):
    """Pull container with v1 API.

    Resolves the endpoint and image id for the tag, then downloads the
    ancestry (ordered layer list) and all layer files into the local
    repository.

    :return: list of downloaded file names, or [] on any failure.
    """
    Msg().err("v1 image id: %s" % (imagerepo), l=Msg.DBG)
    (hdr, images_array) = self.get_v1_repo(imagerepo)
    if not images_array:
        Msg().err("Error: image not found")
        return []
    try:
        # The index may hand us off to a dedicated registry endpoint.
        endpoint = "http://" + hdr["x-docker-endpoints"]
    except KeyError:
        endpoint = self.index_url
    (dummy, tags_array) = self.get_v1_image_tags(endpoint, imagerepo)
    image_id = self._get_v1_id_from_tags(tags_array, tag)
    if not image_id:
        Msg().err("Error: image tag not found")
        return []
    if len(image_id) <= 8:
        # Short id: expand to the full id via the images list.
        image_id = self._get_v1_id_from_images(images_array, image_id)
        if not image_id:
            Msg().err("Error: image id not found")
            return []
    if not (self.localrepo.setup_tag(tag) and
            self.localrepo.set_version("v1")):
        Msg().err("Error: setting localrepo v1 tag and version")
        return []
    Msg().err("v1 ancestry: %s" % image_id, l=Msg.DBG)
    (dummy, ancestry) = self.get_v1_image_ancestry(endpoint, image_id)
    if not ancestry:
        Msg().err("Error: ancestry not found")
        return []
    self.localrepo.save_json("ancestry", ancestry)
    Msg().err("v1 layers: %s" % image_id, l=Msg.DBG)
    files = self.get_v1_layers_all(endpoint, ancestry)
    return files
def _parse_imagerepo(self, imagerepo):
    """Parse imagerepo to extract registry.

    Handles "host/[library/]repo" forms: a first component containing a
    dot is treated as a registry hostname, and a leading "library/"
    namespace is stripped. As a side effect sets self.registry_url and
    self.index_url when a registry could be resolved.

    :return: tuple (imagerepo, remoterepo) where remoterepo is the
        repository path as known by the remote server.
    """
    remoterepo = imagerepo
    registry = ""
    registry_url = ""
    index_url = ""
    components = imagerepo.split("/")
    if '.' in components[0] and len(components) >= 2:
        # First component looks like a hostname -> explicit registry.
        registry = components[0]
        if components[1] == "library":
            remoterepo = "/".join(components[2:])
            del components[1]
            imagerepo = "/".join(components)
        else:
            remoterepo = "/".join(components[1:])
    else:
        if components[0] == "library" and len(components) >= 1:
            del components[0]
            remoterepo = "/".join(components)
            imagerepo = "/".join(components)
    if registry:
        try:
            # Known registries may define distinct registry/index URLs.
            registry_url = Config.docker_registries[registry][0]
            index_url = Config.docker_registries[registry][1]
        except (KeyError, NameError, TypeError):
            # Unknown registry: assume https and a shared endpoint.
            registry_url = "https://%s" % registry
            index_url = registry_url
    if registry_url:
        self.registry_url = registry_url
    if index_url:
        self.index_url = index_url
    return (imagerepo, remoterepo)
def get(self, imagerepo, tag):
    """Pull a docker image from a v2 registry or v1 index"""
    Msg().err("get imagerepo: %s tag: %s" % (imagerepo, tag), l=Msg.DBG)
    (imagerepo, remoterepo) = self._parse_imagerepo(imagerepo)
    new_repo = not self.localrepo.cd_imagerepo(imagerepo, tag)
    if new_repo:
        self.localrepo.setup_imagerepo(imagerepo)
    if self.is_v2():
        files = self.get_v2(remoterepo, tag)  # try v2
    else:
        files = self.get_v1(remoterepo, tag)  # try v1
    if new_repo and not files:
        # Nothing was downloaded: remove the repo we just created.
        self.localrepo.del_imagerepo(imagerepo, tag, False)
    return files
def search_init(self, pause):
    """Reset the pagination state before starting a new search."""
    self.search_ended = False
    self.search_link = ""
    self.search_page = 0
    self.search_pause = pause
def search_get_page_v1(self, expression):
    """Fetch one page of v1 search results from the index server.

    Sets self.search_ended when the last page is reached or on error.
    """
    query = "/v1/search?q=" + expression if expression else "/v1/search?"
    url = self.index_url + query + "&page=" + str(self.search_page)
    (dummy, buf) = self._get_url(url)
    try:
        repo_list = json.loads(buf.getvalue())
        if repo_list["page"] == repo_list["num_pages"]:
            self.search_ended = True
        return repo_list
    except (IOError, OSError, AttributeError,
            ValueError, TypeError):
        self.search_ended = True
        return []
def catalog_get_page_v2(self, lines):
    """Get search results from Docker Hub using v2 API.

    Uses the /v2/_catalog endpoint; pagination follows the next-page URL
    advertised in the "Link" response header, stored in self.search_link.

    :param lines: page size (number of repositories per page)
    :return: parsed catalog JSON, or [] on error
    """
    url = self.registry_url + "/v2/_catalog"
    if self.search_pause:
        if self.search_page == 1:
            url += "?n=" + str(lines)
        else:
            # Follow the link extracted from the previous response.
            url = self.registry_url + self.search_link
    (hdr, buf) = self._get_url(url)
    try:
        # The next-page URL is enclosed in '<' and '>' in the Link header.
        match = re.search(r"\<([^>]+)\>", hdr.data["link"])
        if match:
            self.search_link = match.group(1)
    except (AttributeError, NameError, KeyError):
        # No Link header -> this was the last page.
        self.search_ended = True
    try:
        return json.loads(buf.getvalue())
    except (IOError, OSError, AttributeError,
            ValueError, TypeError):
        self.search_ended = True
        return []
def search_get_page(self, expression):
    """Fetch the next page of search results (v2 catalog or v1 search)."""
    if self.search_ended:
        return []
    self.search_page += 1
    # Docker Hub itself has a dedicated v1 search API; plain v2
    # registries only expose the catalog endpoint.
    if self.is_v2() and not self._is_docker_registry():
        return self.catalog_get_page_v2(self.search_lines)
    return self.search_get_page_v1(expression)
class DockerLocalFileAPI(object):
"""Manipulate container and/or image files produced by Docker"""
def __init__(self, localrepo):
    """Store the local repository used for loaded images/containers."""
    self.localrepo = localrepo
def _load_structure(self, tmp_imagedir):
"""Load the structure of a Docker pulled image"""
structure = {}
structure["layers"] = dict()
if FileUtil(tmp_imagedir).isdir():
for | |
<gh_stars>10-100
"""
The module contains a set of auxiliary functions facilitating the tight-binding computations
"""
from __future__ import print_function
from __future__ import absolute_import
from itertools import product
import numpy as np
import yaml
def accum(accmap, input, func=None, size=None, fill_value=0, dtype=None):
    """An accumulation function similar to Matlab's `accumarray` function.

    Parameters
    ----------
    accmap : ndarray
        Accumulation map: for every element of `input` it gives the index
        in the output array where that element must be accumulated. Its
        first `input.ndim` dimensions must equal `input.shape`; an optional
        trailing dimension holds multi-dimensional output indices.
    input : ndarray
        The input data to be accumulated.
    func : callable or None
        Reduction applied to each bucket of values; numpy.sum when None.
    size : ndarray or None
        Shape of the output; derived from the maxima of `accmap` when None.
    fill_value : scalar
        Value stored at output positions that received no input.
    dtype : numpy data type, or None
        Output dtype; the dtype of `input` when None.

    Returns
    -------
    out : ndarray
        The accumulated results.
    """
    if accmap.shape[:input.ndim] != input.shape:
        raise ValueError("The initial dimensions of accmap must be the same as a.shape")
    func = np.sum if func is None else func
    dtype = input.dtype if dtype is None else dtype
    if accmap.shape == input.shape:
        # 1D output addressed by scalar indices: add the index axis.
        accmap = np.expand_dims(accmap, -1)
    if size is None:
        adims = tuple(range(input.ndim))
        size = 1 + np.squeeze(np.apply_over_axes(np.max, accmap, axes=adims))
    size = np.atleast_1d(size)

    # Bucket every input value under its destination index.
    buckets = np.empty(size, dtype='O')
    for pos in product(*[range(k) for k in size]):
        buckets[pos] = []
    for pos in product(*[range(k) for k in input.shape]):
        buckets[tuple(accmap[pos])].append(input[pos])

    # Reduce each bucket into the output array.
    out = np.empty(size, dtype=dtype)
    for pos in product(*[range(k) for k in size]):
        out[pos] = fill_value if buckets[pos] == [] else func(buckets[pos])
    return out
def xyz2np(xyz):
    """Transforms xyz-file formatted string to lists of atomic labels and coordinates

    Parameters
    ----------
    xyz :
        xyz-formatted string

    Returns
    -------
    list, list
        list of labels (element name + per-element counter, e.g. 'H1', 'H2')
        and Nx3 array of coordinates
    """
    lines = xyz.splitlines()
    num_of_atoms = int(lines[0])
    coords = np.zeros((num_of_atoms, 3))
    atoms = []
    seen = {}
    row = 0
    # Skip the atom-count and comment lines; ignore blank lines.
    for line in lines[2:]:
        if not line.strip():
            continue
        fields = line.split()
        # Strip any digits the input label already carries, then append a
        # running counter per element so labels stay unique.
        element = ''.join(ch for ch in fields[0] if not ch.isdigit())
        seen[element] = seen.get(element, 0) + 1
        atoms.append(element + str(seen[element]))
        coords[row, 0] = float(fields[1])
        coords[row, 1] = float(fields[2])
        coords[row, 2] = float(fields[3])
        row += 1
    return atoms, coords
def count_species(list_of_labels):
    """From the list of labels creates a dictionary where the keys represent
    chemical elements (labels with digits stripped) and values are the
    numbers of their repetitions in the list.

    Parameters
    ----------
    list_of_labels :
        list of atom labels such as 'H1', 'H2', 'O1'

    Returns
    -------
    dict
        mapping element -> occurrence count
    """
    counter = {}
    for label in list_of_labels:
        element = ''.join(ch for ch in label if not ch.isdigit())
        counter[element] = counter.get(element, 0) + 1
    return counter
def get_k_coords(special_points, num_of_points, label):
    """Generates an array of coordinates in the k-space from the set of
    high-symmetry points and number of nodes between them.

    Parameters
    ----------
    special_points :
        list of labels for high-symmetry points
    num_of_points :
        list of node numbers in each section of the path in the k-space
    label :
        chemical element name ('Bi' or 'Si'), or a dict mapping point
        labels to coordinates

    Returns
    -------
    numpy.ndarray
        array of coordinates in k-space

    Raises
    ------
    ValueError
        if `label` is an unrecognized element name
    """
    from nanonet.tb.special_points import SPECIAL_K_POINTS_BI, SPECIAL_K_POINTS_SI

    if isinstance(label, str):
        if label == 'Bi':
            k_points = SPECIAL_K_POINTS_BI
        elif label == 'Si':
            k_points = SPECIAL_K_POINTS_SI
        else:
            # Previously an unknown element name fell through with the table
            # unbound and crashed later with UnboundLocalError; fail early
            # with a clear message instead.
            raise ValueError("Unknown chemical element: {}".format(label))
    else:
        k_points = label

    k_vectors = np.zeros((sum(num_of_points), 3))
    offset = 0
    for j, npts in enumerate(num_of_points):
        start = k_points[special_points[j]]
        end = k_points[special_points[j + 1]]
        # Interpolate each Cartesian component between the two points.
        for axis in range(3):
            k_vectors[offset:offset + npts, axis] = np.linspace(start[axis], end[axis], npts)
        offset += npts
    return k_vectors
def dict2xyz(input_data):
    """Render a dict description of a molecule as an xyz-format string.

    Parameters
    ----------
    input_data :
        dict with keys 'num_atoms', 'title' and 'atoms' (a list of
        single-entry dicts {label: [x, y, z]}); non-dict input is
        returned unchanged

    Returns
    -------
    str or original object
    """
    if not isinstance(input_data, dict):
        return input_data
    lines = [str(input_data['num_atoms']), str(input_data['title'])]
    for atom in input_data['atoms']:
        label = list(atom.keys())[0]
        coords = list(atom.values())[0]
        lines.append("%s %s %s %s" % (label, coords[0], coords[1], coords[2]))
    return "\n".join(lines) + "\n"
def yaml_parser(input_data):
    """Parse YAML from a file path (.yml/.yaml) or from a literal string.

    Parameters
    ----------
    input_data :
        path to a YAML file, or a YAML-formatted string

    Returns
    -------
    dict or None
        parsed document with 'primitive_cell' scaled by
        'lattice_constant' when both keys are present, or None when
        parsing fails
    """
    output = None
    if input_data.lower().endswith(('.yml', '.yaml')):
        with open(input_data, 'r') as stream:
            try:
                output = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
    else:
        try:
            output = yaml.safe_load(input_data)
        except yaml.YAMLError as exc:
            print(exc)
    # Guard the scaling step: previously a failed parse (output is None) or
    # a document missing these keys crashed with TypeError/KeyError here.
    if output and 'primitive_cell' in output and 'lattice_constant' in output:
        output['primitive_cell'] = np.array(output['primitive_cell']) * output['lattice_constant']
    return output
def print_table(myDict, colList=None, sep='\uFFFA'):
    """Pretty print a list of dictionaries (myDict) as a dynamically sized table.

    If column names (colList) aren't specified, they will show in random order.
    sep: row separator. Ex: sep='\n' on Linux. Default: dummy to not split line.

    Parameters
    ----------
    myDict :
        list of row dictionaries to render
    colList :
        ordered list of column names (Default value = None)
    sep :
        in-cell row separator; the default '\uFFFA' never occurs in text
        so cells are not split (Default value = '\uFFFA')

    Returns
    -------
    str
        the formatted table
    """
    if not colList:
        colList = list(myDict[0].keys() if myDict else [])
    myList = [colList]  # 1st row = header
    for item in myDict:
        myList.append([str(item[col]) for col in colList])
    # Column width = widest cell fragment after splitting multi-line cells.
    colSize = [max(map(len, (sep.join(col)).split(sep))) for col in zip(*myList)]
    formatStr = ' | '.join(["{{:<{}}}".format(i) for i in colSize])
    # Horizontal rule matching the column layout.
    line = formatStr.replace(' | ', '-+-').format(*['-' * i for i in colSize])
    item = myList.pop(0)
    lineDone = False
    out = "\n"
    while myList:
        if all(not i for i in item):
            # Current row fully emitted: print a separator (once, unless a
            # real sep is used) and fetch the next row.
            item = myList.pop(0)
            if line and (sep != '\uFFFA' or not lineDone):
                out += line
                out += "\n"
                lineDone = True
        # Emit the first physical line of each cell; keep the remainder.
        row = [i.split(sep, 1) for i in item]
        out += formatStr.format(*[i[0] for i in row])
        out += "\n"
        item = [i[1] if len(i) > 1 else '' for i in row]
    out += line
    out += "\n"
    return out
def print_dict(dictionary):
    """Format a {label: coordinates} mapping as a two-column table string.

    Parameters
    ----------
    dictionary :
        mapping of labels to coordinate values

    Returns
    -------
    str
    """
    rows = ["{:<18} {:<15} \n".format('Label', 'Coordinates')]
    for label, coords in dictionary.items():
        rows.append("{:<18} {:<15} \n".format(label, str(coords)))
    return "".join(rows)
# def split_into_subblocks(h_0, h_l, h_r):
# """
# Split Hamiltonian matrix and coupling matrices into subblocks
#
# :param h_0: Hamiltonian matrix
# :param h_l: left inter-cell coupling matrices
# :param h_r: right inter-cell coupling matrices
# :return h_0_s, h_l_s, h_r_s: lists of subblocks
# """
#
# def find_nonzero_lines(mat, order):
#
# if order == 'top':
# line = mat.shape[0]
# while line > 0:
# if np.count_nonzero(mat[line - 1, :]) == 0:
# line -= 1
# else:
# break
# elif order == 'bottom':
# line = -1
# while line < mat.shape[0] - 1:
# if np.count_nonzero(mat[line + 1, :]) == 0:
# line += 1
# else:
# line = mat.shape[0] - | |
<reponame>AlfredoSequeida/termtosvg
import io
import logging
from collections import namedtuple
from itertools import groupby
from typing import Dict, List, Iterable, Iterator, Union, Tuple
import pyte.graphics
import pyte.screens
from lxml import etree
# Ugliest hack: Replace the first 16 colors rgb values by their names so that termtosvg can
# distinguish FG_BG_256[0] (which defaults to black #000000 but can be styled with themes)
# from FG_BG_256[16] (which is also black #000000 but should be displayed as is).
_COLORS = ['black', 'red', 'green', 'brown', 'blue', 'magenta', 'cyan', 'white']
_BRIGHTCOLORS = ['bright{}'.format(color) for color in _COLORS]
NAMED_COLORS = _COLORS + _BRIGHTCOLORS
pyte.graphics.FG_BG_256 = NAMED_COLORS + pyte.graphics.FG_BG_256[16:]
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Id for the very last SVG animation. This is used to make the first animations start when the
# last one ends (animation looping)
LAST_ANIMATION_ID = 'anim_last'
# XML namespaces
SVG_NS = 'http://www.w3.org/2000/svg'
XLINK_NS = 'http://www.w3.org/1999/xlink'
TERMTOSVG_NS = 'https://github.com/nbedos/termtosvg'
class TemplateError(Exception):
    """Exception for SVG-template related errors."""
    pass
# Internal immutable representation of one terminal cell: its text content,
# colors, and styling flags.
_CharacterCell = namedtuple('_CharacterCell', ['text', 'color',
                                               'background_color', 'bold',
                                               'italics', 'underscore',
                                               'strikethrough'])
# Make Last four arguments of _CharacterCell constructor default to False (bold, italics,
# underscore and strikethrough)
_CharacterCell.__new__.__defaults__ = (False,) * 4
_CharacterCell.__doc__ = 'Representation of a character cell'
_CharacterCell.text.__doc__ = 'Text content of the cell'
_CharacterCell.bold.__doc__ = 'Bold modificator flag'
_CharacterCell.italics.__doc__ = 'Italics modificator flag'
_CharacterCell.underscore.__doc__ = 'Underscore modificator flag'
_CharacterCell.strikethrough.__doc__ = 'Strikethrough modificator flag'
_CharacterCell.color.__doc__ = 'Color of the text'
_CharacterCell.background_color.__doc__ = 'Background color of the cell'
class CharacterCell(_CharacterCell):
    @classmethod
    def from_pyte(cls, char):
        # type: (pyte.screens.Char) -> CharacterCell
        """Create a CharacterCell from a pyte character"""
        # --- foreground color ---
        if char.fg == 'default':
            text_color = 'foreground'
        else:
            fg = char.fg
            # Bold text uses the bright variant of the basic named colors.
            if char.bold and not str(fg).startswith('bright'):
                fg = 'bright{}'.format(fg)
            if fg in NAMED_COLORS:
                text_color = 'color{}'.format(NAMED_COLORS.index(fg))
            elif len(char.fg) == 6:
                # Six hex digits: validate, then use as an inline color
                # (raises ValueError if char.fg is not hexadecimal).
                int(char.fg, 16)
                text_color = '#{}'.format(char.fg)
            else:
                raise ValueError('Invalid foreground color: {}'.format(char.fg))
        # --- background color ---
        if char.bg == 'default':
            background_color = 'background'
        elif char.bg in NAMED_COLORS:
            background_color = 'color{}'.format(NAMED_COLORS.index(char.bg))
        elif len(char.bg) == 6:
            # Six hex digits: validate, then use as an inline color.
            int(char.bg, 16)
            background_color = '#{}'.format(char.bg)
        else:
            raise ValueError('Invalid background color')
        # Reverse video swaps foreground and background.
        if char.reverse:
            text_color, background_color = background_color, text_color
        return CharacterCell(char.data, text_color, background_color,
                             char.bold, char.italics, char.underscore,
                             char.strikethrough)
# Terminal geometry of the recording, in character cells.
CharacterCellConfig = namedtuple('CharacterCellConfig', ['width', 'height'])
# One screen line to display: row index, {column: CharacterCell} mapping,
# plus the time and duration of its visibility (milliseconds, per the
# make_animated_group contract).
CharacterCellLineEvent = namedtuple('CharacterCellLineEvent', ['row', 'line', 'time', 'duration'])
# A record of the stream is either the initial config or a line event.
CharacterCellRecord = Union[CharacterCellConfig, CharacterCellLineEvent]
class ConsecutiveWithSameAttributes:
    """Callable to be used as a key for itertools.groupby to group together
    consecutive elements of a list sharing the same attribute values.

    Call with (index, object) pairs, e.g. from enumerate(); returns
    (index of the first element of the group, {attribute: value}).
    """
    def __init__(self, attributes):
        self.group_index = None
        self.last_index = None
        self.attributes = attributes
        self.last_key_attributes = None

    def __call__(self, arg):
        index, obj = arg
        current = {name: getattr(obj, name) for name in self.attributes}
        consecutive = self.last_index == index - 1
        if not consecutive or current != self.last_key_attributes:
            # A gap in indices or a change of attributes starts a new group.
            self.group_index = index
        self.last_index = index
        self.last_key_attributes = current
        return self.group_index, current
def make_rect_tag(column, length, height, cell_width, cell_height, background_color):
    # type: (int, int, int, int, int, str) -> etree.ElementBase
    """Build an SVG 'rect' element spanning `length` character cells."""
    attributes = {
        'x': str(column * cell_width),
        'y': str(height),
        'width': str(length * cell_width),
        'height': str(cell_height)
    }
    # Hex colors are set inline; named colors reference a CSS class.
    key = 'fill' if background_color.startswith('#') else 'class'
    attributes[key] = background_color
    return etree.Element('rect', attributes)
def _render_line_bg_colors(screen_line, height, cell_height, cell_width):
    # type: (Dict[int, CharacterCell], int, int, int) -> List[etree.ElementBase]
    """Return a list of 'rect' tags representing the background of 'screen_line'

    Runs of consecutive cells with the same background color yield a single
    'rect'. Cells using the default background are skipped entirely since
    the default background is always displayed.

    :param screen_line: Mapping between column numbers and CharacterCells
    :param height: Vertical position of the line on the screen in pixels
    :param cell_height: Height of a character cell in pixels
    :param cell_width: Width of a character cell in pixels
    """
    colored_cells = [(column, cell) for (column, cell) in sorted(screen_line.items())
                     if cell.background_color != 'background']
    grouper = ConsecutiveWithSameAttributes(['background_color'])
    tags = []
    for (column, attributes), run in groupby(colored_cells, grouper):
        tags.append(make_rect_tag(column, len(list(run)), height, cell_width,
                                  cell_height, attributes['background_color']))
    return tags
def make_text_tag(column, attributes, text, cell_width):
    # type: (List[Tuple[int, CharacterCell]], Dict[str, str], str, int) -> etree.ElementBase
    """Build an SVG 'text' element for a run of identically-styled characters."""
    tag_attrs = {
        'x': str(column * cell_width),
        'textLength': str(len(text) * cell_width),
        'lengthAdjust': 'spacingAndGlyphs'
    }
    if attributes['bold']:
        tag_attrs['font-weight'] = 'bold'
    if attributes['italics']:
        tag_attrs['font-style'] = 'italic'
    # NOTE: text-decoration is always set, even when empty, matching the
    # historic output of this function.
    decoration = ''
    if attributes['underscore']:
        decoration = 'underline'
    if attributes['strikethrough']:
        decoration += ' line-through'
    tag_attrs['text-decoration'] = decoration
    # Hex colors are set inline; named colors reference a CSS class.
    color_key = 'fill' if attributes['color'].startswith('#') else 'class'
    tag_attrs[color_key] = attributes['color']
    element = etree.Element('text', tag_attrs)
    # Replace usual spaces with unbreakable spaces so that indenting the SVG does not mess up
    # the whole animation; this is somewhat better than the 'white-space: pre' CSS option
    element.text = text.replace(' ', '\u00A0')
    return element
def _render_characters(screen_line, cell_width):
    # type: (Dict[int, CharacterCell], int) -> List[etree.ElementBase]
    """Return a list of 'text' elements representing the line of the screen

    Consecutive characters sharing the same styling attributes (color, font
    weight, ...) are merged into a single text element.

    :param screen_line: Mapping between column numbers and characters
    :param cell_width: Width of a character cell in pixels
    """
    ordered_cells = sorted(screen_line.items())
    grouper = ConsecutiveWithSameAttributes(
        ['color', 'bold', 'italics', 'underscore', 'strikethrough'])
    return [
        make_text_tag(column, attributes,
                      ''.join(cell.text for _, cell in run), cell_width)
        for (column, attributes), run in groupby(ordered_cells, grouper)
    ]
_BG_RECT_TAG_ATTRIBUTES = {
'class': 'background',
'height': '100%',
'width': '100%',
'x': '0',
'y': '0'
}
BG_RECT_TAG = etree.Element('rect', _BG_RECT_TAG_ATTRIBUTES)
def make_animated_group(records, time, duration, cell_height, cell_width, defs):
    # type: (Iterable[CharacterCellLineEvent], int, int, int, int, Dict[str, etree.ElementBase]) -> Tuple[etree.ElementBase, Dict[str, etree.ElementBase]]
    """Return a group element containing an SVG version of the provided records. This group is
    animated, that is to say displayed then removed according to the timing arguments.

    :param records: List of lines that should be included in the group
    :param time: Time the group should appear on the screen (milliseconds)
    :param duration: Duration of the appearance on the screen (milliseconds)
    :param cell_height: Height of a character cell in pixels
    :param cell_width: Width of a character cell in pixels
    :param defs: Existing definitions
    :return: A tuple consisting of the animated group and the new definitions
    """
    # Hidden by default; the trailing 'animate' element toggles visibility.
    animation_group_tag = etree.Element('g', attrib={'display': 'none'})
    # Maps serialized text groups to their reusable <g> definition (id'd for <use>)
    new_definitions = {}
    for event_record in records:
        # Background elements
        rect_tags = _render_line_bg_colors(screen_line=event_record.line,
                                           height=event_record.row * cell_height,
                                           cell_height=cell_height,
                                           cell_width=cell_width)
        for tag in rect_tags:
            animation_group_tag.append(tag)

        # Group text elements for the current line into text_group_tag
        text_group_tag = etree.Element('g')
        text_tags = _render_characters(event_record.line, cell_width)
        for tag in text_tags:
            text_group_tag.append(tag)

        # Find or create a definition for text_group_tag
        # The serialized XML acts as a content-based deduplication key
        text_group_tag_str = etree.tostring(text_group_tag)
        if text_group_tag_str in defs:
            group_id = defs[text_group_tag_str].attrib['id']
        elif text_group_tag_str in new_definitions:
            group_id = new_definitions[text_group_tag_str].attrib['id']
        else:
            # Ids are sequential over both existing and newly created definitions
            group_id = 'g{}'.format(len(defs) + len(new_definitions) + 1)
            assert group_id not in defs.values() and group_id not in new_definitions.values()
            text_group_tag.attrib['id'] = group_id
            new_definitions[text_group_tag_str] = text_group_tag

        # Add a reference to the definition of text_group_tag with a 'use' tag
        use_attributes = {
            '{{{namespace}}}href'.format(namespace=XLINK_NS): '#{_id}'.format(_id=group_id),
            'y': str(event_record.row * cell_height),
        }
        use_tag = etree.Element('use', use_attributes)
        animation_group_tag.append(use_tag)

    # Finally, add an animation tag so that the whole group goes from 'display: none' to
    # 'display: inline' at the time the line should appear on the screen
    if time == 0:
        # Animations starting at 0ms should also start when the last animation ends (looping)
        begin_time = '0ms; {id}.end'.format(id=LAST_ANIMATION_ID)
    else:
        begin_time = '{time}ms; {id}.end+{time}ms'.format(time=time, id=LAST_ANIMATION_ID)
    attributes = {
        'attributeName': 'display',
        # 'from' and 'to' are both 'inline': the animation's role is to make the
        # group visible for 'dur' milliseconds starting at 'begin'
        'from': 'inline',
        'to': 'inline',
        'begin': begin_time,
        'dur': '{}ms'.format(duration)
    }
    animation = etree.Element('animate', attributes)
    animation_group_tag.append(animation)

    return animation_group_tag, new_definitions
"""
This fucntion is simar to the make_animated_group function, however, it ommits
time and rectags as they are not needed for a still frame
"""
def make_group(records, cell_height, cell_width, defs):
    """Return a non-animated group for a still frame, plus any new definitions.

    Like make_animated_group, but without timing attributes and without the
    background rectangles, which a still frame does not need.

    :param records: Lines that should be included in the group
    :param cell_height: Height of a character cell in pixels
    :param cell_width: Width of a character cell in pixels
    :param defs: Existing definitions
    :return: A tuple of (group element, new definitions)
    """
    # NOTE(review): the group is created with display:none and the trailing
    # 'animate' element has no 'begin'/'dur' attributes, so something
    # downstream presumably toggles visibility — confirm against the caller.
    animation_group_tag = etree.Element('g', attrib={'display': 'none'})
    new_definitions = {}
    for event_record in records:
        # Group text elements for the current line into text_group_tag
        text_group_tag = etree.Element('g')
        text_tags = _render_characters(event_record.line, cell_width)
        for tag in text_tags:
            text_group_tag.append(tag)

        # Find or create a definition for text_group_tag
        # The serialized XML acts as a content-based deduplication key
        text_group_tag_str = etree.tostring(text_group_tag)
        if text_group_tag_str in defs:
            group_id = defs[text_group_tag_str].attrib['id']
        elif text_group_tag_str in new_definitions:
            group_id = new_definitions[text_group_tag_str].attrib['id']
        else:
            group_id = 'g{}'.format(len(defs) + len(new_definitions) + 1)
            assert group_id not in defs.values() and group_id not in new_definitions.values()
            text_group_tag.attrib['id'] = group_id
            new_definitions[text_group_tag_str] = text_group_tag

        # Add a reference to the definition of text_group_tag with a 'use' tag
        use_attributes = {
            '{{{namespace}}}href'.format(namespace=XLINK_NS): '#{_id}'.format(_id=group_id),
            'y': str(event_record.row * cell_height),
        }
        use_tag = etree.Element('use', use_attributes)
        animation_group_tag.append(use_tag)

    attributes = {
        'attributeName': 'display',
        'from': 'inline',
        'to': 'inline',
    }
    animation = etree.Element('animate', attributes)
    animation_group_tag.append(animation)

    return animation_group_tag, new_definitions
def render_animation(records, filename, template, cell_width=8, cell_height=17):
| |
returned errors
:type continue_on_error: bool
:param context: A dictionary of extra values to include in the context header
:type context: dict
:param control_extra: A dictionary of extra values to include in the control header
:type control_extra: dict
:return: The job response
:rtype: list[union(JobResponse, Exception)]
:raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge,
MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError
"""
return self.call_jobs_parallel_future(
jobs,
expansions=expansions,
raise_job_errors=raise_job_errors,
raise_action_errors=raise_action_errors,
catch_transport_errors=catch_transport_errors,
timeout=timeout,
**kwargs
).result()
# Non-blocking methods that send a request and then return a future from which the response can later be obtained.
def call_action_future(
    self,
    service_name,
    action,
    body=None,
    **kwargs
):
    """
    This method is identical in signature and behavior to `call_action`, except that it sends the request and
    then immediately returns a `FutureResponse` instead of blocking waiting on a response and returning
    an `ActionResponse`. Just call `result(timeout=None)` on the future response to block for an available
    response. Some of the possible exceptions may be raised when this method is called; others may be raised when
    the future is used.

    :return: A future from which the action response can later be retrieved
    :rtype: Client.FutureResponse
    """
    # Wrap the single action in a one-element job and delegate the send
    job_future = self.call_actions_future(
        service_name,
        [ActionRequest(action=action, body=body or {})],
        **kwargs
    )

    def _unwrap_single_action(_timeout):
        job_response = job_future.result(_timeout)
        if job_response.errors:
            # Only reachable when raise_job_errors=False; surface the error list,
            # matching the behavior of the other non-raising methods.
            return job_response.errors
        return job_response.actions[0]

    return self.FutureResponse(_unwrap_single_action)
def call_actions_future(
    self,
    service_name,
    actions,
    expansions=None,
    raise_job_errors=True,
    raise_action_errors=True,
    timeout=None,
    **kwargs
):
    """
    This method is identical in signature and behavior to `call_actions`, except that it sends the request and
    then immediately returns a `FutureResponse` instead of blocking waiting on a response and returning a
    `JobResponse`. Just call `result(timeout=None)` on the future response to block for an available
    response. Some of the possible exceptions may be raised when this method is called; others may be raised when
    the future is used.

    :return: A future from which the job response can later be retrieved
    :rtype: Client.FutureResponse
    """
    kwargs.pop('suppress_response', None)  # If this kwarg is used, this method would always result in a timeout

    if timeout:
        kwargs['message_expiry_in_seconds'] = timeout

    # Send immediately; only the closure below blocks, when the future is resolved
    expected_request_id = self.send_request(service_name, actions, **kwargs)

    def get_response(_timeout=None):
        # Get all responses
        responses = list(self.get_all_responses(service_name, receive_timeout_in_seconds=_timeout or timeout))

        # Try to find the expected response
        found = False
        response = None
        for request_id, response in responses:
            if request_id == expected_request_id:
                found = True
                break
        if not found:
            # This error should be impossible if `get_all_responses` is behaving correctly, but let's raise a
            # meaningful error just in case.
            raise Exception(
                'Got unexpected response(s) with ID(s) {} for request with ID {}'.format(
                    [r[0] for r in responses],
                    expected_request_id,
                )
            )

        # Process errors at the Job and Action level
        if response.errors and raise_job_errors:
            raise self.JobError(response.errors)
        if raise_action_errors:
            error_actions = [action for action in response.actions if action.errors]
            if error_actions:
                raise self.CallActionError(error_actions)

        if expansions:
            # continue_on_error applies to the original job only, not the expansion requests
            kwargs.pop('continue_on_error', None)
            self._perform_expansion(response.actions, expansions, **kwargs)

        return response

    return self.FutureResponse(get_response)
def call_actions_parallel_future(self, service_name, actions, **kwargs):
    """
    This method is identical in signature and behavior to `call_actions_parallel`, except that it sends the requests
    and then immediately returns a `FutureResponse` instead of blocking waiting on responses and returning a
    generator. Just call `result(timeout=None)` on the future response to block for an available response (which
    will be a generator). Some of the possible exceptions may be raised when this method is called; others may be
    raised when the future is used.

    If argument `raise_job_errors` is supplied and is `False`, some items in the result list might be lists of job
    errors instead of individual `ActionResponse`s. Be sure to check for that if used in this manner.

    If argument `catch_transport_errors` is supplied and is `True`, some items in the result list might be instances
    of `Exception` instead of individual `ActionResponse`s. Be sure to check for that if used in this manner.

    :return: A generator of action responses that blocks waiting on responses once you begin iteration

    :rtype: Client.FutureResponse
    """
    # Fan each action out as its own single-action job
    jobs_future = self.call_jobs_parallel_future(
        jobs=({'service_name': service_name, 'actions': [one_action]} for one_action in actions),
        **kwargs
    )

    def _flatten(job_results):
        # Collapse each job result back down to a single item:
        # exception, job-error list, or the job's single action response.
        for job_response in job_results:
            if isinstance(job_response, Exception):
                yield job_response
            elif job_response.errors:
                yield job_response.errors
            else:
                yield job_response.actions[0]

    return self.FutureResponse(lambda _timeout: _flatten(jobs_future.result(_timeout)))
def call_jobs_parallel_future(
    self,
    jobs,
    expansions=None,
    raise_job_errors=True,
    raise_action_errors=True,
    catch_transport_errors=False,
    timeout=None,
    **kwargs
):
    """
    This method is identical in signature and behavior to `call_jobs_parallel`, except that it sends the requests
    and then immediately returns a `FutureResponse` instead of blocking waiting on all responses and returning
    a `list` of `JobResponses`. Just call `result(timeout=None)` on the future response to block for an available
    response. Some of the possible exceptions may be raised when this method is called; others may be raised when
    the future is used.

    :return: A future from which the list of job responses can later be retrieved
    :rtype: Client.FutureResponse
    """
    kwargs.pop('suppress_response', None)  # If this kwarg is used, this method would always result in a timeout

    if timeout:
        kwargs['message_expiry_in_seconds'] = timeout

    # Negative sentinel "request IDs" for jobs whose *send* failed (real IDs are >= 0)
    error_key = 0
    # (service_name, request_id) -> transport exception captured during send or receive
    transport_errors = {}

    # Order of (service_name, request_id) keys mirroring the order jobs were submitted,
    # so responses can be reassembled in the caller's order
    response_reassembly_keys = []
    # service_name -> set of request IDs still awaiting a response
    service_request_ids = {}
    for job in jobs:
        try:
            sent_request_id = self.send_request(job['service_name'], job['actions'], **kwargs)
            service_request_ids.setdefault(job['service_name'], set()).add(sent_request_id)
        except (ConnectionError, InvalidMessageError, MessageSendError, MessageSendTimeout, MessageTooLarge) as e:
            if not catch_transport_errors:
                raise
            # Record the failure under a unique negative key so it can be slotted
            # back into the response list at this job's position
            sent_request_id = error_key = error_key - 1
            transport_errors[(job['service_name'], sent_request_id)] = e

        response_reassembly_keys.append((job['service_name'], sent_request_id))

    def get_response(_timeout):
        # Phase 1: drain responses per service
        service_responses = {}
        for service_name, request_ids in six.iteritems(service_request_ids):
            try:
                for request_id, response in self.get_all_responses(
                    service_name,
                    receive_timeout_in_seconds=_timeout or timeout,
                ):
                    if request_id not in request_ids:
                        raise Exception(
                            'Got response ID {}, not in set of expected IDs {}'.format(request_id, request_ids)
                        )
                    service_responses[(service_name, request_id)] = response
                    if catch_transport_errors:
                        # We don't need the set to be reduced unless we're catching errors
                        request_ids.remove(request_id)
            except (ConnectionError, InvalidMessageError, MessageReceiveError, MessageReceiveTimeout) as e:
                if not catch_transport_errors:
                    raise
                # Every still-outstanding request for this service failed with this error
                for request_id in request_ids:
                    transport_errors[(service_name, request_id)] = e

        # Phase 2: reassemble responses (and captured errors) in submission order
        responses = []
        actions_to_expand = []
        for service_name, request_id in response_reassembly_keys:
            if request_id < 0:
                # A transport error occurred during send, and we are catching errors, so add it to the list
                responses.append(transport_errors[(service_name, request_id)])
                continue

            if (service_name, request_id) not in service_responses:
                if (service_name, request_id) in transport_errors:
                    # A transport error occurred during receive, and we are catching errors, so add it to the list
                    responses.append(transport_errors[(service_name, request_id)])
                    continue

                # It shouldn't be possible for this to happen unless the code has a bug, but let's raise a
                # meaningful exception just in case a bug exists, because KeyError will not be helpful.
                raise Exception('There was no response for service {}, request {}'.format(service_name, request_id))

            response = service_responses[(service_name, request_id)]
            if raise_job_errors and response.errors:
                raise self.JobError(response.errors)
            if raise_action_errors:
                error_actions = [action for action in response.actions if action.errors]
                if error_actions:
                    raise self.CallActionError(error_actions)
            if expansions:
                actions_to_expand.extend(response.actions)

            responses.append(response)

        if expansions:
            # continue_on_error applies to the original jobs only, not the expansion requests
            kwargs.pop('continue_on_error', None)
            self._perform_expansion(actions_to_expand, expansions, **kwargs)

        return responses

    return self.FutureResponse(get_response)
# Methods used to send a request in a non-blocking manner and then later block for a response as a separate step
def send_request(
self,
service_name,
actions,
switches=None,
correlation_id=None,
continue_on_error=False,
context=None,
control_extra=None,
message_expiry_in_seconds=None,
suppress_response=False,
):
"""
Build and send a JobRequest, and return a request ID.
The context and control_extra arguments may be used to include extra values in the
context and control headers, respectively.
:param service_name: The name of the service from which to receive responses
:type service_name: union[str, unicode]
:param actions: A list of `ActionRequest` objects
:type actions: list
:param switches: A list of switch value integers
:type switches: union[list, set]
:param correlation_id: The request correlation ID
:type correlation_id: union[str, unicode]
:param continue_on_error: Whether to continue executing further actions once one action has returned errors
:type continue_on_error: bool
:param context: A dictionary of extra values to include in the context header
:type context: dict
:param control_extra: A dictionary of extra values to include in the control header
:type control_extra: dict
:param message_expiry_in_seconds: How soon the message will expire if not received by a server (defaults to
sixty seconds unless the settings are otherwise)
:type message_expiry_in_seconds: int
:param suppress_response: If `True`, the service will process the request normally | |
# Repository: Atokulus/flora-tools
from copy import copy
from typing import List, Union, Tuple, Optional
import numpy as np
import flora_tools.lwb_round as lwb_round
import flora_tools.lwb_slot as lwb_slot
import flora_tools.sim.lwb_service as service
import flora_tools.sim.sim_node as sim_node
from flora_tools.sim.sim_message import SimMessage, SimMessageType
# Number of consecutive failures a stream survives before it is removed.
LWB_STREAM_MAX_TTL = 3
# Number of request attempts per modulation before stepping down / giving up.
LWB_STREAM_MAX_REQUEST_TRIALS = 10
# Backoff (in request opportunities) applied when a stream is deactivated.
LWB_STREAM_DEACTIVATION_BACKOFF = 100
# Upper bound for the random contention backoff window.
LWB_STREAM_MAX_BACKOFF_RANGE = 16
# Initial random contention backoff window (doubles on each retry).
LWB_STREAM_INITIAL_BACKOFF_RANGE = 4
class DataStream:
    """A periodic, slotted data stream between a node and its LWB master.

    A stream describes a recurring payload transfer: how often data is produced
    (``period``), how it competes for round slots (``priority``/``subpriority``)
    and how large the payload is (``max_payload``, split over ``slot_count``
    data slots). Stream requests use a randomized exponential backoff.
    """

    def __init__(self, id: str, node: 'sim_node.SimNode', master: 'sim_node.SimNode', priority, subpriority, period,
                 service: 'service.Service' = None, destination: 'sim_node.SimNode' = None,
                 max_payload=None, needs_ack=True):
        self.id = id
        self.node = node
        self.master = master
        self.destination = destination
        self.service = service
        self.priority = priority
        self.subpriority = subpriority
        self.period = period
        self.needs_ack = needs_ack
        self.stream_manager: LWBStreamManager = None
        # Pretend one full period has already elapsed so data is available immediately
        self._last_consumption = self.node.local_timestamp - self.period
        self.ttl = LWB_STREAM_MAX_TTL
        self.is_ack = False

        # FIX: default the payload size *before* deriving the slot layout. The
        # original left self.max_payload as None when the argument was omitted,
        # which made the np.ceil() below raise a TypeError.
        if max_payload is None:
            max_payload = lwb_slot.LWB_MAX_DATA_PAYLOAD
        self.max_payload = max_payload

        self.slot_count = int(np.ceil(self.max_payload / lwb_slot.LWB_MAX_DATA_PAYLOAD))
        # FIX: remainder must be taken against the per-slot capacity; the original
        # computed `self.max_payload % max_payload`, which is always 0.
        self.last_slot_size = self.max_payload % lwb_slot.LWB_MAX_DATA_PAYLOAD
        self.current_slot = 0

        self.trial_modulation = None
        self.trial_counter = 0
        self.backoff = 0
        self.backoff_range = LWB_STREAM_INITIAL_BACKOFF_RANGE

        # TODO Contracts
        self.advertised_ack_power_level = None  # Modulation not needed, as it is implicated by the stream request handshake

    @property
    def last_consumption(self):
        # Local timestamp of the most recent completed consumption of this stream.
        return self._last_consumption

    @last_consumption.setter
    def last_consumption(self, value):
        self._last_consumption = value

    def schedule_slot(self, timestamp):
        """Advance the slot cursor; after the last slot, mark the stream consumed."""
        if self.current_slot < self.slot_count - 1:
            self.current_slot += 1
        else:
            self.current_slot = 0
            self.last_consumption = timestamp

    def check_request(self, round: 'lwb_round.LWBRound', modulation: int) -> bool:
        """Decide whether to send a stream request in this round at `modulation`.

        Implements randomized exponential backoff; after too many trials the
        stream steps down to a slower modulation, and finally deactivates.
        """
        if not self.backoff:
            # Draw a fresh backoff and widen the window (capped)
            self.backoff = np.random.choice(range(self.backoff_range))
            self.backoff_range *= 2
            if self.backoff_range > LWB_STREAM_MAX_BACKOFF_RANGE:
                self.backoff_range = LWB_STREAM_MAX_BACKOFF_RANGE

            if self.trial_modulation is not None:
                self.trial_counter += 1
                if self.trial_counter >= LWB_STREAM_MAX_REQUEST_TRIALS:
                    if self.trial_modulation > 0:
                        # Too many failures: retry on the next slower modulation
                        self.trial_modulation -= 1
                        self.trial_counter = 0
                    else:
                        # Exhausted all modulations: deactivate for a long backoff
                        self.trial_modulation = None
                        self.trial_counter = 0
                        self.backoff = LWB_STREAM_DEACTIVATION_BACKOFF
                if modulation == self.trial_modulation:
                    return True
                else:
                    return False
            else:
                # First trial: only request on modulations at least as fast as the link's
                if self.node.lwb.link_manager.get_link(round.master)['modulation'] > modulation:
                    return False
                else:
                    self.trial_counter += 1
                    self.trial_modulation = modulation
                    return True
        else:
            self.backoff -= 1
            return False

    def reset_request_check(self, round: 'lwb_round.LWBRound'):
        """Reset the request backoff state after a successful round on this modulation."""
        if self.trial_modulation == round.modulation:
            self.trial_counter = 0
            self.backoff = 0
            self.backoff_range = LWB_STREAM_INITIAL_BACKOFF_RANGE

    @property
    def next_period(self):
        # Earliest local timestamp at which new data is due.
        return self.last_consumption + self.period

    def __copy__(self):
        # The copy deliberately drops the service reference (service=None):
        # copies travel over the air and must not carry local callbacks.
        return DataStream(self.id, self.node, self.master, self.priority, self.subpriority, self.period, service=None,
                          destination=self.destination, max_payload=self.max_payload, needs_ack=self.needs_ack)

    def get(self):
        """Fetch the next payload from the attached service."""
        return self.service.get_data()

    def available(self, timestamp=None):
        """Return the amount of payload ready for transmission (0 if none)."""
        if timestamp is None:
            # FIX: use this stream's own node; the original dereferenced
            # self.service.node, which crashes when no service is attached
            # (the service-less case is explicitly handled below).
            timestamp = self.node.local_timestamp
        if self.current_slot < self.slot_count:
            if self.service is not None:
                return self.service.data_available()
            elif timestamp - self.last_consumption > self.period:
                return (self.slot_count - self.current_slot) * self.max_payload
            else:
                return 0
        else:
            return 0

    def success(self):
        """Handle a successful transmission: refresh TTL/backoff and advance slots."""
        self.ttl = LWB_STREAM_MAX_TTL
        self.backoff = 0
        self.backoff_range = LWB_STREAM_INITIAL_BACKOFF_RANGE
        self.trial_counter = 0

        if self.current_slot < self.slot_count - 1:
            self.current_slot += 1
        else:
            self.current_slot = 0
            if self.service is not None:
                self.last_consumption = self.node.local_timestamp  # Not required on BASE
                self.service.ack_data_callback()

    def fail(self):
        """Handle a failed transmission; remove the stream once its TTL is exhausted."""
        self.ttl -= 1
        # FIX: compare by value; `is 0` relies on CPython small-int interning
        # and raises a SyntaxWarning on modern Python. `<= 0` is also robust
        # against the counter ever skipping past zero.
        if self.ttl <= 0:
            if self.stream_manager is not None:
                self.stream_manager.remove_data(self)
            if self.service is not None:
                self.service.failed_datastream_callback(self)

    def retry(self):
        """Re-register the stream for a fresh request handshake."""
        self.ttl = LWB_STREAM_MAX_TTL
        self.is_ack = False
        self.stream_manager.register_data(self)
class NotificationStream:
    """A periodic notification stream between a node and its LWB master.

    Unlike DataStream, a notification occupies a single contention-sized slot,
    so there is no slot bookkeeping; the request/backoff logic mirrors
    DataStream.check_request (kept consistent with it).
    """

    def __init__(self, id: str, node: 'sim_node.SimNode', master: 'sim_node.SimNode', priority: int, subpriority: int,
                 period, service: 'service.Service' = None, destination: 'sim_node.SimNode' = None, low_power=False,
                 needs_ack=True):
        self.id = id
        self.node = node
        self.master = master
        self.destination = destination
        self.service = service
        self.priority = priority
        self.subpriority = subpriority
        self.period = period
        self.low_power = low_power
        self.needs_ack = needs_ack
        self.stream_manager: LWBStreamManager = None
        # Pretend one full period has already elapsed so a notification is due immediately
        self.last_consumption = self.node.local_timestamp - self.period
        self.ttl = LWB_STREAM_MAX_TTL
        self.is_ack = True

        self.trial_modulation = None
        self.trial_counter = 0
        self.backoff = None
        self.backoff_range = LWB_STREAM_INITIAL_BACKOFF_RANGE

        self.advertised_ack_power_level = None  # Modulation not needed, as it is implicated by the stream request handshake

    def check_request(self, round: 'lwb_round.LWBRound', modulation: int) -> bool:
        """Decide whether to send a stream request in this round at `modulation`.

        Randomized exponential backoff, identical to DataStream.check_request.
        """
        if not self.backoff:
            # Draw a fresh backoff and widen the window (capped)
            self.backoff = np.random.choice(range(self.backoff_range))
            self.backoff_range *= 2
            if self.backoff_range > LWB_STREAM_MAX_BACKOFF_RANGE:
                self.backoff_range = LWB_STREAM_MAX_BACKOFF_RANGE

            if self.trial_modulation is not None:
                self.trial_counter += 1
                if self.trial_counter >= LWB_STREAM_MAX_REQUEST_TRIALS:
                    if self.trial_modulation > 0:
                        # Too many failures: retry on the next slower modulation
                        self.trial_modulation -= 1
                        self.trial_counter = 0
                    else:
                        # Exhausted all modulations: deactivate for a long backoff
                        self.trial_modulation = None
                        self.trial_counter = 0
                        self.backoff = LWB_STREAM_DEACTIVATION_BACKOFF
                if modulation == self.trial_modulation:
                    return True
                else:
                    return False
            else:
                # First trial: only request on modulations at least as fast as the link's
                if self.node.lwb.link_manager.get_link(round.master)['modulation'] > modulation:
                    return False
                else:
                    self.trial_counter += 1
                    self.trial_modulation = modulation
                    return True
        else:
            self.backoff -= 1
            return False

    def reset_request_check(self, round: 'lwb_round.LWBRound'):
        """Reset the request backoff state after a successful round on this modulation."""
        if self.trial_modulation == round.modulation:
            self.trial_counter = 0
            self.backoff = 0
            self.backoff_range = LWB_STREAM_INITIAL_BACKOFF_RANGE

    def __copy__(self):
        # The copy deliberately drops the service reference (service=None):
        # copies travel over the air and must not carry local callbacks.
        return NotificationStream(self.id, self.node, self.master, self.priority, self.subpriority, self.period,
                                  service=None, destination=self.destination, low_power=self.low_power,
                                  needs_ack=self.needs_ack)

    @property
    def next_period(self):
        # Earliest local timestamp at which the next notification is due.
        return self.last_consumption + self.period

    def schedule_slot(self, timestamp):
        """Mark the notification as consumed at `timestamp`."""
        self.last_consumption = timestamp

    def get(self):
        """Fetch the pending notification from the attached service."""
        return self.service.get_notification()

    def available(self, timestamp=None):
        """Return the pending notification (or True) if one is due, else None."""
        if timestamp is None:
            # FIX: use this stream's own node; the original dereferenced
            # self.service.node, which crashes when no service is attached
            # (the service-less case is explicitly handled below).
            timestamp = self.node.local_timestamp
        if timestamp - self.last_consumption > self.period:
            if self.service is not None:
                return self.service.notification_available()
            else:
                return True
        else:
            return None

    def success(self):
        """Handle a successful transmission: refresh TTL and mark consumption."""
        self.ttl = LWB_STREAM_MAX_TTL
        self.last_consumption = self.stream_manager.node.local_timestamp
        if self.service is not None:
            self.service.ack_notification_callback()

    def fail(self):
        """Handle a failed transmission; remove the stream once its TTL is exhausted."""
        self.ttl -= 1
        # FIX: compare by value; `is 0` relies on CPython small-int interning
        # and raises a SyntaxWarning on modern Python.
        if self.ttl <= 0:
            self.stream_manager.remove_notification(self)
            if self.service is not None:
                self.service.failed_notification_callback(self)

    def retry(self):
        """Re-register the stream for a fresh request handshake."""
        self.ttl = LWB_STREAM_MAX_TTL
        self.is_ack = False
        self.stream_manager.register_notification(self)
class LWBStreamManager:
def __init__(self, node: 'sim_node.SimNode'):
    """Create a stream manager owning the data/notification streams of `node`."""
    self.node = node
    # All registered streams, data and notifications kept separately
    self.datastreams: List[DataStream] = []
    self.notification_streams: List[NotificationStream] = []
def register(self, stream: Union[DataStream, NotificationStream]):
    """Register a stream with this manager, dispatching on its concrete type.

    Streams without an attached local service (remote mirrors) are marked as
    already acknowledged, since no request handshake is needed for them.
    """
    if stream.service is None:
        stream.is_ack = True
    # FIX: use isinstance instead of exact-type comparison so DataStream
    # subclasses are also routed to the data path.
    if isinstance(stream, DataStream):
        self.register_data(stream)
    else:
        self.register_notification(stream)
def register_data(self, datastream: DataStream):
    """Adopt a data stream, replacing any existing stream with the same id."""
    datastream.node = self.node
    datastream.stream_manager = self
    # FIX: rebuild the list instead of removing elements while iterating it;
    # mutating a list inside its own `for` loop can skip elements.
    self.datastreams = [stream for stream in self.datastreams if stream.id != datastream.id]
    self.datastreams.append(datastream)
def remove_data(self, datastream: DataStream):
    """Remove a data stream from this manager; raises ValueError if absent."""
    self.datastreams.remove(datastream)
def register_notification(self, notification_stream: NotificationStream):
    """Adopt a notification stream, replacing any existing one with the same id.

    FIX: the original mutated `self.datastreams` (copy-paste from
    register_data), so notification streams were appended to the wrong list
    and never reached `self.notification_streams`.
    """
    notification_stream.node = self.node
    notification_stream.stream_manager = self
    self.notification_streams = [
        stream for stream in self.notification_streams if stream.id != notification_stream.id
    ]
    self.notification_streams.append(notification_stream)
def remove_notification(self, notification_stream: NotificationStream):
    """Remove a notification stream from this manager; raises ValueError if absent."""
    self.notification_streams.remove(notification_stream)
def select_data(self, slot_size: int = None, selection: List[DataStream] = None, timestamp=None):
    """Pick the best acknowledged data stream with available payload.

    :param slot_size: If given (truthy), only streams whose payload fits in
        a slot of this size are considered
    :param selection: Candidate streams (defaults to a copy of all data streams)
    :param timestamp: Reference time for availability checks
    :return: The selected stream, or None if no stream qualifies

    The original had two near-identical branches (with/without slot_size) and
    a stray no-op string literal `'stream: DataStream'`; both are consolidated
    here with the comparison logic preserved verbatim.
    """
    if selection is None:
        selection = self.datastreams.copy()

    best_match: DataStream = None
    stream: DataStream
    for stream in selection:
        if not (stream.is_ack and stream.available(timestamp=timestamp)):
            continue
        # Size filter only applies when a slot size constraint was given
        if slot_size and stream.max_payload > slot_size:
            continue
        if best_match is None:
            best_match = stream
        elif (best_match.priority >= stream.priority
                and best_match.subpriority >= stream.subpriority
                and best_match.available(timestamp) < stream.available(timestamp)
                and best_match.last_consumption > stream.last_consumption):
            best_match = stream
    return best_match
def select_notification(self, selection: List[NotificationStream] = None, low_power=False, timestamp=None):
    """Pick the best acknowledged notification stream that has a pending notification.

    :param selection: Candidate streams (defaults to all notification streams)
    :param low_power: If True, only low-power streams are considered
    :param timestamp: Reference time for availability checks
    :return: The selected stream, or None if no stream qualifies
    """
    candidates = self.notification_streams if selection is None else selection
    best: NotificationStream = None
    for candidate in candidates:
        # Guard clauses: must be acknowledged, have a notification pending,
        # and match the low-power constraint when one is requested
        if not candidate.is_ack:
            continue
        if not candidate.available(timestamp=timestamp):
            continue
        if low_power and not candidate.low_power:
            continue
        if best is None or (best.priority >= candidate.priority
                            and best.subpriority > candidate.subpriority):
            best = candidate
    return best
def schedule_data(self, timestamp, slot_count: int, modulation: int):
    """Assign up to `slot_count` data slots to streams on the given modulation.

    Streams are chosen best-first via select_data; a multi-slot stream gets one
    entry per slot. Stops as soon as no further stream qualifies or the slots
    are exhausted.

    :return: List of streams, one entry per assigned slot (duplicates allowed)
    """
    selection = self.datastreams.copy()
    streams: List[DataStream] = []
    count = 0
    while count < slot_count:
        stream = self.select_data(timestamp=timestamp, selection=selection)
        # NOTE(review): `stream.priority <= modulation` compares a priority to a
        # modulation index — confirm the two share a scale.
        if (stream is not None
                and self.node.lwb.link_manager.get_link(stream.master)['modulation'] == modulation
                and stream.priority <= modulation):
            # One slot per chunk of the stream's payload
            for i in range(stream.slot_count):
                streams.append(stream)
                count += 1
                if count >= slot_count:
                    break
            selection.remove(stream)
        else:
            break
    return streams
def schedule_notification(self, timestamp, slot_count: int, modulation: int):
    """Assign up to `slot_count` notification slots to streams on the given modulation.

    Streams are chosen best-first via select_notification; stops as soon as no
    further stream qualifies or the slots are exhausted.

    FIX: removed the redundant inner `if stream.priority <= modulation` check —
    it exactly duplicated part of the outer condition and was always true.

    :return: List of streams, one entry per assigned slot
    """
    selection = self.notification_streams.copy()
    streams: List[NotificationStream] = []
    count = 0
    while count < slot_count:
        stream = self.select_notification(selection=selection)
        if (stream is not None
                and self.node.lwb.link_manager.get_link(stream.master)['modulation'] == modulation
                and stream.priority <= modulation):
            streams.append(stream)
            count += 1
            selection.remove(stream)
        else:
            break
    return streams
def get_round_request(self):
    """Return a ROUND_REQUEST message if any stream still awaits acknowledgement.

    :return: A SimMessage addressed to the LWB base, or None when every
        registered stream is already acknowledged
    """
    has_pending = (any(not stream.is_ack for stream in self.notification_streams)
                   or any(not stream.is_ack for stream in self.datastreams))
    if not has_pending:
        return None
    return SimMessage(self.node.local_timestamp, self.node, lwb_slot.LWB_CONTENTION_HEADER_LENGTH, 0,
                      self.node.lwb.base,
                      SimMessageType.ROUND_REQUEST)
def get_data(self, slot_size: int) -> Tuple[DataStream, SimMessage]:
    """Build a DATA message for the best stream fitting into `slot_size`.

    :param slot_size: Available slot size; the data header is reserved out of it
    :return: (stream, message), or (None, None) when no stream qualifies
    """
    stream = self.select_data(slot_size=slot_size - lwb_slot.LWB_DATA_HEADER_LENGTH)
    if stream is not None:
        # The stream copy in the content drops the service reference (see __copy__)
        content = {'data': stream.get(), 'stream': copy(stream)}
        # NOTE(review): message length uses stream.max_payload, not the actual
        # size of the fetched data — confirm this over-approximation is intended.
        message = SimMessage(self.node.local_timestamp, self.node, stream.max_payload + lwb_slot.LWB_DATA_HEADER_LENGTH,
                             0,
                             self.node.lwb.base,
                             SimMessageType.DATA, content=content)
        return stream, message
    else:
        return None, None
def get_notification(self, low_power=False) -> Tuple[Optional[NotificationStream], Optional[SimMessage]]:
    """Build a NOTIFICATION message for the best pending notification stream.

    :param low_power: Restrict the selection to low-power streams
    :return: (stream, message), or (None, None) when no stream qualifies
    """
    stream = self.select_notification(low_power=low_power)
    # FIX: select_notification may return None; the original dereferenced it
    # unconditionally and raised AttributeError despite the Optional return type.
    if stream is None:
        return None, None
    content = {'notification': stream.get(), 'stream': stream}
    message = SimMessage(self.node.local_timestamp, self.node, lwb_slot.LWB_CONTENTION_HEADER_LENGTH, 0,
                         self.node.lwb.base,
                         SimMessageType.NOTIFICATION, content=content)
    return stream, message
def get_stream_request(self, round: 'lwb_round.LWBRound', modulation: int, power_level: int) -> Tuple[
    Optional[Union[DataStream, NotificationStream]], Optional[SimMessage]]:
    """Build a STREAM_REQUEST for the first unacknowledged stream that passes
    its backoff check, preferring notification streams over data streams.

    :return: (stream copy, message), or (None, None) when no stream requests
    """
    for stream in self.notification_streams:
        # NOTE(review): `power_level` is passed where check_request expects a
        # `modulation` (the data loop below passes `modulation`) — confirm this
        # asymmetry is intentional.
        if not stream.is_ack and stream.check_request(round, power_level):
            # Request a copy so the advertised power level does not leak into
            # the locally registered stream
            stream = copy(stream)
            stream.advertised_ack_power_level = self.node.lwb.link_manager.get_link(round.master)['power_level']
            message = SimMessage(self.node.local_timestamp, self.node, lwb_slot.LWB_CONTENTION_HEADER_LENGTH, 0,
                                 self.node.lwb.base,
                                 SimMessageType.STREAM_REQUEST,
                                 content={'type': 'notification', 'stream': stream})
            return stream, message
    for stream in self.datastreams:
        if not stream.is_ack and stream.check_request(round, modulation):
            stream = copy(stream)
            stream.advertised_ack_power_level = self.node.lwb.link_manager.get_link(round.master)['power_level']
            message = SimMessage(self.node.local_timestamp, self.node, lwb_slot.LWB_CONTENTION_HEADER_LENGTH, 0,
                                 self.node.lwb.base,
                                 SimMessageType.STREAM_REQUEST,
                                 content={'type': 'data', 'stream': stream})
            return stream, message
    return None, None
def rx_ack_stream_request(self, message: SimMessage):
if message.content['type'] is 'data':
for stream in self.datastreams:
if stream.id is message.content['stream'].id:
stream.is_ack = True
break
elif message.content['type'] is 'notification':
for stream in self.notification_streams:
if stream.id | |
= True
if (epoch + 1) % self.VALIDATION_EPOCH_NUM == 0:
is_validation_step = True
return is_checkpoint_step, is_validation_step
def run_batches(self,
                dataset,
                total_samples,
                epoch,
                train_dev_test='train'
                ):
    """Run one epoch of batches through the model and return the summed loss.

    :param dataset: Batch provider exposing next_batch()
    :param total_samples: Number of samples in the dataset
    :param epoch: Current epoch index (kept for interface compatibility)
    :param train_dev_test: 'train' runs the optimizer; other values skip it
    :return: Total accumulated training loss over the epoch (0 when not training)
    """
    # NOTE(review): when total_samples is an exact multiple of batch_size this
    # runs one extra batch — confirm whether that is intended.
    n_batches_per_epoch = total_samples // self.batch_size + 1
    total_training_loss = 0
    for _ in range(n_batches_per_epoch):
        (batch_inputs, batch_targets, _, _, batch_seq_lens,
         batch_inputs_FP, batch_seq_lens_FP, batch_inputs_feature_cubes) = dataset.next_batch()
        feeds = {self.input_tensor: batch_inputs_feature_cubes,
                 self.input_decode_coords_tensor: batch_inputs,
                 self.target: batch_targets,
                 self.seq_length: batch_seq_lens,
                 self.input_encode_tensor: batch_inputs_FP,
                 self.seq_len_encode: batch_seq_lens_FP,
                 }
        if train_dev_test == 'train':
            total_batch_loss, _ = self.sess.run([self.MODEL.total_loss, self.MODEL.optimizer], feed_dict=feeds)
            # FIX: accumulate/log only when a loss was computed; the original
            # referenced total_batch_loss unconditionally, which raised
            # NameError for any non-'train' mode. Also fixed the '%2.f'
            # format typo (precision 0) to '%.2f'.
            total_training_loss += total_batch_loss
            logger.debug('Total batch loss: %.2f |Total train cost so far: %.2f',
                         total_batch_loss, total_training_loss)
    return total_training_loss
def sample_seq_mu_cov(self,
start_tracks, # normalized
start_tracks_feature_cubes, # normalized
normalized_flight_plan,
flight_plan_length,
max_length = 100,
search_power = 2,
weights = 0.1,
end_thres = 0.9,
debug = False):
# start_tracks should have the shape of [n_seq, n_time, n_input]
# normalized_flight_plan should have the shape of [n_seq, n_time, n_input] (also flipped)
# normalized_flight_plan should be (flight_plan - [dep_lat, dep_lon] - fp_mu)/fp_std; and then pad_and_flip
# for each sample in the start_tracks, it should have the same length
# flight_plan_length should have the shape of [n_seq]
#################################################
############# data preprocessing #############
#################################################
n_seq, n_time, _ = start_tracks.shape
coords_logprob_tensor = self.MODEL.MVN_pdf.log_prob(self.MODEL.mu_layer)
coords_cov_tensor = tf.matmul(self.MODEL.L_layer, self.MODEL.L_layer, transpose_b = True)
#########################################################
############# initialize neural network #############
#########################################################
encoder_state = self.sess.run([self.MODEL.encoder_final_state],
feed_dict = {self.input_encode_tensor: normalized_flight_plan,
self.seq_len_encode: flight_plan_length})
if debug:
with open('debug_file/Endlayer_encoder_state.pkl', 'wb') as f:
pickle.dump((encoder_state), f)
###################################################
############# start the main loop #############
###################################################
# dynamic shapes
# cur_time_len = n_time
prob_end = []
for i in range(search_power):
print('current search power: %d'%i)
if i == 0:
feeds_update = {self.input_tensor: start_tracks_feature_cubes,
self.seq_length: [n_time]*n_seq,
self.input_decode_coords_tensor: start_tracks,
# self.input_encode_tensor: normalized_flight_plan,
# self.seq_len_encode: flight_plan_length,
self.MODEL._initial_state: encoder_state}
else:
feeds_update = {self.input_tensor: pred_feature_cubes,
self.seq_length: [1]*last_input_track_point.shape[0],
self.input_decode_coords_tensor: last_input_track_point,
# self.input_encode_tensor: normalized_flight_plan,
# self.seq_len_encode: flight_plan_length,
self.MODEL._initial_state: state}
state, pi_logprob, coords_logprob, coords_mu, coords_cov = self.sess.run([self.MODEL.decode_final_state,
tf.log(self.MODEL.pi_layer),
coords_logprob_tensor,
self.MODEL.mu_layer,
coords_cov_tensor],
feed_dict = feeds_update)
if i == 0:
# only select the last element (last time stamp)
pi_logprob = pi_logprob[range(n_time - 1, n_time*n_seq, n_time), :]
coords_logprob = coords_logprob[range(n_time - 1, n_time*n_seq, n_time), :]
coords_mu = coords_mu[range(n_time - 1, n_time*n_seq, n_time), :, :]
coords_cov = coords_cov[range(n_time - 1, n_time*n_seq, n_time), :, :, :]
# p_end = p_end[range(n_time - 1, n_time*n_seq, n_time), :]
"""
state: tuple with size n_layers, each is a LSTMtuple object;
state[i].c.shape = (n_seq*n_mixture^i, 256);
state[i].h.shape = (n_seq*n_mixture^i, 256);
pi_logprob: np array with size (n_seq*n_mixture^i, n_mixture)
coords_mu: np array with size (n_seq*n_mixture^i, n_mixture, n_input)
coords_cov: np array with size (n_seq*n_mixture^i, n_mixture, n_input, n_input)
coords_logprob: np array with size (n_seq*n_mixture^i, n_mixture)
"""
last_input_track_point = coords_mu.reshape(coords_mu.shape[0]*coords_mu.shape[1], 1, -1) # shape of [n_seq*n_mixture^(i+1), 1, n_controled_var]
last_input_track_point_cov = coords_cov.reshape(-1, 1, coords_cov.shape[2], coords_cov.shape[3]) # shape of [n_seq*n_mixture^(i+1), n_input, n_controled_var]
state = tuple([tf.nn.rnn_cell.LSTMStateTuple(c = np.repeat(tmp_state.c, self.n_mixture, axis = 0),
h = np.repeat(tmp_state.h, self.n_mixture, axis = 0)) for tmp_state in state])
##########################################################################################
########################## Controlled Prediction ############################
##########################################################################################
# unnormalize predicted flight tracks to limit the size of next predicted point
# with shape of [n_seq*n_mixture^(i+1), 1, n_controled_var]: [lat, lon, alt, time, spd, theta]
unnormalized_last_track_point = self.dataset_sample.unnormalize_flight_tracks(last_input_track_point.reshape(-1, self.n_controled_var))
if i == 0:
prev_track_point = np.repeat(self.dataset_sample.unnormalize_flight_tracks(start_tracks[:, -1, :]), self.n_mixture, axis = 0)
else:
prev_track_point = np.repeat(self.dataset_sample.unnormalize_flight_tracks(final_tracks[:, -1, :]), self.n_mixture, axis = 0)
controlled_next_point = self.calculate_next_pnt(current_lons = prev_track_point[:, 1],
current_lats = prev_track_point[:, 0],
controlled_azi = prev_track_point[:, 5] * 180 / np.pi,
controlled_dist = unnormalized_last_track_point[:, 4]*1852*(120))
# controlled_dist = unnormalized_last_track_point[:, 4]*1852*(unnormalized_last_track_point[:, 3] - prev_track_point[:, 3]))
unnormalized_last_track_point[:, 0] = controlled_next_point[1]
unnormalized_last_track_point[:, 1] = controlled_next_point[0]
unnormalized_last_track_point[:, 3] = prev_track_point[:, 3] + 120
normalized_last_track_point = self.dataset_sample.normalize_flight_tracks(unnormalized_last_track_point)
last_input_track_point[:, :, 0] = (normalized_last_track_point[:, 0, None]+last_input_track_point[:, :, 0])/2
last_input_track_point[:, :, 1] = (normalized_last_track_point[:, 1, None]+last_input_track_point[:, :, 1])/2
last_input_track_point[:, :, 3] = normalized_last_track_point[:, 3, None]
##########################################################################################
######################### End of Controlled Prediction #######################
##########################################################################################
if i == 0:
buffer_total_logprob = (pi_logprob*weights + coords_logprob*(1-weights)) # has the shape of [n_seq, n_mixture]
buffer_pi_prob = pi_logprob.copy()
final_tracks = np.concatenate((np.repeat(start_tracks, self.n_mixture, axis = 0), last_input_track_point), axis = 1)
# has the shape of [n_seq*n_mixture, n_time+1, 4]
final_tracks_cov = last_input_track_point_cov.copy()
pred_feature_cubes, \
pred_feature_grid, \
predicted_matched_info = self.dataset_sample.generate_predicted_pnt_feature_cube(predicted_final_track = final_tracks,
known_flight_deptime = self.known_flight_deptime,
shift_xleft = 0,
shift_xright = 2,
shift_yup = 1,
shift_ydown = 1,
nx = 20,
ny = 20)
else:
buffer_total_logprob = buffer_total_logprob.reshape(-1, 1)
buffer_pi_prob = buffer_pi_prob.reshape(-1, 1)
# prob_end = np.concatenate((np.repeat(prob_end, self.n_mixture, axis = 0), p_end), axis = 1)
buffer_total_logprob = buffer_total_logprob + (pi_logprob*weights + coords_logprob*(1-weights)) # has shape of [n_seq*n_mixture^i, n_mixture]
buffer_pi_prob = buffer_pi_prob + pi_logprob + i * np.log(0.95) # has shape of [n_seq*n_mixture^i, n_mixture]
final_tracks = np.concatenate((np.repeat(final_tracks, self.n_mixture, axis = 0), last_input_track_point), axis = 1)
# has the shape of [n_seq*n_mixture^(i+1), ?, 4]
final_tracks_cov = np.concatenate((np.repeat(final_tracks_cov, self.n_mixture, axis = 0), last_input_track_point_cov), axis = 1)
pred_feature_cubes, \
pred_feature_grid, \
predicted_matched_info = self.dataset_sample.generate_predicted_pnt_feature_cube(predicted_final_track = final_tracks,
known_flight_deptime = self.known_flight_deptime,
shift_xleft = 0,
shift_xright = 2,
shift_yup = 1,
shift_ydown = 1,
nx = 20,
ny = 20)
if (i == 0) and (debug is True):
with open('debug_file/samp_mu_cov_inner_loop0_debug.pkl', 'wb') as f:
pickle.dump((state, pi_logprob, coords_logprob, coords_mu, coords_cov, pred_feature_cubes, pred_feature_grid, predicted_matched_info), f)
# From here, feeds_update will have fixed shapes
buffer_pi_prob_all_mix = []
for j in range(max_length - search_power):
print('===current predicting time stamps: %d==='%j)
feeds_update = {self.input_tensor: pred_feature_cubes,
self.seq_length: [1]*last_input_track_point.shape[0],
self.input_decode_coords_tensor: last_input_track_point,
# self.input_encode_tensor: normalized_flight_plan,
# self.seq_len_encode: flight_plan_length,
self.MODEL._initial_state: state}
state, pi_logprob, coords_logprob, coords_mu, coords_cov = self.sess.run([self.MODEL.decode_final_state,
tf.log(self.MODEL.pi_layer),
coords_logprob_tensor,
self.MODEL.mu_layer,
coords_cov_tensor], feed_dict = feeds_update)
"""
state: tuple with size n_layers, each is a LSTMtuple object; state[0].c.shape = (n_seq*n_mixture^(i+1), 256); state[0].h.shape = (n_seq*n_mixture^(i+1), 256);
pi_logprob: np array with size (n_seq*n_mixture^(i+1), n_mixture)
coords_mu: np array with size (n_seq*n_mixture^(i+1), n_mixture, 4)
coords_cov: np array with size (n_seq*n_mixture^(i+1), n_mixture, 4, 4)
coords_logprob: np array with size (n_seq*n_mixture^(i+1), n_mixture)
"""
if (j == 0) and (debug is True):
with open('debug_file/samp_mu_cov_inner_loop1_debug.pkl', 'wb') as f:
pickle.dump((state, pi_logprob, coords_logprob, coords_mu, coords_cov, pred_feature_cubes, pred_feature_grid, predicted_matched_info), f)
if j == 0:
buffer_total_logprob = buffer_total_logprob.reshape(-1, 1)
buffer_pi_prob = buffer_pi_prob.reshape(-1, 1)
# prob_end = np.concatenate((np.repeat(prob_end, self.n_mixture, axis = 0), p_end), axis = 1)
# else:
# prob_end = np.concatenate((prob_end, p_end), axis = 1)
tmp_buffer_total_logprob = (pi_logprob*weights + coords_logprob*(1-weights))
buffer_total_logprob = buffer_total_logprob + tmp_buffer_total_logprob # has shape of [n_seq*n_mixture^(i+1), n_mixture]
# buffer_pi_prob += np.exp(pi_logprob)*(0.5**(j+search_power))
tmp_pi_prob = pi_logprob + (j + search_power) * np.log(0.95)
buffer_pi_prob_all_mix.append(tmp_pi_prob)
# buffer_pi_prob = buffer_pi_prob + pi_logprob + (j + search_power) * np.log(0.95)
# e.g., n_mixture = 10, search_power == 4, total 10000 trajs, then has the prob of buffer_total_logprob
# 000| 0,1,2,...,9
# 000| 0,1,2,...,9
# 000| 0,1,2,...,9
# ...
# 099| 0,1,2,...,9
# 100| 0,1,2,...,9
# ...
# 999| 0,1,2,...,9
top_k_idx = np.argsort(buffer_total_logprob, axis = -1)[:, -1] # shape of (n_seq*n_mixture^(i+1), )
# top_k_idx = np.argsort(tmp_buffer_total_logprob, axis = -1)[:, -1] # shape of (n_seq*n_mixture^(i+1), )
# buffer_total_logprob = buffer_total_logprob + tmp_buffer_total_logprob[range(coords_mu.shape[0]), top_k_idx, None]
buffer_total_logprob = buffer_total_logprob[range(coords_mu.shape[0]), top_k_idx, None]
buffer_pi_prob = np.concatenate((buffer_pi_prob, tmp_pi_prob[range(coords_mu.shape[0]), top_k_idx, None]), axis = 1)
# buffer_pi_prob = buffer_pi_prob[range(coords_mu.shape[0]), top_k_idx, None]
last_input_track_point = coords_mu[range(coords_mu.shape[0]), top_k_idx, None] # shape of [n_seq*n_mixture^(i+1), 1, n_controled_var]
last_input_track_point_cov = coords_cov[range(coords_cov.shape[0]), top_k_idx, :, :].reshape(-1, 1, coords_cov.shape[2], coords_cov.shape[3])
# shape of [n_seq*n_mixture^(i+1), 1, 4, 4]
##########################################################################################
########################## Controlled Prediction ############################
##########################################################################################
# unnormalize predicted flight tracks to limit the size of next predicted point
# with shape of [n_seq*n_mixture^(i+1), 1, n_controled_var]: [lat, lon, alt, time, spd, theta]
unnormalized_last_track_point = self.dataset_sample.unnormalize_flight_tracks(last_input_track_point.reshape(-1, self.n_controled_var))
prev_track_point = self.dataset_sample.unnormalize_flight_tracks(final_tracks[:, -1, :])
controlled_next_point = self.calculate_next_pnt(current_lons = prev_track_point[:, 1],
current_lats = prev_track_point[:, 0],
controlled_azi = prev_track_point[:, 5] * 180 / np.pi,
controlled_dist = unnormalized_last_track_point[:, 4]*1852*(120))
# controlled_dist = unnormalized_last_track_point[:, 4]*1852*(unnormalized_last_track_point[:, 3] - prev_track_point[:, 3]))
unnormalized_last_track_point[:, 0] | |
node unoccupied"""
if self.free is not None and self.free:
raise LauncherException("Attempting to release a free node")
self.free = True
self.taskid = -1
self.tasks_on_this_node += 1
def isfree(self):
"""Test whether a node is occupied"""
return self.free
def nodestring(self):
if self.free:
return "X"
else:
return str(self.taskid)
def __str__(self):
return "h:%s, c:%s, id:%s" % (self.hostname, str(self.core), str(self.nodeid))
class Completion:
    """Trivial completion object for a task.

    The base class does very little: ``attach`` returns the command
    untouched and ``test`` immediately reports the task as complete.

    :param taskid: identifying number of the task this completion belongs to
    """
    workdir = "."
    def __init__(self, taskid=0):
        self.stampdir = "."
        self.taskid = taskid
    def set_workdir(self, workdir):
        """Record the working directory, anchoring relative paths at the
        current directory, and create it on disk if it is missing."""
        self.workdir = workdir
        if self.workdir[0] != "/":
            # relative path: make it absolute against the current directory
            self.workdir = os.getcwd() + "/" + self.workdir
        # create stampdir. maybe this should be in the attach method?
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)
    def attach(self, txt):
        """Return the command unchanged: the trivial completion adds no stamp."""
        return txt
    def test(self):
        """Report completion: the trivial completion is always done."""
        return True
class FileCompletion(Completion):
    """FileCompletion is the most common type of completion. It appends
    to a command the creation of a zero size file with a unique name.
    The completion test_sweep then tests for the existence of that file.

    :param taskid: (keyword, required) this has to be unique. Unfortunately we can not test_sweep for that.
    :param stampdir: (keyword, optional, default is self.stampdir, which is ".") directory where the stampfile is left
    :param stamproot: (keyword, optional, default is "expire") root of the stampfile name
    """
    stamproot = "expire"
    stampdir = "."
    def __init__(self, **kwargs):
        taskid = kwargs.pop("taskid", -1)
        if taskid == -1:
            raise LauncherException("Need an explicit task ID")
        Completion.__init__(self, taskid)
        self.set_workdir(kwargs.pop("stampdir", self.stampdir))
        self.stamproot = kwargs.pop("stamproot", self.stamproot)
        if len(kwargs) > 0:
            raise LauncherException("Unprocessed FileCompletion args: %s" % str(kwargs))
    def stampname(self):
        """Internal function that gives the name of the stamp file,
        including directory path"""
        return "%s/%s%s" % (self.workdir, self.stamproot, str(self.taskid))
    def attach(self, txt):
        """Append a 'touch' command to the txt argument"""
        # was os.system("mkdir -p ..."): shelling out breaks on paths with
        # spaces/metacharacters; os.makedirs is the portable equivalent
        os.makedirs(self.workdir, exist_ok=True)
        if re.match("^[ \t]*$", txt):
            return "touch %s" % self.stampname()
        else:
            return "%s ; touch %s" % (txt, self.stampname())
    def test(self):
        """Test for the existence of the stamp file"""
        return os.path.isfile(self.stampname())
    def cleanup(self):
        """Remove the stamp file; like 'rm -f', a missing file is not an error."""
        try:
            os.remove(self.stampname())
        except FileNotFoundError:
            pass
class Task:
    """A Task is an abstract object associated with a commandline

    :param command: (required) Commandline object; note that this contains the core count
    :param completion: (keyword, optional) Completion object; if unspecified the trivial completion is used.
    :param taskid: (keyword) identifying number of this task; has to be unique in a job, also has to be equal to the taskid of the completion
    :param debug: (keyword, optional) string of debug keywords
    """
    def __init__(self, command, **kwargs):
        # "command" is dict-like: it supplies the command line proper, the
        # pre/post processing commands, and the core count.
        self.command = command["command"]
        self.pre_process = command["pre_process"]
        self.post_process = command["post_process"]
        # make a default completion if needed
        self.completion = kwargs.pop("completion", None)
        self.taskid = kwargs.pop("taskid", 0)
        if self.completion is None:
            self.completion = Completion(taskid=self.taskid)
        if self.taskid != self.completion.taskid:
            raise LauncherException("Incompatible taskids")
        self.size = command["cores"]
        self.debugs = kwargs.pop("debug", "")
        # self.debug is a truthy re.Match when "task" occurs in the debug
        # keyword string, None otherwise; it is used as a boolean flag below
        self.debug = re.search("task", self.debugs)
        if len(kwargs) > 0:
            raise LauncherException("Unprocessed args: %s" % str(kwargs))
        self.has_started = False
        DebugTraceMsg("created task <<%s>>" % str(self), self.debug, prefix="Task")
        self.nodes = None
    def start_on_nodes(self, **kwargs):
        """Start the task.

        :param pool: HostLocator object (keyword, required) : this describes the nodes on which to start the task
        :param commandexecutor: (keyword, optional) prefixer routine, by default the commandexecutor of the pool is used

        This sets ``self.starttime`` to right before the execution begins. We do not keep track
        of the endtime, but instead set ``self.runningtime`` in the ``hasCompleted`` routine.
        """
        self.pool = kwargs.pop("pool", None)
        self.starttick = kwargs.pop("starttick", 0)
        # normalize whatever was passed as "pool" into a HostLocator
        if self.pool is None:
            self.pool = LocalHostPool(
                nhosts=self.size, debug=self.debugs
            ).request_nodes(self.size)
        elif isinstance(self.pool, (Node)):
            if self.size > 1:
                raise LauncherException(
                    "Can not start size=%d on sing Node" % self.size
                )
            self.pool = OneNodePool(self.pool, debug=self.debugs).request_nodes(
                self.size
            )
        if not isinstance(self.pool, (HostLocator)):
            raise LauncherException("Invalid locator object")
        if len(kwargs) > 0:
            raise LauncherException(
                "Unprocessed Task.start_on_nodes args: %s" % str(kwargs)
            )
        # wrap with stamp detector
        if self.post_process is None:
            wrapped = self.line_with_completion()
        else:
            wrapped = self.get_fixed_command()
        # If the post process is not None, then we should attach completion to the post process
        # because otherwise the job may terminate before the post processing completes.
        # In all honesty though, post processing should be handled as a separate job with a dependency
        self.post_process = self.completion.attach(self.post_process)
        DebugTraceMsg(
            "starting task %d of size %d on <<%s>>\nin cwd=<<%s>>\ncmd=<<%s>>"
            % (self.taskid, self.size, str(self.pool), os.getcwd(), wrapped),
            self.debug,
            prefix="Task",
        )
        self.starttime = time.time()
        commandexecutor = self.pool.pool.commandexecutor
        commandexecutor.execute(
            wrapped,
            pool=self.pool,
            pre_process=self.pre_process,
            post_process=self.post_process,
        )
        self.has_started = True
        DebugTraceMsg("started %d" % self.taskid, self.debug, prefix="Task")
    def get_fixed_command(self):
        """Return the commandline with the PYL_ID placeholder replaced by this task's id."""
        return re.sub("PYL_ID", str(self.taskid), self.command)
    def line_with_completion(self):
        """Return the task's commandline with completion attached"""
        line = self.get_fixed_command()
        return self.completion.attach(line)
    def isRunning(self):
        """Report whether the task has been started (it may since have completed)."""
        return self.has_started
    def hasCompleted(self):
        """Execute the completion test_sweep of this Task"""
        completed = self.has_started and self.completion.test()
        if completed:
            # record wall-clock running time on first detection of completion
            self.runningtime = time.time() - self.starttime
            DebugTraceMsg(
                "completed %d in %5.3f" % (self.taskid, self.runningtime),
                self.debug,
                prefix="Task",
            )
        return completed
    def __repr__(self):
        # compact summary used by trace messages
        s = "Task %d, commandline: [%s], pool size %d" % (
            self.taskid,
            self.command,
            self.size,
        )
        return s
class TaskQueue:
    """Maintains the lists of queued, running, completed and aborted Task objects.
    This is internally created inside a ``LauncherJob`` object.

    :param debug: (keyword, optional) debug flag passed to trace messages
    """
    def __init__(self, **kwargs):
        self.queue = []        # tasks waiting to be placed on nodes
        self.running = []      # tasks currently executing
        self.completed = []    # tasks that finished
        self.aborted = []      # tasks that were aborted
        self.maxsimul = 0      # high-water mark of concurrently running tasks
        self.submitdelay = 0   # optional pause (seconds) between task starts
        self.debug = kwargs.pop("debug", False)
        if len(kwargs) > 0:
            raise LauncherException("Unprocessed TaskQueue args: %s" % str(kwargs))
    def isEmpty(self):
        """Test whether the queue is empty and no tasks running"""
        return self.queue == [] and self.running == []
    def enqueue(self, task):
        """Add a task to the queue"""
        DebugTraceMsg("enqueueing <%s>" % str(task), self.debug, prefix="Queue")
        self.queue.append(task)
    def startQueued(self, hostpool, **kwargs):
        """For all queued tasks, try to find nodes to run them on.

        :param hostpool: HostPool object from which nodes are requested
        :param starttick: (keyword, optional, default 0) tick recorded on each started task
        """
        tqueue = copy.copy(self.queue)
        # go through tasks in descending size; if one doesn't fit,
        # anything of the same size or larger is skipped via max_gap
        tqueue.sort(key=lambda x: -x.size)
        max_gap = len(hostpool)
        starttick = kwargs.pop("starttick", 0)
        for t in tqueue:
            requested_gap = t.size
            if requested_gap > max_gap:
                continue
            locator = hostpool.request_nodes(requested_gap)
            if locator is None:
                DebugTraceMsg(
                    "could not find nodes for <%s>" % str(t), self.debug, prefix="Queue"
                )
                # don't ask for this many nodes again during this pass
                max_gap = requested_gap - 1
                continue
            if self.submitdelay > 0:
                time.sleep(self.submitdelay)
            DebugTraceMsg(
                "starting task <%s> on locator <%s>" % (str(t), str(locator)),
                self.debug,
                prefix="Queue",
            )
            t.start_on_nodes(pool=locator, starttick=starttick)
            hostpool.occupyNodes(locator, t.taskid)
            self.queue.remove(t)
            self.running.append(t)
            self.maxsimul = max(self.maxsimul, len(self.running))
    def find_recently_completed(self):
        """Find the first recently completed task, or None.
        Note the return, not yield.
        """
        for t in self.running:
            if t.hasCompleted():
                DebugTraceMsg(
                    ".. job completed: %d" % t.taskid, self.debug, prefix="Queue"
                )
                return t
        return None
    def find_recently_aborted(self, abort_test):
        """Find the first recently aborted task, or None.
        Note the return, not yield.

        :param abort_test: predicate applied to each running task
        """
        for t in self.running:
            if abort_test(t):
                DebugTraceMsg(
                    ".. job aborted: %d ran from %d" % (t.taskid, t.starttick),
                    self.debug,
                    prefix="Queue",
                )
                return t
        return None
    def __repr__(self):
        completed = sorted([t.taskid for t in self.completed])
        aborted = sorted([t.taskid for t in self.aborted])
        queued = sorted([t.taskid for t in self.queue])
        running = sorted([t.taskid for t in self.running])
        return (
            "completed: "
            + str(CompactIntList(completed))
            + "\naborted: "
            + str(CompactIntList(aborted))
            + "\nqueued: "
            + str(CompactIntList(queued))
            + "\nrunning: "
            + str(CompactIntList(running))
            + "."
        )
    def savestate(self):
        """Return a multi-line string listing queued, running and completed tasks."""
        # BUGFIX: removed the file-writing code that used to follow this
        # return statement — it was unreachable dead code.
        state = ""
        state += "queued\n"
        for t in self.queue:
            state += "%s: %s\n" % (t.taskid, t.command)
        state += "running\n"
        for t in self.running:
            state += "%s: %s\n" % (t.taskid, t.command)
        state += "completed\n"
        for t in self.completed:
            state += "%s: %s\n" % (t.taskid, t.command)
        return state
    def final_report(self):
        """Return a string describing the max and average runtime for each task.
        Safe to call when nothing has completed yet."""
        if not self.completed:
            # BUGFIX: max()/division on an empty completed list used to raise
            return "# tasks completed: 0\ntasks aborted: %d\n" % len(self.aborted)
        times = [t.runningtime for t in self.completed]
        message = """# tasks completed: %d
tasks aborted: %d
max runningtime: %6.2f
avg runningtime: %6.2f
""" % (
            len(self.completed),
            len(self.aborted),
            max(times),
            sum(times) / len(self.completed),
        )
        return message
class Commandline:
"""A Commandline is basically a dict containing at least the following members:
* command : | |
value.
self.primary_key = list()
if isinstance(primary_key, str):
self.primary_key.append(primary_key)
elif isinstance(primary_key, list):
self.primary_key.extend(primary_key)
else:
raise ValueError(
"Primary keys must be a string or list instead of %s."
% type(primary_key)
)
def add_foreign_key(
self,
foreign_key=None,
from_keys=None,
to_table=None,
to_keys=None,
name=None,
):
"""
Adds a foreign key to the table. This can either be a created ForeignKey object, or the details of the key,
which will be created.
:param foreign_key: The ForeignKey that was already created. Other parameters will be ignored.
:type foreign_key: ForeignKey
:param from_keys: Name of key or list of keys.
:param to_table: Name of table to link to.
:param to_keys: Name of key or list of keys to link to.
:param name: Optional name of the table. One will be created if not provided.
"""
if foreign_key is not None:
if isinstance(foreign_key, ForeignKey):
self.foreign_keys[foreign_key.name] = foreign_key
else:
raise ValueError(
"The foreign key must be of type ForeignKey, but got %s"
% type(foreign_key)
)
else:
fk = ForeignKey(
from_table=self.table_name,
from_keys=from_keys,
to_table=to_table,
to_keys=to_keys,
name=name,
)
self.foreign_keys[fk.name] = fk
def get_foreign_key(self, fk_name):
"""
Returns the foreign key with the given name or none.
:param fk_name: Name of the foreign key.
:type fk_name: str
:return: Foreign key with the given name or None if it doesn't exist.
":rtype: ForeignKey
"""
return self.foreign_keys.get(fk_name, None)
def foreign_keys_iter(self):
"""
Returns an iterator over the foreign keys.
:return: Iterator over the foreign keys.
:rtype: iter
"""
return iter(self.foreign_keys.values())
# def __init__(self, from_table, from_keys, to_table, to_keys, name=None, conditions=None):
def add_relationship(
self, relationship=None, to_table=None, name=None, conditions=None
):
"""
Adds a foreign key to the table. This can either be a created ForeignKey object, or the details of the key,
which will be created.
:param relationship: The GenericRelationship that was already created. Other parameters will be ignored.
:type relationship: GenericRelationship
:param to_table: Name of table to link to.
:param name: Optional name of the table. One will be created if not provided.
:param conditions: Optional conditions for the relationship.
"""
if relationship is not None:
if isinstance(relationship, GenericRelationship):
self.relationships[relationship.name] = relationship
else:
raise ValueError(
"The relationship must be of type GenericRelationship, but got %s"
% type(relationship)
)
else:
rel = GenericRelationship(
from_table=self.table_name,
to_table=to_table,
name=name,
conditions=conditions,
)
self.relationships[rel.name] = rel
def get_relationship(self, rel_name):
"""
Returns the foreign key with the given name or none.
:param rel_name: Name of the foreign key.
:type rel_name: str
:return: Relationship with the given name or None if it doesn't exist.
":rtype: Relationship
"""
return self.relationships.get(rel_name, None)
def relationships_iter(self):
"""
Returns an iterator over the relationships.
:return: Iterator over the relationships.
:rtype: iter
"""
return iter(self.relationships.values())
def get_shard_key_columns(self):
"""
Returns the columns used for partiioning or None if not partitioned.
:return: Returns the columns used for partiioning or None if not partitioned.
:rtype: list of str | None
"""
if self.shard_key:
return self.shard_key.shard_keys
return None
def get_number_shards(self):
"""
If the table has a shard key, then return the number of shards. Useful for directly getting.
:return: The number of shards or None if the table isn't partitioned.
:rtype: int | None
"""
if self.shard_key:
return self.shard_key.number_shards
return None
def get_all_related_tables(self):
"""
Returns a list with the names of all related tables, either foreign keys or generic relationships.
:return: A list with the names of all related tables, either foreign keys or generic relationships.
:rtype: list of str
"""
all_related_tables = []
all_related_tables.extend([fk.to_table for fk in self.foreign_keys.values()])
all_related_tables.extend([rel.to_table for rel in self.relationships.values()])
return all_related_tables
# -------------------------------------------------------------------------------------------------------------------
class ValidationResult:
    """
    Collects the outcome of a validation run: a validity flag plus a list
    of (message, level) issue tuples.
    """
    INFO = "Information"
    WARNING = "Warning"
    ERROR = "Error"
    def __init__(self):
        """
        Creates a new validation result that is valid with no issues.
        """
        self.issues = []
        self.is_valid = True
    def add_issue(self, issue, level=ERROR):
        """
        Record an issue; any recorded issue (of any level) marks the result invalid.

        :param issue: The issue to add.
        :type issue: str
        :param level: The level of the error (INFO, WARNING, ERROR).
        :type level: str
        """
        self.is_valid = False
        self.issues.append((issue, level))
    def add_error(self, issue):
        """
        Record an ERROR-level issue.

        :param issue: The issue to add.
        :type issue: str
        """
        self.add_issue(issue, ValidationResult.ERROR)
    def add_warning(self, issue):
        """
        Record a WARNING-level issue.

        :param issue: The issue to add.
        :type issue: str
        """
        self.add_issue(issue, ValidationResult.WARNING)
    def add_info(self, issue):
        """
        Record an INFO-level issue.

        :param issue: The issue to add.
        :type issue: str
        """
        self.add_issue(issue, ValidationResult.INFO)
    def eprint_issues(self):
        """
        Prints the issues to standard error.
        """
        for message, level in self.issues:
            eprint(level + ": " + message)
# -------------------------------------------------------------------------------------------------------------------
class Database:
    """
    Class that represents a database. A database contains schemas and tables.
    Note that tables with the same name in different schemas are not currently supported.
    """
    # TODO Add support for tables with the same name in different schemas.
    def __init__(self, database_name):
        """
        Creates a new database with the given name.
        :param database_name: Name of the database.
        :type database_name: str
        """
        assert database_name is not None
        self.database_name = database_name
        self.tables = OrderedDict()  # table_name -> Table, in insertion order
        self.schemas = {}            # schema_name -> number of tables using it
    def add_table(self, table):
        """
        Adds a table to the database.
        :param table: table to add to the database.
        :type table: Table
        """
        self.tables[table.table_name] = table
        # increment the schema's table count so the schema can be deleted
        # once its last table is dropped
        nbr_schema = self.schemas.get(table.schema_name, 0)
        nbr_schema += 1
        self.schemas[table.schema_name] = nbr_schema
    def get_table(self, table_name):
        """
        Returns the table with the given name.
        :param table_name: Name of the table to return.
        :return: Table with the given name or None if it's not in the database.
        :rtype: Table
        """
        return self.tables.get(table_name, None)
    def get_table_names(self):
        """
        Returns the names of the tables.
        :return: The name of the tables.
        :rtype: list
        """
        return self.tables.keys()
    def number_tables(self):
        """
        Return the number of tables in the database.
        :return: The number of tables in the database.
        :rtype: int
        """
        return len(self.tables.keys())
    def __iter__(self):
        """
        Returns an iterator on the tables.
        :return: Iterator for the tables.
        """
        return iter(self.tables.values())
    def drop_table(self, table_name):
        """
        Drops a table from the database.
        :param table_name: The name of the table to drop.
        :type table_name: str
        :return: The table to drop or None if the table doesn't exist.
        :rtype: Table
        """
        table = self.tables.pop(table_name, None)
        if table is not None:
            # decrement the schema's table count; remove the schema entirely
            # when its last table is gone
            schema_name = table.schema_name
            nbr_schema = self.schemas[schema_name]
            nbr_schema -= 1
            if nbr_schema == 0:
                self.schemas.pop(schema_name)
            else:
                self.schemas[schema_name] = nbr_schema
        return table
    def get_schema_names(self):
        """
        Returns a list of schema names.
        :return: The list of schemas in the database.
        :rtype: list
        """
        return self.schemas.keys()
    def validate(self):
        """
        Validates that the model does not contain any errors.
        :return: A validation object with is_valid set to True or False and list of issues if not valid.
        :rtype: ValidationResult
        """
        return DatabaseValidator(self).validate()
    def get_number_relationships_from_table(self, table_name):
        """
        Returns the number of foreign keys and generic relationships from a given table.
        :param table_name: The name of the table to get relationships for.
        :type table_name: str
        :return: The number of foreign keys and relationships from a given table.
        :rtype: int
        :raises ValueError: If the table is not in the database.
        """
        table = self.get_table(table_name=table_name)
        if table:
            return len(table.foreign_keys.values()) + len(table.relationships.values())
        # BUGFIX: error message read "Unkonwn"
        raise ValueError(f"Unknown table {table_name}")
    def get_number_relationships_to_table(self, table_name):
        """
        Returns the number of foreign keys to a given table.
        :param table_name: The name of the table to get relationships for.
        :type table_name: str
        :return: The number of foreign keys to a given table.
        :rtype: int
        :raises ValueError: If the table is not in the database.
        """
        if table_name not in self.get_table_names():
            # BUGFIX: error message read "Unkonwn"
            raise ValueError(f"Unknown table {table_name}")
        number_relationships = 0
        for table in self.tables.values():
            for fk in table.foreign_keys.values():
                if fk.to_table == table_name:
                    number_relationships += 1
            for rel in table.relationships.values():
                if rel.to_table == table_name:
                    number_relationships += 1
        return number_relationships
# -------------------------------------------------------------------------------------------------------------------
class DatabaseValidator:
"""
Validates databases for consistency. Any database that passes validation should load into
ThoughtSpot with no errors.
"""
def __init__(self, database):
    """
    Creates a new DatabaseValidator
    :param database: A database to validate.
    :type database: Database
    """
    # Database under validation; iterated table-by-table in validate().
    self.database = database
    # Accumulator for issues reported by the individual _validate_* checks.
    self.validation_results = ValidationResult()
def validate(self):
    """
    Validates the database, returning a validation result.
    :return: A validation result object.
    :rtype: ValidationResult
    """
    # Per-table checks, run in this fixed order for every table.
    checks = (
        self._validate_column_types,
        self._validate_primary_key,
        self._validate_shard_keys,
        self._validate_foreign_keys,
        self._validate_relationships,
    )
    for table in self.database:
        for check in checks:
            check(table)
    return self.validation_results
def | |
Leaf":0x5f674b,
"Fiddle-Leaf Fig":0xa6c875,
"Fiddlehead Fern":0xc8c387,
"Fiddler":0x5a9589,
"Fiddlesticks":0xbb9fb1,
"Field Blue":0x4477aa,
"Field Day":0xc5e6a4,
"Field Drab":0x6c541e,
"Field Green":0x60b922,
"Field Khaki":0xb1a891,
"Field Maple":0x80884e,
"Field of Wheat":0xdeb699,
"Field Poppy":0xd86f3c,
"Fieldstone":0x807e77,
"Fierce Mantis":0x7fc15c,
"Fiery Brown":0x5d3831,
"Fiery Coral":0xe26058,
"Fiery Flamingo":0xf96d7b,
"Fiery Fuchsia":0xb7386e,
"Fiery Glow":0xf0531c,
"Fiery Orange":0xb1592f,
"Fiery Red":0xd01c1f,
"Fiery Rose":0xff5470,
"Fiery Salmon":0xf76564,
"Fiesta":0xedd8d2,
"Fiesta Blue":0x6fc0b1,
"Fiesta Pink":0xd47194,
"Fiesta Rojo":0xb67c80,
"Fife":0xa9a5c2,
"Fifth Olive-Nue":0x8e8779,
"Fig":0x532d3b,
"Fig Balsamic":0x550022,
"Fig Branches":0x7a634d,
"Fig Fruit Mauve":0xa98691,
"Fig Leaf":0x556b2f,
"Fig Mustard Yellow":0xbb8610,
"Fig Preserves":0xa7989e,
"Fig Tree":0x605f4b,
"Fight the Sunrise":0xff99aa,
"Figue":0x9469a2,
"Figue Pulp":0x962c54,
"Figure Stone":0xeedac3,
"Figurine":0xe4d5c0,
"Fiji":0x00aaac,
"Fiji Coral":0x6b5f68,
"Fiji Green":0x636f22,
"Fiji Palm":0x528d3c,
"Fiji Sands":0xd8caa9,
"Filigree":0xdfe7e8,
"Filigree Green":0xa5af89,
"Film Fest":0x93877c,
"Film Noir":0x473933,
"Filmy Green":0xd1d3c7,
"Filtered Forest":0xb7e1d2,
"Filtered Light":0xb1b2c4,
"Filtered Moon":0xecca9a,
"Filtered Rays":0xd0b064,
"Filthy Brown":0xe8aa08,
"Final Straw":0xd0bf9e,
"Finch":0x75785a,
"Fine Alabaster":0xecd3cb,
"Fine Blue":0xb6e1e1,
"Fine Burgundy":0x815158,
"Fine Grain":0xd8cfc1,
"Fine Greige":0xb5a998,
"Fine Linen":0xfaf5c3,
"Fine Pine":0x008800,
"Fine Porcelain":0xfaf0e1,
"Fine Purple":0x5e548d,
"Fine Sand":0xf1d5ae,
"Fine White":0xfaede1,
"Fine White Sand":0xe4d4c0,
"Fine Wine":0x744e5b,
"Finesse":0x96a8c8,
"Finest Blush":0xdd8888,
"Finest Silk":0xf1e5d7,
"Finger Banana":0xe1c12f,
"Fingerpaint":0x8a7e61,
"Fingerprint":0x555356,
"Finishing Touch":0xcbbfb3,
"Finlandia":0x61755b,
"Finn":0x694554,
"Finnish Fiord":0x5db0be,
"Fioletowy Beige":0xfffce3,
"Fioletowy Purple":0xfc44a3,
"Fiord":0x4b5a62,
"Fiorito":0xbfbfaf,
"Fir":0x3a725f,
"Fir Blue":0x46807b,
"Fir Green":0x67592a,
"Fir Spruce Green":0x6d7969,
"Fire":0x8f3f2a,
"Fire Ant":0xbe6400,
"Fire Axe Red":0xce1620,
"Fire Bolt":0xcc4411,
"Fire Bush":0xe09842,
"Fire Chalk":0xd2806c,
"Fire Chi":0x92353a,
"Fire Coral":0xe3b46f,
"Fire Dance":0xe3d590,
"Fire Dragon Bright":0xf97306,
"Fire Dust":0xb98d68,
"Fire Engine":0xfe0002,
"Fire Flower":0xf68f37,
"Fire Hydrant":0xff0d00,
"Fire Island":0xd95137,
"Fire Lord":0xbb7733,
"Fire Mist":0xfbd9c4,
"Fire Opal":0xfd3c06,
"Fire Orange":0xff8e57,
"Fire Roasted":0x79483e,
"Fire Yellow":0xffb70b,
"Fireball":0xce2029,
"Firebird Tail Lights":0xdd5522,
"Firebrick":0xb22222,
"Firebug":0xcd5c51,
"Firecracker":0xf36944,
"Firecracker Salmon":0xf36363,
"Fired Brick":0x6a2e2a,
"Fired Clay":0x884444,
"Fired Up":0xd37a38,
"Fireflies":0xf6daa7,
"Firefly":0x314643,
"Firefly Glow":0xfff3a1,
"Fireglow":0xd65e40,
"Firelight":0xf9d97b,
"Fireplace Glow":0xd08b73,
"Fireplace Kitten":0xc5c9c7,
"Fireplace Mantel":0x847c70,
"Fireside":0x6e4a44,
"Firewatch":0xee8866,
"Fireweed":0xb38491,
"Fireworks":0x44363d,
"Firm Green":0x47654a,
"Firm Pink":0xda93c1,
"Firmament Blue":0x11353f,
"First Blush":0xf4edec,
"First Colors of Spring":0xdbe64c,
"First Crush":0xf6e2ea,
"First Date":0xf5b1a2,
"First Daughter":0xf7d2d8,
"First Day of School":0xfadba0,
"First Day of Summer":0xf1e798,
"First Frost":0xcfe5f0,
"First Impression":0xf4e5e7,
"First Lady":0xc47967,
"First Landing":0x59a6cf,
"First Light":0xd9e6ee,
"First Lilac":0xe7d6ed,
"First Love":0xcf758a,
"First of July":0xbce6ef,
"First Peach":0xf4cac6,
"First Plum":0xb87592,
"First Post":0x2fbda1,
"First Rain":0xbdd8ec,
"First Shade of Blue":0xcbe1f2,
"First Snow":0xe8eff8,
"First Star":0xdad9d4,
"First Timer Green":0x00e8d8,
"First Tulip":0xffe79c,
"First Waltz":0xd5bcb2,
"Fischer Blue":0x32a0b1,
"Fish Bone":0xe4d9c5,
"Fish Boy":0x11dddd,
"Fish Camp Woods":0x7a9682,
"Fish Ceviche":0xe1e1d5,
"Fish Finger":0xeecc55,
"Fish Net Blue":0x1e446e,
"Fish Pond":0x86c8ed,
"Fisher King":0x5182b9,
"Fishy House":0x1ba590,
"Fist of the North Star":0x225599,
"Fistfull of Green":0xa2a415,
"Fitness Blue":0x5bb9d2,
"Fitzgerald Smoke":0xb3b6b0,
"Five Star":0xffaa4a,
"Fizz":0xb1dbaa,
"Fizzing Whizbees":0xddbcbc,
"Fizzle":0xd8e4de,
"Fjord":0x616242,
"Fjord Blue":0x007290,
"Fjord Green":0x005043,
"Flag Green":0x717c00,
"Flagstaff Green":0xb3bfb0,
"Flagstone":0xacadad,
"Flagstone Quartzite":0x9a9e88,
"Flamboyant":0x129c8b,
"Flamboyant Plum":0x694e52,
"Flame":0xe25822,
"Flame Hawkfish":0x960018,
"Flame Orange":0xfb8b23,
"Flame Pea":0xbe5c48,
"Flame Red":0x86282e,
"Flame Scarlet":0xcd212a,
"Flame Yellow":0xffcf49,
"Flamenco":0xea8645,
"Flaming Flamingo":0xdd55ff,
"Flaming June":0xeebb66,
"Flaming Torch":0xd2864e,
"Flamingo":0xe1634f,
"Flamingo Diva":0xff44dd,
"Flamingo Dream":0xee888b,
"Flamingo Feather":0xf8bdd9,
"Flamingo Fury":0xdf01f0,
"Flamingo Peach":0xf6e2d8,
"Flamingo Pink":0xfc8eac,
"Flamingo Queen":0xcc33ff,
"Flamingo Red":0xef8e87,
"Flan":0xf6e3b4,
"Flannel Grey":0xaeadac,
"Flannel Pajamas":0x8b8d98,
"Flapper Dance":0x495762,
"Flare Gun":0xff4519,
"Flash Gitz Yellow":0xfffb05,
"Flash in the Pan":0xff9977,
"Flash of Orange":0xffaa00,
"Flashlight":0xf9eed6,
"Flashman":0x7cbd85,
"Flashpoint":0xf9f2d1,
"Flashy Sapphire":0x2c538a,
"Flat Aluminum":0xc3c6cd,
"Flat Blue":0x3c73a8,
"Flat Brown":0x754600,
"Flat Earth":0xaa5533,
"Flat Flesh":0xf7d48f,
"Flat Green":0x699d4c,
"Flat Yellow":0xfff005,
"Flattered Flamingo":0xee6655,
"Flattering Peach":0xf4d3b3,
"Flattery":0x6b4424,
"Flavescent":0xf7e98e,
"Flavoparmelia Caperata":0x8fb67b,
"Flax":0xeedc82,
"Flax Beige":0xd4c3b3,
"Flax Bloom":0xd2d8f4,
"Flax Fiber":0xe0d68e,
"Flax Fibre Grey":0xb7a99a,
"Flax Flower":0x5577aa,
"Flax Flower Blue":0x4499dd,
"Flax Smoke":0x7b8265,
"Flax Straw":0xcbaa7d,
"Flax-Flower Blue":0x6f88af,
"Flaxen":0xfbecc9,
"Flaxen Fair":0xe3ddbd,
"Flaxen Field":0xbba684,
"Flaxseed":0xf7e6c6,
"Flayed One Flesh":0xfcfcde,
"Fleck":0x97bbe1,
"Fleeting Green":0xd8e2d8,
"Flemish Blue":0xadd0e0,
"Flesh":0xffcbc4,
"Flesh Fly":0x894585,
"Flesh Grey":0xaaa197,
"Flesh Pink":0xf9cbd3,
"Flesh Red":0xe9c49d,
"Flesh Wash":0xce8c42,
"Fleshtone Shade Wash":0xcf9346,
"Fleur de Sel":0xdcddd8,
"Fleur-De-Lis":0xb090c7,
"Flexible Gray":0xb1a3a1,
"Flickering Firefly":0xf8f6e6,
"Flickering Flame":0xaa6e49,
"Flickering Gold":0xc6a668,
"Flickering Light":0xfff1dc,
"Flickering Sea":0x5566ee,
"Flickery C64":0x4f81ff,
"Flickery CRT Green":0x90f215,
"Flickr Blue":0x216bd6,
"Flickr Pink":0xfb0081,
"Flier Lie":0xcdb891,
"Flight Time":0xa3b8ce,
"Flinders Green":0x6d7058,
"Fling Green":0x8ecfd0,
"Flint":0x716e61,
"Flint Corn Red":0xd9623b,
"Flint Grey":0xa09c98,
"Flint Purple":0x42424d,
"Flint Rock":0x989493,
"Flint Shard":0x8f9395,
"Flint Smoke":0xa8b2b1,
"Flintstone":0x677283,
"Flintstone Blue":0x434252,
"Flip":0x45747e,
"Flip a Coin":0xccddcc,
"Flip-Flop":0xf2c4a7,
"Flipper":0x7f726b,
"Flirt":0x7a2e4d,
"Flirt Alert":0xbe3c37,
"Flirtatious":0xffd637,
"Flirtatious Flamingo":0xcc22ff,
"Flirtatious Indigo Tea":0x473f2d,
"Flirty Pink":0x9e88b1,
"Flirty Salmon":0xfa7069,
"Floating Blue":0xb0c9cd,
"Floating Feather":0xe9d8c2,
"Floating Island":0xece5cf,
"Floating Lily":0xedebce,
"Floating Lily Pad":0xccc7a1,
"Flood":0x6677bb,
"Flood Mud":0x877966,
"Flood Out":0x579dab,
"Floppy Disk":0x110044,
"Flor Lila":0xe0e0eb,
"Flora":0x73fa79,
"Flora Green":0x91ad8a,
"Floral Arrangement":0xc6ac9f,
"Floral Bluff":0xe7cfb9,
"Floral Bouquet":0xbacb7c,
"Floral Leaf":0xffb94e,
"Floral Linen":0xf5e2de,
"Floral Scent":0xeeede9,
"Floral Tapestry":0xc39191,
"Floral White":0xfffaf0,
"Florence":0x96b576,
"<NAME>":0x835740,
"Florence Red":0x753f38,
"<NAME>":0x7a5544,
"Florentine Clay":0xc1937a,
"Florentine Lapis":0x1c5798,
"Florida Grey":0xbea4a2,
"Florida Keys":0x56beab,
"Florida Mango":0xed9f6c,
"Florida Sunrise":0xf7aa6f,
"Florida Turquoise":0x6bb8b1,
"Florida Waters":0x2a4983,
"Floriography":0xa54049,
"Floss":0xd7b3b9,
"Flotation":0x7bb0ba,
"Flounce":0x4a8791,
"Flour Sack":0xb9b297,
"Flourish":0xebdc9c,
"Flower Bulb":0xd9e8c9,
"Flower Centre":0xfde6c6,
"Flower Field":0xd9a96f,
"Flower Girl":0xf498ad,
"Flower Girl Dress":0xede7e6,
"Flower Hat Jellyfish":0xf9d593,
"Flower of Oahu":0xf5dfc5,
"Flower Pot":0x8f4438,
"Flower Spell":0xffc9d7,
"Flower Stem":0xb5d5b0,
"Flower Wood":0x988378,
"Flowerbed":0xffebda,
"Flowering Cactus":0xa2d4bd,
"Flowering Chestnut":0x875657,
"Flowering Raspberry":0xa16c94,
"Flowering Reed":0xe1d8b8,
"Flowerpot":0xd8b0a0,
"Flowers of May":0xe3d7e3,
"Flowery":0xe4dcbf,
"Flowing Breeze":0xb9c6cb,
"Flowing River":0x335e6f,
"Fluffy Duckling":0xfcdf39,
"Fluffy Pink":0xf7d6cb,
"Fluid Blue":0xc5d6eb,
"Fluor Spar":0xa77d35,
"Fluorescence":0x89d178,
"Fluorescent Fire":0x984427,
"Fluorescent Green":0x08ff08,
"Fluorescent Lime":0xbdc233,
"Fluorescent Orange":0xffcf00,
"Fluorescent Pink":0xfe1493,
"Fluorescent Red":0xff5555,
"Fluorescent Red Orange":0xfc8427,
"Fluorescent Turquoise":0x00fdff,
"Fluorescent Yellow":0xccff02,
"Fluorite Blue":0xb4ccc2,
"Fluorite Green":0x699158,
"Fluro Green":0x0aff02,
"Flurries":0xf2ede3,
"Flush Mahogany":0xca2425,
"Flush Orange":0xff6f01,
"Flush Pink":0xf8cbc4,
"Flushed":0xdd5555,
"Fly a Kite":0xc8daf5,
"Fly Agaric":0xff2052,
"Fly by Night":0x1c1e4d,
"Flying Carpet":0x787489,
"Flying Fish":0x5376ab,
"Flying Fish Blue":0x024aca,
"Flyway":0x5db3d4,
"Foam":0xd0eae8,
"Foam Green":0x90fda9,
"Foaming Surf":0x90d1dd,
"Foamy Milk":0xf7f4f7,
"Focus":0xe5e0d2,
"Focus on Light":0xfef9d3,
"Focus Point":0x91c3bd,
"Fog":0xd6d7d2,
"Fog Beacon":0xd8d6d1,
"Fog Green":0xc2cbb4,
"Fog of War":0x112233,
"Fog White":0xf1efe4,
"Foggy Amethyst":0x57317e,
"Foggy Blue":0x99aebb,
"Foggy Day":0xe7e3db,
"Foggy Dew":0xd1d5d0,
"Foggy Grey":0xa7a69d,
"Foggy Heath":0xe2c9ff,
"Foggy London":0x5c5658,
"Foggy Love":0xd5c7e8,
"Foggy Mist":0xc8d1cc,
"Foggy Morn":0xcad0ce,
"Foggy Night":0xa79c8e,
"Foggy Quartz":0xbfa2a1,
"Fogtown":0xeef0e7,
"Foil":0xc0c3c4,
"Foille":0xb0b99c,
"Foliage":0x95b388,
"Foliage Green":0x3e6f58,
"Folk Guitar":0x7a634f,
"Folk Song":0x65a19f,
"Folk Tale":0xb2e1bc,
"Folk Tales":0xa5c1b6,
"Folklore":0x684141,
"Folkstone":0x6d6562,
"Folkstone Grey":0x626879,
"Folksy Gold":0xd69969,
"Follow the Leader":0xf7e5d0,
"Folly":0xfd004d,
"Fond de Teint":0xffaaaa,
"Fond Memory":0xc8bcb7,
"Fondue":0xc99f97,
"Fondue Fudge":0x5d4236,
"Fool's Gold":0xcad175,
"Football":0x825736,
"Football Field":0x7eaf34,
"Foothill Drive":0xcab48e,
"Foothills":0xe1cfa5,
"Footie Pajamas":0xe6cee6,
"For the Love of Hue":0x457e87,
"Forbidden Blackberry":0x323f75,
"Forbidden Forest":0x215354,
"Forbidden Fruit":0xfe7b7c,
"Forbidden Red":0x8a4646,
"Forbidden Thrill":0x856363,
"Force of Nature":0xd5ce69,
"Forceful Orange":0xf29312,
"Foresight":0x94a8d3,
"Forest":0x0b5509,
"Forest Berry":0x956378,
"Forest Biome":0x184a45,
"Forest Blues":0x0d4462,
"Forest Bound":0x738f50,
"Forest Canopy":0x969582,
"Forest Edge":0x627b72,
"Forest Fern":0x63b76c,
"Forest Floor":0x555142,
"Forest Floor Khaki":0x78766d,
"Forest Found":0xe1dfbb,
"Forest Frolic":0x88bb95,
"Forest Fruit Pink":0x68393b,
"Forest Fruit Red":0x6e2759,
"Forest Green":0x154406,
"Forest Greenery":0x3e645b,
"Forest Lichen":0x9aa22b,
"Forest Maid":0x52b963,
"Forest Moss":0x858f83,
"Forest Night":0x434237,
"Forest Path":0x708d6c,
"Forest Rain":0x216957,
"Forest Ride":0x006800,
"Forest Ridge":0x555d46,
"Forest Shade":0x91ac80,
"Forest Spirit":0x667028,
"Forest Splendor":0x016e61,
"Forest Tapestry":0xa4ba8a,
"Forest Tent":0xbba748,
"Forester":0x9aa77c,
"Forestwood":0x4d5346,
"Forever Blue":0x899bb8,
"Forever Denim":0x778590,
"Forever Fairytale":0xd2bbb2,
"Forever Faithful":0xefe6e1,
"Forever Green":0xaab4a7,
"Forever Lilac":0xafa5c7,
"Forged Iron":0x48464a,
"Forged Steel":0x5b5b59,
"Forget-Me-Not":0x0087bd,
"Forget-Me-Not Blue":0x358094,
"Forgive Quickly":0xe1e1be,
"Forgiven Sin":0xff1199,
"Forgotten Blue":0xc0e5ec,
"Forgotten Gold":0xc7b89f,
"Forgotten Mosque":0xe2d9db,
"Forgotten Pink":0xffd9d6,
"Forgotten Purple":0x9878f8,
"Forgotten Sunset":0xfdd5b1,
"Formal Affair":0x848391,
"Formal Garden":0x3a984d,
"Formal Grey":0x97969a,
"Formal Maroon":0x70474b,
"Forsythia":0xffc801,
"Forsythia Blossom":0xf6d76e,
"Forsythia Bud":0xbbcc55,
"Fortitude":0xc6c5c1,
"Fortress Grey":0xb8b8b8,
"Fortress Stone":0xc5c0b0,
"Fortune":0x9f97a3,
"Fortune Cookie":0xe0c5a1,
"Fortune Red":0xb0534d,
"Fortune's Prize":0xdaa994,
"Forward Fuchsia":0x92345b,
"Fossil":0x806f63,
"Fossil Butte":0xa78f65,
"Fossil Green":0x6c6a43,
"Fossil Sand":0xd2c8bb,
"Fossil Stone":0xe3ddcc,
"Fossil Tan":0xd1af90,
"Fossilized":0xb6b8b0,
"Fossilized Leaf":0x756a43,
"Foul Green":0x85c7a1,
"Foundation":0xf8e8c5,
"Foundation White":0xefeeff,
"Fountain":0x56b5ca,
"Fountain Blue":0x65adb2,
"Fountain City":0x9cd4cf,
"Fountain Frolic":0xe4e4c5,
"Fountain Spout":0xcdebec,
"Fountains of Budapest":0xb9def0,
"Four Leaf Clover":0x738f5d,
"Fox":0xc38743,
"Fox Hill":0xc8aa92,
"Fox Red":0xca4e33,
"Fox Tails":0xdd8800,
"Foxen":0xbf8e7f,
"Foxfire Brown":0x9f6949,
"Foxflower Viola":0xa2acc5,
"Foxglove":0xb98391,
"Foxgloves":0xc6c0ca,
"Foxhall Green":0x454b40,
"Foxtail":0xbc896e,
"Foxy":0xa85e53,
"Foxy Fuchsia":0x9f00c5,
"Foxy Lady":0xd5a6ad,
"Foxy Pink":0xdb95ab,
"Fozzie Bear":0x70625c,
"Fragile":0xbbb8d0,
"Fragile Beauty":0xe7d7c6,
"Fragile Fern":0xeff2db,
"Fragrant Cherry":0x8e545c,
"Fragrant Cloves":0xac5e3a,
"Fragrant Jasmine":0xfbf6e7,
"Fragrant Lilac":0xceadbe,
"Fragrant Satchel":0xa99fba,
"Fragrant Snowbell":0xd5c5d4,
"Fragrant Wand":0xadb1c1,
"Frail Fuchsia":0xee88ee,
"Framboise":0xe40058,
"Frangipane":0xf4d5b2,
"Frangipani":0xffd7a0,
"Frank Blue":0x225288,
"Frank Lloyd White":0xefebdb,
"Frankenstein":0x7ba05b,
"Frankly Earnest":0xe2dbca,
"Frappe":0xd1b7a0,
"Freckles":0xd78775,
"Free Green":0x74a690,
"Free Reign":0xd1cdca,
"Free Speech Aquamarine":0x029d74,
"Free Speech Blue":0x4156c5,
"Free Speech Green":0x09f911,
"Free Speech Magenta":0xe35bd8,
"Free Speech Red":0xc00000,
"Free Spirit":0xdeeeed,
"Freedom":0x3b5e68,
"Freedom Found":0x657682,
"Freefall":0x565266,
"Freesia":0xf3c12c,
"Freesia Purple":0xb3b0d4,
"Freezing Vapor":0xd4e9f5,
"Freezy Breezy":0x99eeee,
"Freezy Wind":0x99ffdd,
"Freinacht Black":0x232f36,
"French 75":0xf9f3d5,
"French Beige":0xa67b50,
"French Bistre":0x856d4d,
"French Blue":0x0072bb,
"French Bustle":0xf2d5d4,
"French Castle":0xcdc0b7,
"French Colony":0x90a1aa,
"French Court":0x6a8ea2,
"French Creme":0xf2e6cf,
"French Diamond":0x597191,
"French Fuchsia":0xfd3f92,
"French Grey":0xbfbdc1,
"French Grey Linen":0xcac8b6,
"French Heirloom":0xe9e2e0,
"French Lavender":0xdfc9d1,
"French Lilac":0xdeb7d9,
"French Lilac Blue":0xadbae3,
"French Lime":0xc0ff00,
"French Limestone":0xc9d6c2,
"French Manicure":0xfee6dc,
"French Market":0xa2c7a3,
"French Mauve":0xd473d4,
"French Mirage Blue":0x446688,
"French Moire":0x9fbbc3,
"French Oak":0xbb9e7c,
"French Pale Gold":0xd4ab60,
"French Parsley":0x9ea07d,
"French Pass":0xa4d2e0,
"French Pastry":0xc4aa92,
"French Pear":0x9e9f7d,
"French Pink":0xfd6c9e,
"French Plum":0x811453,
"French Porcelain":0xf6f4f6,
"French Porcelain Clay":0xfaf1d7,
"French Puce":0x4e1609,
"French Raspberry":0xc72c48,
"French Roast":0x58423f,
"French Rose":0xf64a8a,
"French Shutter":0xbab6a0,
"French Silver":0xb8bcbc,
"French Sky Blue":0x77b5fe,
"French Tarragon":0x667255,
"French Taupe":0xd3c2bf,
"French Toast":0xdd8822,
"French Truffle":0x896d61,
"French Vanilla":0xefe1a7,
"French Vanilla Sorbet":0xfbe8ce,
"French Violet":0x8806ce,
"French White":0xf1e7db,
"French Wine":0xac1e44,
"French Winery":0x991133,
"Frenzied Red":0x814a5c,
"Frenzy":0xfeb101,
"Fresco":0xf4dbd9,
"Fresco Blue":0x034c67,
"Fresco Cream":0xfcc9a6,
"Fresco Green":0x7bd9ad,
"Fresh Acorn":0xd2693e,
"Fresh Air":0xa6e7ff,
"Fresh Apple":0x97a346,
"Fresh Apricot":0xffd7a5,
"Fresh Artichoke":0x7c8447,
"Fresh Auburn":0xa52a24,
"Fresh Baked Bread":0xf8d7be,
"Fresh Basil":0x5c5f4b,
"Fresh Blue":0x8bd6e2,
"Fresh Blue of Bel Air":0x069af3,
"Fresh Breeze":0xbeeddc,
"Fresh Brew":0xb8aa7d,
"Fresh Cantaloupe":0xff9c68,
"Fresh Cedar":0xa77f74,
"Fresh Cinnamon":0x995511,
"Fresh Clay":0xbe8035,
"Fresh Cream":0xfcf7e0,
"Fresh Croissant":0xcc9f76,
"Fresh Cut":0xf2003c,
"Fresh Cut Grass":0x91cb7d,
"Fresh Day":0xdfe9e5,
"Fresh Dew":0xf0f4e5,
"Fresh Dough":0xf2ebe6,
"Fresh Eggplant":0x4f467e,
"Fresh Eggs":0xfaf4ce,
"Fresh Eucalyptus":0xadbcb4,
"Fresh Frappe":0xdbe69d,
"Fresh Gingerbread":0xd3691f,
"Fresh Granny Smith":0x7ff217,
"Fresh Green":0x69d84f,
"Fresh Greens":0x3fad71,
"Fresh Grown":0xf0f7c4,
"Fresh Guacamole":0xa2b07e,
"Fresh Gum":0xffaadd,
"Fresh Heather":0xd1c1dd,
"Fresh Herb":0x77913b,
"Fresh Herbs":0x3a5f49,
"Fresh Honeydew":0xf6efc5,
"Fresh Ivy Green":0x006a5b,
"Fresh Lavender":0x8e90b4,
"Fresh Lawn":0x88aa00,
"Fresh Leaf":0x93ef10,
"Fresh Lemonade":0xece678,
"Fresh Lettuce":0xb2d58c,
"Fresh Lime":0xd8f1cb,
"Fresh Linen":0xebe8da,
"Fresh Mint":0x2a5443,
"Fresh Nectar":0xdaa674,
"Fresh Neon Pink":0xff11ff,
"Fresh Olive":0xa69e73,
"Fresh Onion":0x5b8930,
"Fresh Oregano":0x4faa6c,
"Fresh Peaches":0xf6b98a,
"Fresh Piglet":0xfddde6,
"Fresh Pine":0x4f5b49,
"Fresh Pineapple":0xf3d64f,
"Fresh Pink":0xe19091,
"Fresh Pink Lemonade":0xd2adb5,
"Fresh Popcorn":0xf4f3e9,
"Fresh Praline":0xe7bb95,
"Fresh Salmon":0xff7f6a,
"Fresh Sawdust":0xc8a278,
"Fresh Scent":0xf1c11c,
"Fresh Snow":0xf6efe1,
"Fresh Sod":0x91a085,
"Fresh Soft Blue":0x6ab9bb,
"Fresh Sprout":0xc7c176,
"Fresh Squeezed":0xffad00,
"Fresh Start":0xcfd4a4,
"Fresh Straw":0xeecc66,
"Fresh Take":0x505b93,
"Fresh Thyme":0xaebda8,
"Fresh Tone":0xb2c7c0,
"Fresh Turquoise":0x40e0d0,
"Fresh Up":0xdfebb1,
"Fresh Water":0xc6e3f7,
"Fresh Watermelon":0xdf9689,
"Fresh Willow":0xe1d9aa,
"Fresh Wood Ashes":0xeae6cc,
"Fresh Yellow":0xf7e190,
"Fresh Zest":0xf5e9cf,
"Freshly Roasted Coffee":0x663322,
"Freshman":0xe6f2c4,
"Freshmint":0xd9f4ea,
"Freshwater":0x4da6b2,
"Freshwater Marsh":0x535644,
"Fretwire":0xb2a490,
"Friar Brown":0x6e493a,
"Friar Grey":0x807e79,
"Friar Tuck":0xddb994,
"Friar's Brown":0x5e5241,
"Fricassée":0xffe6c2,
"Friend Flesh":0xf1a4b7,
"Friendly Basilisk":0xe2f5e1,
"Friendly Homestead":0xc8a992,
"Friendly Yellow":0xf5e0b1,
"Friends":0xe8c5c1,
"Friendship":0xfed8c2,
"Fright Night":0x004499,
"Frijid Pink":0xee77ff,
"Frilled Shark":0x939fa9,
"Frills":0x8fa6c1,
"Fringy Flower":0xb4e1bb,
"Frisky":0xccdda1,
"Frisky Blue":0x7bb1c9,
"Frittata":0xfeebc8,
"Frivolous Folly":0xcfd2c7,
"Frog":0x58bc08,
"Frog Green":0x00693c,
"Frog Hollow":0x7da270,
"Frog Prince":0xbbd75a,
"Frog's Legs":0x8c8449,
"Frogger":0x8cd612,
"Frolic":0xf9e7e1,
"Froly":0xe56d75,
"Frond":0x7b7f56,
"Front Porch":0xcdccc5,
"Frontier":0x314a49,
"Frontier Brown":0x9a8172,
"Frontier Fort":0xc3b19f,
"Frontier Land":0xbca59a,
"Frontier Shadow":0x655a4a,
"Frontier Shingle":0x7b5f46,
"Frost":0xe1e4c5,
"Frost Bite":0xf6f0e5,
"Frost Blue":0x5d9aa6,
"Frost Grey":0x848283,
"Frost Gum":0x8ecb9e,
"Frost Wind":0xdaebef,
"Frostbite":0xacfffc,
"Frosted Almond":0xd2c2ac,
"Frosted Blueberries":0x0055dd,
"Frosted Cocoa":0xa89c91,
"Frosted Emerald":0x78b185,
"Frosted Fern":0xa7a796,
"Frosted Garden":0xe2f7d9,
"Frosted Glass":0xeaf0f0,
"Frosted Grape":0xd4c4d2,
"Frosted Iris":0xb1b9d9,
"Frosted Jade":0xc2d1c4,
"Frosted Juniper":0xf0f4eb,
"Frosted Lemon":0xffedc7,
"Frosted Lilac":0xd3d1dc,
"Frosted Mint":0xe2f2e4,
"Frosted Pomegranate":0xad3d46,
"Frosted Sage":0xc6d1c4,
"Frosted Silver":0xc5c9c5,
"Frosted Sugar":0xd5bcc2,
"Frosted Toffee":0xf1dbbf,
"Frosted Tulip":0xf6d8d7,
"Frostee":0xdbe5d2,
"Frosting Cream":0xfffbee,
"Frostini":0xdbf2d9,
"Frostproof":0xd1f0f6,
"Frostwork":0xeff1e3,
"Frosty Dawn":0xcbe9c9,
"Frosty Day":0xccebf5,
"Frosty Fog":0xdee1e9,
"Frosty Glade":0xa0c0bf,
"Frosty Green":0xa3b5a6,
"Frosty Mint":0xe2f7f1,
"Frosty Morning":0xefe8e8,
"Frosty Pine":0xc7cfbe,
"Frosty Soft Blue":0xb4e0de,
"Frosty Spruce":0x578270,
"Frosty White":0xddddd6,
"Frosty White Blue":0xcce9e4,
"Froth":0xc6b8ae,
"Frothy Milk":0xfaede6,
"Frothy Surf":0xe7ebe6,
"Frozen Banana":0xfbf5d6,
"Frozen Blue":0xa5c5d9,
"Frozen Civilization":0xe1f5e5,
"Frozen Custard":0xfbeabd,
"Frozen Dew":0xd8cfb2,
"Frozen Edamame":0x9ca48a,
"Frozen Forest":0xcfe8b6,
"Frozen Frappe":0xddc5d2,
"Frozen Fruit":0xe1ca99,
"Frozen Grass":0xdeeadc,
"Frozen Lake":0x7b9cb3,
"Frozen Mammoth":0xdfd9da,
"Frozen Margarita":0xdbe2cc,
"Frozen Mint":0xd8e8e6,
"Frozen Moss Green":0xaddfad,
"Frozen Pea":0xc4ead5,
"Frozen Pond":0xa5b4ae,
"Frozen Salmon":0xfea993,
"Frozen State":0x26f7fd,
"Frozen Statues":0xe1dee5,
"Frozen Stream":0x30555d,
"Frozen Tomato":0xdd5533,
"Frozen Tundra":0xa3bfcb,
"Frozen Turquoise":0x53f6ff,
"Frozen Wave":0x56acca,
"Frugal":0xa5d7b2,
"Fruit Bowl":0xfdc9d0,
"Fruit Cocktail":0xd08995,
"Fruit Dove":0xce5b78,
"Fruit Of Passion":0x946985,
"Fruit Red":0xfa8970,
"Fruit Salad":0x4ba351,
"Fruit Shake":0xf39d8d,
"Fruit Yard":0x604241,
"Fruit Yellow":0xeac064,
"Fruitful Orchard":0x773b3e,
"Fruitless Fig Tree":0x448822,
"Fruity Licious":0xf69092,
"Fuchsia":0xed0dd9,
"Fuchsia Berries":0x333322,
"Fuchsia Blue":0x7a58c1,
"Fuchsia Blush":0xe47cb8,
"Fuchsia Fever":0xff5599,
"Fuchsia Flair":0xbb22bb,
"Fuchsia Flash":0xdd55cc,
"Fuchsia Flock":0xab446b,
"Fuchsia Flourish":0xbb2299,
"Fúchsia Intenso":0xd800cc,
"Fuchsia Kiss":0xcb6e98,
"Fuchsia Nebula":0x7722aa,
"Fuchsia Pink":0xff77ff,
"Fuchsia Purple":0xd33479,
"Fuchsia Red":0xab3475,
"Fuchsia Rose":0xc74375,
"Fuchsia Tint":0xc255c1,
"Fuchsite":0xc3d9ce,
"Fuchsite Green":0x5b7e70,
"Fudge":0x493338,
"Fudge Bar":0x997964,
"Fudge Truffle":0x604a3f,
"Fudgesicle":0xd46bac,
"Fuegan Orange":0xc77e4d,
"Fuego":0xee5533,
"Fuego Nuevo":0xee6622,
"Fuego Verde":0xc2d62e,
"Fuel Town":0x596472,
"Fuel Yellow":0xd19033,
"Fugitive Flamingo":0xee66aa,
"Fuji Peak":0xf6eee2,
"Fuji Purple":0x89729e,
"Fuji Snow":0xf1efe8,
"Fujinezumi":0x766980,
"Fulgrim Pink":0xf5b3ce,
"Fulgurite Copper":0xe6b77e,
"Full Bloom":0xfbcdc3,
"Full City Roast":0x662222,
"Full Cream":0xfae4ce,
"Full Glass":0x916b77,
"Full Moon":0xf4f3e0,
"Full Moon Grey":0xcfeae9,
"Full Of Life":0xde5f2f,
"Full Yellow":0xf9bc4f,
"Fully Purple":0x514c7e,
"Fulvous":0xe48400,
"Fun and Games":0x33789c,
"Fun Blue":0x335083,
"Fun Green":0x15633d,
"Fun Yellow":0xf7e594,
"Funchal Yellow":0xb6884d,
"Functional Blue":0x3f6086,
"Functional Gray":0xaba39a,
"Fundy Bay":0xcdd2c9,
"Fungal Hallucinations":0xcc00dd,
"Fungi":0x8f8177,
"Funhouse":0xf3d9dc,
"Funk":0x3ea380,
"Funki Porcini":0xee9999,
"Funkie Friday":0x4a3c4a,
"Funky Frog":0x98bd3c,
"Funky Yellow":0xedd26f,
"Funnel Cloud":0x113366,
"Funny Face":0xedc8ce,
"Furious Frog":0x55ee00,
"Furious Fuchsia":0xee2277,
"Furious Red":0xff1100,
"Furnace":0xdd4124,
"Furry Lady":0xf5efeb,
"Furry Lion":0xf09338,
"Fury":0xff0011,
"Fuschia Flair":0xa44769,
"Fuscia Fizz":0xb56e91,
"Fuscous Grey":0x54534d,
"Fusilli":0xf1e8d6,
"Fusion":0xb0ae26,
"Fusion Coral":0xff8576,
"Fusion Red":0xff6163,
"Fussy Pink":0xe6a3b9,
"Futaai Indigo":0x614e6e,
"Futon":0xedf6db,
"Future":0x15abbe,
"Future Hair":0x20b562,
"Future Vision":0xbcb6bc,
"Futuristic":0x998da8,
"Fuzzy Duckling":0xffea70,
"Fuzzy Navel":0xffd69f,
"Fuzzy Peach":0xffbb8f,
"Fuzzy Sheep":0xf0e9d1,
"Fuzzy Unicorn":0xeae3db,
"Fuzzy Wuzzy":0xcc6666,
"Fuzzy Wuzzy Brown":0xc45655,
"Fynbos Leaf":0xaeb1ac,
"Gable Green":0x2c4641,
"Gaboon Viper":0x8c6450,
"Gabriel's Light":0xdacca8,
"Gabriel's Torch":0xf8e6c6,
"Gadabout":0xffc4ae,
"Gaelic Garden":0xa5b3ab,
"Gaharā Lāl":0xac0c20,
"Gaia":0xd3bc9e,
"Gaiety":0xf4e4e5,
"Gainsboro":0xdcdcdc,
"Gala Ball":0x785d7a,
"Gala Pink":0xb04b63,
"Galactic Civilization":0x442288,
"Galactic Highway":0x3311bb,
"Galactic Mediator":0xe0dfdb,
"Galactic Tint":0xc0c4c6,
"Galactic Wonder":0x442255,
"Galactica":0xc4dde2,
"Galago":0x95a69f,
"Galah":0xd28083,
"Galapagos":0x085f6d,
"Galapagos Green":0x29685f,
"Galaxy Blue":0x2a4b7c,
"Galaxy Green":0x79afad,
"Gale Force":0x35454e,
"Gale of the Wind":0x007844,
"Galenite Blue":0x374b52,
"Gallant Gold":0xa4763c,
"Gallant Green":0x99aa66,
"Galleon Blue":0x3f95bf,
"Gallery":0xdcd7d1,
"Gallery Blue":0x9bbce4,
"Gallery Green":0x88a385,
"Gallery Grey":0xc5c2be,
"Gallery Red":0x935a59,
"Gallery Taupe":0xd0c5b8,
"Gallery White":0xeaebe4,
"Galley Gold":0xd5aa5e,
"Galliano":0xd8a723,
"Gallstone Yellow":0xa36629,
"Galveston Tan":0xe8c8b8,
"Galway":0xc4ddbb,
"Galway Bay":0x95a7a4,
"Gamboge":0xe49b0f,
"Gamboge Brown":0x996600,
"Gamboge Yellow":0xe6d058,
"Gambol Gold":0xe1b047,
"Game Over":0x7e8181,
"Gameboy Contrast":0x0f380f,
"Gameboy Light":0x9bbc0f,
"Gameboy Screen":0x8bac0f,
"Gameboy Shade":0x306230,
"Gamin":0xbfd1af,
"Gǎn Lǎn Huáng Olive":0xc9ff27,
"Gǎn Lǎn Lǜ Green":0x658b38,
"Ganache":0x34292a,
"Gangsters Gold":0xffdd22,
"Ganon Blue":0xa4e4fc,
"Ganymede":0x8b7d82,
"Garbanzo Bean":0xf1d5a5,
"Garbanzo Paste":0xeec684,
"Garden Aroma":0x9c6989,
"Garden Country":0xd5c5a8,
"Garden Cucumber":0x506a48,
"Garden Dawn":0xf1f8ec,
"Garden Fairy":0xccd4ec,
"Garden Flower":0xa892a8,
"Garden Fountain":0x729588,
"Garden Gate":0xdadcc1,
"Garden Gazebo":0xabc0bb,
"Garden Glade":0xdcd8a8,
"Garden Glory":0xffc1d0,
"Garden Glow":0x7dcc98,
"Garden Gnome Red":0x9b2002,
"Garden Goddess":0x99cea0,
"Garden Green":0x495e35,
"Garden Greenery":0x658369,
"Garden Grove":0x5e7f57,
"Garden Hedge":0x6f7d6d,
"Garden Lattice":0xe1d4b4,
"Garden Lettuce Green":0x87762b,
"Garden Medley":0x28a873,
"Garden of Eden":0x7fa771,
"Garden Pansy":0xa890b8,
"Garden Party":0xe3a4b8,
"Garden Path":0x424330,
"Garden Pebble":0xe4e4d5,
"Garden Picket":0xe4d195,
"Garden Plum":0x9d8292,
"Garden Pond":0xafc09e,
"Garden Promenade":0xa4a99b,
"Garden Room":0xaccfa9,
"Garden Rose White":0xf7ead4,
"Garden Salt Green":0xa18b62,
"Garden Seat":0xebe6c7,
"Garden Shadow":0x334400,
"Garden Shed":0xd6efda,
"Garden Snail":0xcdb1ab,
"Garden Spot":0xb1ca95,
"Garden Sprout":0xab863a,
"Garden Statue":0xbfd4c4,
"Garden Stroll":0x7dc683,
"Garden Swing":0x8cbd97,
"Garden Topiary":0x3e524b,
"Garden Twilight":0xa3bbb3,
"Garden View":0x89b89a,
"Garden Vista":0x9fb1ab,
"Garden Wall":0xaea492,
"Garden Weed":0x786e38,
"Gardener Green":0x5e602a,
"Gardener's Soil":0x5c534d,
"Gardenia":0xf1e8df,
"Gardening":0xacba8d,
"Gardens Sericourt":0x337700,
"Garfield":0xa75429,
"Gargantua":0xeeee55,
"Gargoyle":0xabb39e,
"Gargoyle Gas":0xffdf46,
"Garish Blue":0x00a4b1,
"Garish Green":0x51bf8a,
"Garland":0x69887b,
"Garlic Beige":0xb0aaa1,
"Garlic Clove":0xe2d7c1,
"Garlic Pesto":0xbfcf00,
"Garlic Suede":0xcdd2bc,
"Garlic Toast":0xdddd88,
"Garnet":0x733635,
"Garnet Black Green":0x354a41,
"Garnet Evening":0x763b42,
"Garnet Rose":0xac4b55,
"Garnet Sand":0xcc7446,
"Garnet Shadow":0xc89095,
"Garnet Stone Blue":0x384866,
"Garnish":0x1e9752,
"Garret Gray":0x756861,
"Garrison Grey":0x7b8588,
"Garuda Gold":0xffbb31,
"Gas Giant":0x98dcff,
"Gaslight":0xfeffea,
"Gates of Gold":0xd2935d,
"Gateway Gray":0xb2ac9c,
"Gateway Grey":0xa0a09c,
"Gathering Field":0xab8f55,
"Gathering Place":0xad9466,
"Gatsby Brick":0x8e3b2f,
"Gatsby Glitter":0xeed683,
"Gauntlet Gray":0x78736e,
"Gauss Blaster Green":0x84c3aa,
"Gauzy White":0xe3dbd4,
"Gazebo Green":0x76826c,
"Gazebo Grey":0xd1d0cb,
"Gazelle":0x947e68,
"Gazpacho":0xc23b22,
"Gecko":0x9d913c,
"<NAME>":0x7f5f00,
"Gedney Green":0x40534e,
"Geebung":0xc5832e,
"Gehenna's Gold":0xdba674,
"Gellibrand":0xb5acb2,
"Gem":0x4d5b8a,
"Gem Silica":0x73c4a4,
"Gem Turquoise":0x53c2c3,
"Gemstone Blue":0x004f6d,
"Gemstone Green":0x4b6331,
"Generic Viridian":0x007f66,
"Genestealer Purple":0x7761ab,
"Genetic Code":0x18515d,
"Geneva Green":0x1f7f76,
"Geneva Morn":0xbab7b8,
"Genever Green":0x33673f,
"Genevieve":0xbcc4e0,
"Gengiana":0x5f4871,
"Genie":0x3e4364,
"Genoa":0x31796d,
"Genoa Lemon":0xfde910,
"Genteel Blue":0x698eb3,
"Genteel Lavender":0xe2e6ec,
"Gentian":0x9079ad,
"Gentian Blue":0x312297,
"Gentian Flower":0x3366ff,
"Gentian Violet":0x544275,
"Gentle Aquamarine":0x97cbd2,
"Gentle Blue":0xcdd2de,
"Gentle Calm":0xc4cebf,
"Gentle Caress":0xfcd7ba,
"Gentle Cold":0xc3ece9,
"Gentle Doe":0xe8b793,
"Gentle Frost":0xdce0cd,
"Gentle Giant":0xb3ebe0,
"Gentle Glow":0xf6e5b9,
"Gentle Grape":0x908a9b,
"Gentle Mauve":0x958c9e,
"Gentle Rain":0xcbc9c5,
"Gentle Sea":0xb0c8d0,
"Gentle Sky":0x99bdd2,
"Gentle Touch":0xe3d5b8,
"Gentle Yellow":0xfff5be,
"Gentleman's Suit":0xc1becd,
"Geode":0x4b3f69,
"Georgia Clay":0xb06144,
"Georgia On My Mind":0xfdd4c5,
"Georgia Peach":0xf97272,
"Georgian Bay":0x22657f,
"Georgian Leather":0xcf875e,
"Georgian Pink":0xc6b8b4,
"Georgian Revival Blue":0x5b8d9f,
"Georgian Yellow":0xd1974c,
"Geraldine":0xe77b75,
"Geranium":0xda3d58,
"Geranium Bud":0xcfa1c7,
"Geranium Leaf":0x90ac74,
"Geranium Pink":0xf6909d,
"Geranium Red":0xd76968,
"Gerbera Red":0xf6611a,
"German Camouflage Beige":0x9b8c7b,
"German Grey":0x53504e,
"German Hop":0x89ac27,
"German Liquorice":0x2e3749,
"German Mustard":0xcd7a00,
"Germander Speedwell":0x0094c8,
"Germania":0xddc47e,
"Get Up and Go":0x1a9d49,
"Gettysburg Grey":0xc7c1b7,
"Geyser":0xc4d7cf,
"Geyser Basin":0xe3cab5,
"Geyser Steam":0xcbd0cf,
"Ghee Yellow":0xd8bc23,
"Ghost":0xc0bfc7,
"Ghost Grey":0x9c9b98,
"Ghost Pepper":0xc10102,
"Ghost Ship":0x887b6e,
"Ghost Town":0xbeb6a8,
"Ghost Whisperer":0xcbd1d0,
"Ghost White":0xf8f8ff,
"Ghost Writer":0xbcb7ad,
"Ghosted":0xe2e0dc,
"Ghosting":0xcac6ba,
"Ghostlands Coal":0x113c42,
"Ghostly":0xa7a09f,
"Ghostly Green":0xd9d7b8,
"Ghostly Grey":0xccccd3,
"Ghostly Purple":0x7b5d92,
"Ghostwaver":0xe2dbdb,
"Ghoul":0x667744,
"Giant Cactus Green":0x88763f,
"Giant Onion":0x665d9e,
"Giant's Club":0xb05c52,
"Giants Orange":0xfe5a1d,
"Gibraltar":0x626970,
"Gibraltar Grey":0x6f6a68,
"Gibraltar Sea":0x123850,
"Gigas":0x564786,
"Giggle":0xeff0d3,
"Gilded":0xf4db4f,
"Gilded Beige":0xb39f8d,
"Gilded Glamour":0x956841,
"Gilded Leaves":0xeba13c,
"Gilded Pear":0xc09e6c,
"Gilneas Grey":0x6c8396,
"Gimblet":0xb9ad61,
"Gin":0xd9dfcd,
"Gin Fizz":0xf8eaca,
"Gin Tonic":0xecebe5,
"Ginger":0xb06500,
"Ginger Ale":0xc9a86a,
"Ginger Ale Fizz":0xf5dfbc,
"Ginger Beer":0xc27f38,
"Ginger Cream":0xefe0d7,
"Ginger Crunch":0xceaa64,
"Ginger Dough":0xb06d3b,
"Ginger Dy":0x97653c,
"Ginger Flower":0xcf524e,
"Ginger Grey Yellow":0xb8a899,
"Ginger Jar":0xc6a05e,
"Ginger Lemon Tea":0xffffaa,
"Ginger Milk":0xf7a454,
"Ginger Peach":0xf9d09f,
"Ginger Pie":0x9a7d61,
"Ginger Root":0xc17444,
"Ginger Rose":0xbe8774,
"Ginger Shortbread":0xe3cec6,
"Ginger Snap":0x977d70,
"Ginger Spice":0xb65d48,
"Ginger Sugar":0xdddace,
"Ginger Tea":0xb19d77,
"Ginger Whisper":0xcc8877,
"Gingerbread":0x8c4a2f,
"Gingerbread Crumble":0x9c5e33,
"Gingerbread House":0xca994e,
"Gingerbread Latte":0xb39479,
"Gingerline":0xffdd11,
"Gingersnap":0xc79e73,
"Gingery":0xb06c3e,
"Gingko":0xa3c899,
"Gingko Tree":0x918260,
"Ginkgo Green":0xa5aca4,
"Ginnezumi":0x97867c,
"Ginninderra":0xb3d5c0,
"Ginseng Root":0xe6cdb5,
"Ginshu":0xbc2d29,
"Gio Ponti Green":0xb3ceab,
"Giraffe":0xfefe33,
"Girl Power":0xd39bcb,
"Girl Talk":0xe4c7c8,
"Girlie":0xffd3cf,
"Girls Night Out":0xff69b4,
"Girly Nursery":0xf6e6e5,
"Give Me Your Love":0xee88ff,
"Givry":0xebd4ae,
"Gizmo":0xd4a1b5,
"Glacial Green":0x6fb7a8,
"Glacial Ice":0xeae9e7,
"Glacial Stream":0xbcd8e2,
"Glacial Tint":0xeaf2ed,
"Glacial Water Green":0xc9ead4,
"Glacier":0x78b1bf,
"Glacier Bay":0xdef2ee,
"Glacier Blue":0xa9c1c0,
"Glacier Green":0x3e9eac,
"Glacier Grey":0xc5c6c7,
"Glacier Ivy":0xeaf3e6,
"Glacier Lake":0x62b4c0,
"Glacier Pearl":0xd1d2dc,
"Glacier Point":0xb3d8e5,
"Glacier Valley":0xe2e3d7,
"Glad Yellow":0xf5e1ac,
"Glade":0x9ca687,
"Glade Green":0x5f8151,
"Gladeye":0x7a8ca6,
"Gladiator Grey":0x6e6c5e,
"Gladiator Leather":0xa95c3e,
"Gladiola":0xd54f43,
"Gladiola Blue":0x6370b6,
"Gladiola Violet":0x6e5178,
"Glam":0xcf748c,
"Glamorgan Sausage":0xdacba7,
"Glamorous":0xb74e64,
"Glamorous White":0xf0eae0,
"Glamour":0xdb9da7,
"Glamour Pink":0xff1dcd,
"Glamour White":0xfffcec,
"Glasgow Fog":0xbdb8ae,
"Glass Bead":0xc7bec4,
"Glass Bottle":0x93ba59,
"Glass Bull":0x880000,
"Glass Green":0xdcdfb0,
"Glass Jar Blue":0x20b2aa,
"Glass Of Milk":0xfcf3dd,
"Glass Sand":0xcdb69b,
"Glass Sapphire":0x587b9b,
"Glass Sea":0x095d75,
"Glass Tile":0xcdd0c0,
"Glass Violet":0xb7a2cc,
"Glassine":0xd7e2e5,
"Glaucous":0x6082b6,
"Glaze White":0xeae1df,
"Glazed Carrot":0xe9692c,
"Glazed Chestnut":0x967217,
"Glazed Ginger":0x91552b,
"Glazed Granite":0x5b5e61,
"Glazed Pears":0xefe3d2,
"Glazed Pecan":0xd19564,
"Glazed Persimmon":0xd34e36,
"Glazed Pot":0xad7356,
"Glazed Raspberry":0xa44b62,
"Glazed Ringlet":0x89626d,
"Glazed Sugar":0xffdccc,
"Gleam":0xbfd1ad,
"Gleaming Shells":0xf8ded1,
"Gleeful":0x9dbb7d,
"Glen":0x4aac72,
"Glen Falls":0xacb8c1,
"Glendale":0xa1bb8b,
"Glenwood Green":0xa7d3b7,
"Glide Time":0x5d6f80,
"Glimmer":0xe1e8e3,
"Glimpse":0x4fb9ce,
"Glimpse into Space":0x121210,
"Glimpse of Pink":0xfff3f4,
"Glimpse of Void":0x335588,
"Glisten Green":0xf2efdc,
"Glisten Yellow":0xf5e6ac,
"Glistening":0xeed288,
"Glistening Grey":0xb1b3be,
"Glitch":0x2c5463,
"Glitchy Shader Blue":0x99ffff,
"Glitter":0xe6e8fa,
"Glitter is not Gold":0xfedc57,
"Glitter Lake":0x44bbff,
"Glitter Shower":0x88ffff,
"Glitter Yellow":0xf8d75a,
"Glitterati":0x944a63,
"Glittering Gemstone":0xdec0e2,
"Glittering Sun":0xd3ad77,
"Glittery Glow":0xeeeddb,
"Glittery Yellow":0xf9eecd,
"Glitz and Glamour":0x965f73,
"Glitzy Gold":0xd6a02b,
"Glitzy Red":0xaf413b,
"Global Green":0x696e51,
"Global Warming":0xf1d7d3,
"Globe Artichoke":0x5f6c3c,
"Globe Thistle":0x2e0329,
"Globe Thistle Grey Rose":0x998d8d,
"Gloomy Blue":0x3c416a,
"Gloomy Purple":0x8756e4,
"Gloomy Sea":0x4a657a,
"Glorious Gold":0xcba956,
"Glorious Green Glitter":0xaaee11,
"Glossy Black":0x110011,
"Glossy Gold":0xffdd77,
"Glossy Grape":0xab92b3,
"Glossy Kiss":0xeee3de,
"Glossy Olive":0x636340,
"Glow":0xf9f2da,
"Glow in the Dark":0xbefdb7,
"Glow Pink":0xd8979e,
"Glow Worm":0xbed565,
"Glowing Brake Disc":0xee4444,
"Glowing Coals":0xbc4d39,
"Glowing Firelight":0xaf5941,
"Glowing Lantern":0xfbb736,
"Glowing Meteor":0xee4400,
"Glowing Scarlet":0xbd4649,
"Glowlight":0xfff6b9,
"Gloxinia":0x622e5a,
"Gluon Grey":0x1a1b1c,
"Gluten":0xddcc66,
"Gnarls Green":0x00754b,
"Gnocchi Beige":0xffeebb,
"Gnome":0x81a19b,
"Gnome Green":0xc4bc84,
"Gnu Tan":0xb09f84,
"Go Alpha":0x007f87,
"Go Bananas":0xf7ca50,
"Go Ben":0x786e4c,
"Go Go Glow":0xfcecd5,
"Go Go Green":0x008a7d,
"Go Go Lime":0xc6be6b,
"Go Go Mango":0xfeb87e,
"Go Go Pink":0xfdd8d4,
"Go Green!":0x00ab66,
"Go To Grey":0xdcd8d7,
"Goat":0xa89a91,
"Gobelin Mauve":0x5e5a6a,
"Gobi Desert":0xcdbba2,
"Gobi Sand":0xd4aa6f,
"Gobi Tan":0xbba587,
"Goblin":0x34533d,
"Goblin Blue":0x5f7278,
"Goblin Eyes":0xeb8931,
"Goblin Green":0x76ff7a,
"Goblin Warboss":0x4efd54,
"Gobo Brown":0x635147,
"Gochujang Red":0x770000,
"God | |
# File: callNeuralClosure.py (repository: CSMMLab/neuralEntropyClosures)
'''
This is the script that gets called from the C++ KiT-RT method MLOptimizer.cpp.
It initializes and loads a neural closure model; the call method performs a prediction.
Author: <NAME>
Version: 0.0
Date: 29.10.2020
'''
### imports ###
# internal modules
from src.networks.configmodel import init_neural_closure
from src import utils
# python modules
import tensorflow as tf
import os
from optparse import OptionParser
import time
import statistics
import numpy as np
import matplotlib.pyplot as plt
### global variable ###
# neuralClosureModel = 0 # bm.initNeuralClosure(0,0)
### function definitions ###
def initModelCpp(input):
    '''
    Initialize the global neural closure model from the C++ side (KiT-RT).

    input: string array consisting of [modelNumber, maxDegree_N, folderName]
        modelNumber : selects the network architecture, i.e. MK1, MK2, ...
        maxDegree_N : maximal degree of the moment basis, i.e. the "N" of "M_N"
        folderName  : unused here; the folder is derived from the other two values
    Returns 0 on success.
    '''
    print("|-------------------- Tensorflow initialization Log ------------------")
    print("|")
    modelNumber = input[0]
    maxDegree_N = input[1]
    # Derive the model folder from architecture number and moment degree.
    folderName = f"neuralClosure_M{maxDegree_N}_MK{modelNumber}"
    global neuralClosureModel
    neuralClosureModel = init_neural_closure(modelNumber, maxDegree_N, folderName)
    neuralClosureModel.load_model()
    neuralClosureModel.model.summary()
    print("|")
    print("| Tensorflow neural closure initialized.")
    print("|")
    return 0
### function definitions ###
def init_model(network_mk: int = 1, polynomial_degree: int = 0, spatial_dim: int = 3, folder_name: str = "testFolder",
               loss_combination: int = 0, width: int = 10, depth: int = 5, normalized: bool = False,
               input_decorrelation: bool = False, scale_active: bool = True):
    '''
    Build the global neural closure model with the given configuration.

    network_mk        : selects the network architecture, i.e. MK1, MK2, ...
    polynomial_degree : maximal degree of the moment basis, i.e. the "N" of "M_N"
    Returns 0 on success.
    '''
    global neuralClosureModel
    # Collect the configuration once, then forward it to the factory.
    closure_config = dict(network_mk=network_mk,
                          poly_degree=polynomial_degree,
                          spatial_dim=spatial_dim,
                          folder_name=folder_name,
                          loss_combination=loss_combination,
                          nw_depth=depth,
                          nw_width=width,
                          normalized=normalized,
                          input_decorrelation=input_decorrelation,
                          scale_active=scale_active)
    neuralClosureModel = init_neural_closure(**closure_config)
    return 0
def call_network(input):
    '''
    Evaluate the closure network and return the gradient of its output
    with respect to the input moments.

    input: array of shape (nCells, nMaxMoment), e.g. nMaxMoment = 9 for MK3
    Returns: gradient tensor of the same shape as input.
    '''
    watched_input = tf.Variable(input)
    with tf.GradientTape() as tape:
        # training=False: inference mode (relevant only for layers such as Dropout).
        network_output = neuralClosureModel.model(watched_input, training=False)
    return tape.gradient(network_output, watched_input)
def call_network_batchwise(network_input):
    '''
    Evaluate the closure network on a batch and write the gradient of the
    output w.r.t. the input back into the caller-provided array.

    network_input: numpy array of shape (nCells, nBasis); overwritten in place
    Returns: the same array, now holding the gradients.
    '''
    watched_input = tf.Variable(network_input)
    with tf.GradientTape() as tape:
        # training=False: inference mode (relevant only for layers such as Dropout).
        network_output = neuralClosureModel.model(watched_input, training=False)
    gradient_tensor = tape.gradient(network_output, watched_input)
    # Copy the gradients element-wise into the caller's array.
    # NOTE: reusing network_input instead of allocating a fresh array, since a
    # newly generated numpy array appears to cause a segfault on the C++ side.
    cell_count, basis_count = network_input.shape
    for row in range(cell_count):
        for col in range(basis_count):
            network_input[row, col] = gradient_tensor[row, col]
    return network_input
def main():
    """Command-line entry point: parse options, build the closure model, then
    run one of several modes selected by --training:
      0: load only, 1: train, 2: analysis/evaluation, 3: re-save model,
      4: timing benchmark, 5: dump weight histograms.
    """
    print("---------- Start Network Training Suite ------------")
    print("Parsing options")
    # --- parse options ---
    parser = OptionParser()
    parser.add_option("-a", "--sampling", dest="sampling", default=0,
                      help="uses data sampled in alpha:\n 0: uniform in u\n 1: uniform in alpha\n 2: gaussian in alpha",
                      metavar="SAMPLING")
    parser.add_option("-b", "--batch", dest="batch", default=128,
                      help="batch size", metavar="BATCH")
    parser.add_option("-c", "--curriculum", dest="curriculum", default=1,
                      help="training curriculum", metavar="CURRICULUM")
    parser.add_option("-d", "--degree", dest="degree", default=0,
                      help="max degree of moment", metavar="DEGREE")
    parser.add_option("-e", "--epoch", dest="epoch", default=1000,
                      help="epoch count for neural network", metavar="EPOCH")
    parser.add_option("-f", "--folder", dest="folder", default="testFolder",
                      help="folder where the model is stored", metavar="FOLDER")
    parser.add_option("-g", "--scaledOutput", dest="scaledOutput", default="0",
                      help="train on scaled entropy values", metavar="SCALEDOUTPUT")
    parser.add_option("-i", "--decorrInput", dest="decorrInput", default="0",
                      help="train normalized and decorrelated input moments", metavar="SCALEDINPUT")
    parser.add_option("-l", "--loadModel", dest="loadmodel", default=1,
                      help="load model weights from file", metavar="LOADING")
    parser.add_option("-m", "--model", dest="model", default=11,
                      help="choice of network model", metavar="MODEL")
    parser.add_option("-n", "--normalized", dest="normalized", default=0,
                      help="train on normalized moments", metavar="NORMALIZED")
    parser.add_option("-o", "--objective", dest="objective", default=0,
                      help="choice of loss functions:\n 0=[h]\n 1 =[h,alpha]\n 2=[h,alpha,u]\n3=[rel_entropy_h]",
                      metavar="OBJECTIVE")
    parser.add_option("-p", "--processingmode", dest="processingmode", default=1,
                      help="gpu mode (1). cpu mode (0) ", metavar="PROCESSINGMODE")
    parser.add_option("-s", "--spatialDimension", dest="spatialDimension", default=3,
                      help="spatial dimension of closure", metavar="SPATIALDIM")
    parser.add_option("-t", "--training", dest="training", default=1,
                      help="execution mode (0) training mode (1) analysis mode (2) re-save mode (3)",
                      metavar="TRAINING")
    parser.add_option("-v", "--verbosity", dest="verbosity", default=1,
                      help="output verbosity keras (0 or 1)", metavar="VERBOSITY")
    parser.add_option("-w", "--networkwidth", dest="networkwidth", default=10,
                      help="width of each network layer", metavar="WIDTH")
    parser.add_option("-x", "--networkdepth", dest="networkdepth", default=5,
                      help="height of the network", metavar="HEIGHT")
    (options, args) = parser.parse_args()
    # optparse delivers strings; cast every option to its working type.
    options.objective = int(options.objective)
    options.sampling = int(options.sampling)
    options.degree = int(options.degree)
    options.spatial_dimension = int(options.spatialDimension)
    options.model = int(options.model)
    options.epoch = int(options.epoch)
    options.curriculum = int(options.curriculum)
    options.batch = int(options.batch)
    options.verbosity = int(options.verbosity)
    options.scaledOutput = bool(int(options.scaledOutput))
    options.decorrInput = bool(int(options.decorrInput))
    options.loadmodel = int(options.loadmodel)
    options.training = int(options.training)
    options.processingmode = int(options.processingmode)
    options.normalized = bool(int(options.normalized))
    options.networkwidth = int(options.networkwidth)
    options.networkdepth = int(options.networkdepth)
    # --- End Option Parsing ---
    # switch to CPU mode, if wished
    if options.processingmode == 0:
        # Hide all GPUs from TensorFlow so it falls back to the CPU.
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
        if tf.test.gpu_device_name():
            print('GPU found. Using GPU')
        else:
            print("Disabled GPU. Using CPU")
    # --- initialize model framework
    print("Initialize model")
    init_model(network_mk=options.model, polynomial_degree=options.degree, spatial_dim=options.spatial_dimension,
               folder_name=options.folder, normalized=options.normalized, loss_combination=options.objective,
               width=options.networkwidth, depth=options.networkdepth, input_decorrelation=options.decorrInput,
               scale_active=options.scaledOutput)
    # --- load model data before creating model (important for data scaling)
    if options.training == 1:
        # create training Data
        # Save options and runscript to file (only for training)
        utils.write_config_file(options, neuralClosureModel)
        neuralClosureModel.load_training_data(shuffle_mode=True, sampling=options.sampling,
                                              normalized_data=neuralClosureModel.normalized, train_mode=True)
    # create model after loading training data to get correct scaling in
    if options.loadmodel == 1 or options.training == 0 or options.training == 2 or options.training == 5:
        neuralClosureModel.load_model()  # also creates model
        # preprocess training data. Compute scalings
        neuralClosureModel.training_data_preprocessing(scaled_output=options.scaledOutput,
                                                       model_loaded=options.loadmodel)
    else:
        print("Start training with new weights")
        # preprocess training data. Compute scalings
        neuralClosureModel.training_data_preprocessing(scaled_output=options.scaledOutput,
                                                       model_loaded=options.loadmodel)
        neuralClosureModel.create_model()
    # neuralClosureModel.model.summary()
    if options.training == 1:
        # train model
        neuralClosureModel.config_start_training(val_split=0.1, epoch_count=options.epoch,
                                                 curriculum=options.curriculum,
                                                 batch_size=options.batch, verbosity=options.verbosity,
                                                 processing_mode=options.processingmode)
        # save model
        neuralClosureModel.save_model()
    elif options.training == 2:
        # Evaluate the trained model on both normalized and raw moments.
        print("Analysis mode entered.")
        print("Evaluate Model on normalized data...")
        neuralClosureModel.load_training_data(shuffle_mode=False, load_all=True, normalized_data=True,
                                              scaled_output=options.scaledOutput, train_mode=False)
        [u, alpha, h] = neuralClosureModel.get_training_data()
        neuralClosureModel.evaluate_model_normalized(u, alpha, h)
        print("Evaluate Model on non-normalized data...")
        neuralClosureModel.load_training_data(shuffle_mode=False, load_all=True, normalized_data=False,
                                              train_mode=False)
        [u, alpha, h] = neuralClosureModel.get_training_data()
        neuralClosureModel.evaluate_model(u, alpha, h)
    elif options.training == 3:
        print(
            "Re-Save mode entered.")  # if training was not finished, models are not saved to .pb. this can be done here
        neuralClosureModel.load_training_data(shuffle_mode=False,
                                              sampling=options.sampling,
                                              normalized_data=neuralClosureModel.normalized,
                                              train_mode=False)
        # normalize data (experimental)
        # neuralClosureModel.normalizeData()
        # One forward pass builds the computational graph so the model can be serialized.
        neuralClosureModel.model(neuralClosureModel.training_data[0])
        # save model
        neuralClosureModel.save_model()
    elif options.training == 4:
        # timing measurement
        # startup: a small warm-up call before the benchmark
        u_in = tf.zeros([2, neuralClosureModel.input_dim], tf.float32)
        [u, alpha, h] = neuralClosureModel.model(u_in)
        u_in = tf.ones([1000000, neuralClosureModel.input_dim], tf.float32)
        # u_tf = tf.constant(u_in)
        totduration = 0
        durations = []
        for i in range(0, 100):
            print("Start computation")
            start = time.perf_counter()
            [u, alpha, h] = neuralClosureModel.model(u_in)
            end = time.perf_counter()
            totduration += end - start
            durations.append(end - start)
            print("Model executed. Elapsed time: " + str(end - start) + " in iteration " + str(i) + ".")
        avg = totduration / 100
        print("Average duration: " + str(avg) + " seconds")
        stddev = statistics.stdev(durations)
        print("Standard deviation:" + str(stddev) + "")
    elif options.training == 5:  # print weights mode
        # Dump a histogram PNG for every trainable weight tensor.
        all_layers = neuralClosureModel.model.trainable_weights
        layer_list = []
        count = 0
        for layer in all_layers:
            t = layer
            # print(t)
            tn = t.numpy().flatten()
            layer_list.append(tn)
            print(layer.shape)
            print("max weight: " + str(np.max(tn)) + " min weight: " + str(np.min(tn)))
            # hist, bin_edges = np.histogram(tn, bins=10, density=True)
            plt.hist(tn, density=True)  # arguments are passed to np.histogram
            # Sanitize the tensor name so it is usable as a file name.
            name = layer.name
            name = name.replace(':', '')
            name = name.replace('/', '_')
            plt.title("Histogram of weights in layer " + name)
            # Text(0.5, 1.0, "Histogram with 'auto' bins")
            plt.savefig(neuralClosureModel.folder_name + "/" + name + ".png")
            # plt.show()
            plt.clf()
            # if "nn_component" in name:
            #     tn_sm = tf.nn.relu(tn)
            #     print(max(tn_sm))
            #     print(min(tn_sm))
            #     plt.hist(tn_sm, density=True)
            #     name = name + "_relu"
            #     plt.title("Histogram of weights in layer " + name)
            #     plt.savefig(neuralClosureModel.folder_name + "/" + name + ".png")
            #     plt.clf()
            count += 1
        # print non trainable weights
        all_layers_nt = neuralClosureModel.model.non_trainable_weights
        layer_list = []
        count = 0
        for layer in all_layers_nt:
            t = layer
            # print(t)
            tn = t.numpy().flatten()
            layer_list.append(tn)
            print(layer.shape)
            print("max weight: " + str(np.max(tn)) + " min weight: " + str(np.min(tn)))
            # hist, bin_edges = np.histogram(tn, bins=10, density=True)
            plt.hist(tn, density=True)  # arguments are passed to np.histogram
            name = layer.name
            name = name.replace(':', '')
            name = name.replace('/', '_')
            plt.title("Histogram of weights in layer " + name)
            # Text(0.5, 1.0, "Histogram with 'auto' bins")
            plt.savefig(neuralClosureModel.folder_name + "/" + name + ".png")
            plt.clf()
            count += 1
if | |
from __future__ import annotations
import re
import pytz
from textwrap import TextWrapper
from typing import Callable
from unicodedata import normalize
from datetime import datetime
from random import uniform
from arrow import Arrow
from math import ceil, floor
from py_reportit.crawler.post_processors import abstract_pp
from py_reportit.shared.model.answer_meta import ReportAnswerMeta
from py_reportit.shared.model.answer_meta_tweet import AnswerMetaTweet
from py_reportit.shared.model.report_answer import ReportAnswer
from py_reportit.shared.model.meta_tweet import MetaTweet
from py_reportit.shared.model.meta import Meta
from py_reportit.shared.model.report import Report
def extract_ids(reports: list[Report]) -> list[int]:
    """Return the ids of the given reports, preserving their order."""
    return [report.id for report in reports]
def filter_reports_by_state(reports: list[Report], finished: bool) -> list[Report]:
    """Return only the reports in the requested state.

    finished=True selects reports with status 'finished'; finished=False selects
    reports with status 'accepted'.

    Bug fix: the original `report.status == 'finished' if finished else 'accepted'`
    was parsed as `(report.status == 'finished') if finished else 'accepted'`, so for
    finished=False the predicate returned the truthy string 'accepted' and every
    report passed the filter.
    """
    wanted_status = 'finished' if finished else 'accepted'
    return [report for report in reports if report.status == wanted_status]
def truncate_float(f: float, decimals: int) -> float:
    """Drop every digit of *f* beyond the given number of decimals (no rounding)."""
    scale = 10 ** decimals
    return int(f * scale) / scale
def reports_are_roughly_equal_by_position(r1: Report, r2: Report, decimals: int) -> bool:
    """True if both reports lie at the same coordinates after truncation to *decimals* places."""
    return positions_are_rougly_equal(
        r1.latitude, r1.longitude,
        r2.latitude, r2.longitude,
        decimals,
    )
def positions_are_rougly_equal(lat1: float or str, lon1: float or str, lat2: float or str, lon2: float or str, decimals: int) -> bool:
    """True if both coordinate pairs agree once truncated to *decimals* decimal places.

    Inputs may be floats or numeric strings; they are coerced to float first.
    """
    def _truncated(value) -> float:
        return truncate_float(float(value), decimals)

    latitudes_match = _truncated(lat1) == _truncated(lat2)
    longitudes_match = _truncated(lon1) == _truncated(lon2)
    return latitudes_match and longitudes_match
def get_last_tweet_id(report: Report) -> str:
    """Return the id of the tweet a reply should attach to, or None.

    Preference order:
      1. newest answer that has tweets: its latest 'partial_closure' tweet,
         otherwise its latest tweet of any type;
      2. the report's own tweets: the 'follow' tweet if present,
         otherwise the latest tweet;
      3. None when no tweets exist at all.
    """
    by_order = lambda entry: entry.order

    answers = report.answers or []
    answers_with_tweets = [answer for answer in answers if answer.meta.tweet_ids]
    if answers_with_tweets:
        newest_answer = max(answers_with_tweets, key=by_order)
        answer_tweets = newest_answer.meta.tweet_ids
        partial_closures = [tweet for tweet in answer_tweets if tweet.type == "partial_closure"]
        if partial_closures:
            return max(partial_closures, key=by_order).tweet_id
        return max(answer_tweets, key=by_order).tweet_id

    if report.meta.tweet_ids:
        report_tweets = report.meta.tweet_ids
        follow_tweet = next((tweet for tweet in report_tweets if tweet.type == "follow"), None)
        if follow_tweet:
            return follow_tweet.tweet_id
        return max(report_tweets, key=by_order).tweet_id

    return None
def generate_random_times_between(start: Arrow, end: Arrow, amount: int) -> list[Arrow]:
    """Return *amount* uniformly random Arrow timestamps in [start, end], sorted ascending."""
    lower = start.timestamp()
    upper = end.timestamp()
    samples = [Arrow.fromtimestamp(uniform(lower, upper)) for _ in range(amount)]
    samples.sort()
    return samples
def to_utc(dtime: datetime) -> datetime:
    """Interpret the naive *dtime* as Europe/Luxembourg local time and convert it to UTC."""
    luxembourg_tz = pytz.timezone('Europe/Luxembourg')
    localized = luxembourg_tz.localize(dtime)
    return localized.astimezone(pytz.UTC)
def string_to_crontab_kwargs(crontab_str: str) -> dict:
    """Split a five-field crontab string into celery crontab keyword arguments.

    Raises CrontabParseException when the string does not contain exactly five
    space-separated fields.
    """
    field_names = ("minute", "hour", "day_of_month", "month_of_year", "day_of_week")
    fields = crontab_str.split(" ")
    if len(fields) != len(field_names):
        raise CrontabParseException("Crontab string does not have expected number of arguments")
    return dict(zip(field_names, fields))
def generate_time_graph(times: list[Arrow], interval_minutes: int = 5):
    """Render a small unicode bar chart of how many timestamps fall into each
    *interval_minutes*-wide bucket, framed and labelled with start/end times
    and a rough ETA.

    times must be a non-empty, ascending list of Arrow timestamps.
    Returns the multi-line string (label line, top frame, bars, bottom frame).

    Bug fix: bar symbol lookup used `min(count, len(graph_symbols))`, which
    indexes one past the end of the 9-element symbol list and raised an
    IndexError whenever a bucket held 9 or more timestamps; the clamp is now
    `len(graph_symbols) - 1`.
    """
    graph_symbols = [" ", "▁","▂","▃","▄","▅","▆","▇","█"]
    # Align the first bucket to a multiple of interval_minutes.
    first_timestamp = times[0].replace(second=0).shift(minutes=-(times[0].time().minute % interval_minutes))
    last_timestamp = times[-1]
    generated_intervals = [first_timestamp]
    while generated_intervals[-1] < last_timestamp:
        generated_intervals.append(generated_intervals[-1].shift(minutes=interval_minutes))
    # Per bucket: (starts a new hour, timestamp count, contains "now").
    counts_per_interval = []
    for index, intvl in enumerate(generated_intervals):
        filter_func = lambda current_time: generated_intervals[index] <= current_time < generated_intervals[index].shift(minutes=interval_minutes)
        current_time_is_in_interval = generated_intervals[index] <= Arrow.now() < generated_intervals[index].shift(minutes=interval_minutes)
        first_interval_of_hour = intvl.time().minute < interval_minutes
        matches = list(filter(filter_func, times))
        # Drop a trailing empty bucket.
        if len(matches) or index != (len(generated_intervals) - 1):
            counts_per_interval.append((first_interval_of_hour, len(matches), current_time_is_in_interval))
    frame_top = "╭"
    frame_bottom = "╰"
    core_graph = "│"
    for index, cnt in enumerate(counts_per_interval):
        # Hour boundaries get a vertical separator.
        if index != 0 and cnt[0] == True:
            core_graph += "│"
            frame_top += "┬"
            frame_bottom += "┴"
        # Clamp the count to the last valid symbol index (fixes off-by-one IndexError).
        core_graph += graph_symbols[min(cnt[1], len(graph_symbols) - 1)]
        # Mark the bucket containing the current time in the frame.
        if cnt[2]:
            frame_top += "┬"
            frame_bottom += "┴"
        else:
            frame_top += "─"
            frame_bottom += "─"
    core_graph += "│"
    frame_top += "╮"
    frame_bottom += "╯"
    start_friendly = times[0].strftime("%H:%M:%S")
    end_friendly = times[-1].strftime("%H:%M:%S")
    eta = times[-1] - max(times[0], Arrow.now())
    eta_friendly = f" {str(eta).split('.')[0]} left "
    top_line_spacing = len(core_graph) - len(start_friendly) - len(end_friendly) - len(eta_friendly)
    top_line = start_friendly + " " * ceil(top_line_spacing/2) + eta_friendly + " " * floor(top_line_spacing/2) + end_friendly
    return top_line + "\n" + frame_top + "\n" + core_graph + "\n" + frame_bottom
class CrontabParseException(Exception):
    """Raised when a crontab string cannot be parsed (wrong number of fields)."""
    pass
def filter_pp(pps: list[abstract_pp.PostProcessor], immediate_run: bool = False) -> list[abstract_pp.PostProcessor]:
    """Return the post processors whose immediate_run flag equals *immediate_run*."""
    return [pp for pp in pps if pp.immediate_run == immediate_run]
def pretty_format_time(dtime: datetime | Arrow) -> str:
    """Format *dtime* (datetime or Arrow) as 'YYYY/MM/DD HH:MM:SS'."""
    return dtime.strftime("%Y/%m/%d %H:%M:%S")
# The following constants come from python-twitter
# https://github.com/bear/python-twitter/blob/master/twitter/twitter_utils.py
# NOTE(review): copied from python-twitter; these appear to be the Unicode
# code-point ranges that Twitter counts with single weight when measuring
# tweet length — confirm against the upstream twitter_utils.py.
CHAR_RANGES = [
    range(0, 4351),
    range(8192, 8205),
    range(8208, 8223),
    range(8242, 8247)]
TLDS = [
"ac", "ad", "ae", "af", "ag", "ai", "al", "am", "an", "ao", "aq", "ar",
"as", "at", "au", "aw", "ax", "az", "ba", "bb", "bd", "be", "bf", "bg",
"bh", "bi", "bj", "bl", "bm", "bn", "bo", "bq", "br", "bs", "bt", "bv",
"bw", "by", "bz", "ca", "cc", "cd", "cf", "cg", "ch", "ci", "ck", "cl",
"cm", "cn", "co", "cr", "cu", "cv", "cw", "cx", "cy", "cz", "de", "dj",
"dk", "dm", "do", "dz", "ec", "ee", "eg", "eh", "er", "es", "et", "eu",
"fi", "fj", "fk", "fm", "fo", "fr", "ga", "gb", "gd", "ge", "gf", "gg",
"gh", "gi", "gl", "gm", "gn", "gp", "gq", "gr", "gs", "gt", "gu", "gw",
"gy", "hk", "hm", "hn", "hr", "ht", "hu", "id", "ie", "il", "im", "in",
"io", "iq", "ir", "is", "it", "je", "jm", "jo", "jp", "ke", "kg", "kh",
"ki", "km", "kn", "kp", "kr", "kw", "ky", "kz", "la", "lb", "lc", "li",
"lk", "lr", "ls", "lt", "lu", "lv", "ly", "ma", "mc", "md", "me", "mf",
"mg", "mh", "mk", "ml", "mm", "mn", "mo", "mp", "mq", "mr", "ms", "mt",
"mu", "mv", "mw", "mx", "my", "mz", "na", "nc", "ne", "nf", "ng", "ni",
"nl", "no", "np", "nr", "nu", "nz", "om", "pa", "pe", "pf", "pg", "ph",
"pk", "pl", "pm", "pn", "pr", "ps", "pt", "pw", "py", "qa", "re", "ro",
"rs", "ru", "rw", "sa", "sb", "sc", "sd", "se", "sg", "sh", "si", "sj",
"sk", "sl", "sm", "sn", "so", "sr", "ss", "st", "su", "sv", "sx", "sy",
"sz", "tc", "td", "tf", "tg", "th", "tj", "tk", "tl", "tm", "tn", "to",
"tp", "tr", "tt", "tv", "tw", "tz", "ua", "ug", "uk", "um", "us", "uy",
"uz", "va", "vc", "ve", "vg", "vi", "vn", "vu", "wf", "ws", "ye", "yt",
"za", "zm", "zw", "ελ", "бел", "мкд", "мон", "рф", "срб", "укр", "қаз",
"հայ", "الاردن", "الجزائر", "السعودية", "المغرب", "امارات", "ایران", "بھارت",
"تونس", "سودان", "سورية", "عراق", "عمان", "فلسطين", "قطر", "مصر",
"مليسيا", "پاکستان", "भारत", "বাংলা", "ভারত", "ਭਾਰਤ", "ભારત",
"இந்தியா", "இலங்கை", "சிங்கப்பூர்", "భారత్", "ලංකා", "ไทย",
"გე", "中国", "中國", "台湾", "台灣", "新加坡", "澳門", "香港", "한국", "neric:",
"abb", "abbott", "abogado", "academy", "accenture", "accountant",
"accountants", "aco", "active", "actor", "ads", "adult", "aeg", "aero",
"afl", "agency", "aig", "airforce", "airtel", "allfinanz", "alsace",
"amsterdam", "android", "apartments", "app", "aquarelle", "archi", "army",
"arpa", "asia", "associates", "attorney", "auction", "audio", "auto",
"autos", "axa", "azure", "band", "bank", "bar", "barcelona", "barclaycard",
"barclays", "bargains", "bauhaus", "bayern", "bbc", "bbva", "bcn", "beer",
"bentley", "berlin", "best", "bet", "bharti", "bible", "bid", "bike",
"bing", "bingo", "bio", "biz", "black", "blackfriday", "bloomberg", "blue",
"bmw", "bnl", "bnpparibas", "boats", "bond", "boo", "boots", "boutique",
"bradesco", "bridgestone", "broker", "brother", "brussels", "budapest",
"build", "builders", "business", "buzz", "bzh", "cab", "cafe", "cal",
"camera", "camp", "cancerresearch", "canon", "capetown", "capital",
"caravan", "cards", "care", "career", "careers", "cars", "cartier",
"casa", "cash", "casino", "cat", "catering", "cba", "cbn", "ceb", "center",
"ceo", "cern", "cfa", "cfd", "chanel", "channel", "chat", "cheap",
"chloe", "christmas", "chrome", "church", "cisco", "citic", "city",
"claims", "cleaning", "click", "clinic", "clothing", "cloud", "club",
"coach", "codes", "coffee", "college", "cologne", "com", "commbank",
"community", "company", "computer", "condos", "construction", "consulting",
"contractors", "cooking", "cool", "coop", "corsica", "country", "coupons",
"courses", "credit", "creditcard", "cricket", "crown", "crs", "cruises",
"cuisinella", "cymru", "cyou", "dabur", "dad", "dance", "date", "dating",
"datsun", "day", "dclk", "deals", "degree", "delivery", "delta",
"democrat", "dental", "dentist", "desi", "design", "dev", "diamonds",
"diet", "digital", "direct", "directory", "discount", "dnp", "docs",
"dog", "doha", "domains", "doosan", "download", "drive", "durban", "dvag",
"earth", "eat", "edu", "education", "email", "emerck", "energy",
"engineer", "engineering", "enterprises", "epson", "equipment", "erni",
"esq", "estate", "eurovision", "eus", "events", "everbank", "exchange",
"expert", "exposed", "express", "fage", "fail", "faith", "family", "fan",
"fans", "farm", "fashion", "feedback", "film", "finance", "financial",
"firmdale", "fish", "fishing", "fit", "fitness", "flights", "florist",
"flowers", "flsmidth", "fly", "foo", "football", "forex", "forsale",
"forum", "foundation", "frl", "frogans", "fund", "furniture", "futbol",
"fyi", "gal", "gallery", "game", "garden", "gbiz", "gdn", "gent",
"genting", "ggee", "gift", "gifts", "gives", "giving", "glass", "gle",
"global", "globo", "gmail", "gmo", "gmx", "gold", "goldpoint", "golf",
"goo", "goog", "google", "gop", "gov", "graphics", "gratis", "green",
"gripe", "group", "guge", "guide", "guitars", "guru", "hamburg", "hangout",
"haus", "healthcare", "help", "here", "hermes", "hiphop", "hitachi", "hiv",
"hockey", "holdings", "holiday", "homedepot", "homes", "honda", "horse",
"host", "hosting", "hoteles", "hotmail", "house", "how", "hsbc", "ibm",
"icbc", "ice", "icu", "ifm", "iinet", "immo", "immobilien", "industries",
"infiniti", "info", "ing", "ink", "institute", "insure", "int",
"international", "investments", "ipiranga", "irish", "ist", "istanbul",
"itau", "iwc", "java", "jcb", "jetzt", "jewelry", "jlc", "jll", "jobs",
"joburg", "jprs", "juegos", "kaufen", "kddi", "kim", "kitchen", "kiwi",
"koeln", "komatsu", "krd", "kred", "kyoto", "lacaixa", "lancaster", "land",
"lasalle", "lat", "latrobe", "law", "lawyer", "lds", "lease", "leclerc",
"legal", "lexus", "lgbt", "liaison", "lidl", "life", "lighting", "limited",
"limo", "link", "live", "lixil", "loan", "loans", "lol", "london", "lotte",
"lotto", "love", "ltda", "lupin", "luxe", "luxury", "madrid", "maif",
"maison", "man", "management", "mango", "market", "marketing", "markets",
"marriott", "mba", "media", "meet", "melbourne", "meme", "memorial", "men",
"menu", "miami", "microsoft", "mil", "mini", "mma", "mobi", "moda", "moe",
"mom", "monash", "money", "montblanc", "mormon", "mortgage", "moscow",
"motorcycles", "mov", "movie", "movistar", "mtn", "mtpc", "museum",
"nadex", "nagoya", "name", "navy", "nec", "net", "netbank", "network",
"neustar", "new", "news", "nexus", "ngo", "nhk", "nico", "ninja", "nissan",
"nokia", "nra", "nrw", "ntt", "nyc", "office", "okinawa", "omega", "one",
"ong", | |
<filename>analysis.py
"""
Functions used to save model data and to perform analysis
"""
import numpy as np
from parameters import *
from sklearn import svm
import time
import pickle
def analyze_model(trial_info, y_hat, h, syn_x, syn_u, model_performance, weights):
    """
    Run the full post-hoc analysis pipeline (simulation, tuning, decoding) and
    pickle the results to par['save_dir'] + par['save_fn'].

    Converts neuronal and synaptic values, stored in lists, into 3D arrays.
    Creating new variables since h, syn_x, and syn_u are class members of model.py,
    and would get modified by functions within analysis.py.
    """
    # Stack per-timestep lists into (units, time, trials) arrays.
    syn_x_stacked = np.stack(syn_x, axis=1)
    syn_u_stacked = np.stack(syn_u, axis=1)
    h_stacked = np.stack(h, axis=1)
    trial_time = np.arange(0,h_stacked.shape[1]*par['dt'], par['dt'])
    num_reps = 100
    # dualDMS: collapse the two binary rule cues into a single index 0..3.
    if par['trial_type'] == 'dualDMS':
        trial_info['rule'] = trial_info['rule'][:,0] + 2*trial_info['rule'][:,1]
        par['num_rules'] = 4
    """
    Calculate the neuronal and synaptic contributions towards solving the task
    """
    print('simulating network...')
    accuracy, accuracy_neural_shuffled, accuracy_syn_shuffled = \
        simulate_network(trial_info, h_stacked, syn_x_stacked, syn_u_stacked, weights, num_reps = num_reps)
    print('lesioning weights...')
    """
    accuracy_rnn_start, accuracy_rnn_test, accuracy_out = lesion_weights(trial_info, \
        h_stacked, syn_x_stacked, syn_u_stacked, weights)
    """
    # Lesion analysis disabled above; store empty placeholders in the results.
    accuracy_rnn_start = []
    accuracy_rnn_test = []
    accuracy_out = []
    """
    Calculate neuronal and synaptic sample motion tuning
    """
    print('calculate tuning...')
    neuronal_pref_dir, neuronal_pev, synaptic_pref_dir, synaptic_pev, neuronal_pev_test, neuronal_pref_dir_test, neuronal_sample_tuning \
        = calculate_tuning(h_stacked, syn_x_stacked, syn_u_stacked, trial_info, trial_time, calculate_test = True)
    """
    Decode the sample direction from neuronal activity and synaptic efficacies
    using support vector machines
    """
    print('decoding activity...')
    neuronal_decoding, synaptic_decoding = calculate_svms(h_stacked, syn_x_stacked, \
        syn_u_stacked, trial_info, trial_time, num_reps = num_reps)
    """
    Save the results
    """
    results = {
        'neuronal_decoding': neuronal_decoding,
        'synaptic_decoding': synaptic_decoding,
        'neuronal_pref_dir': neuronal_pref_dir,
        'neuronal_pev': neuronal_pev,
        'neuronal_sample_tuning': neuronal_sample_tuning,
        'synaptic_pref_dir': synaptic_pref_dir,
        'synaptic_pev': synaptic_pev,
        'accuracy': accuracy,
        'accuracy_neural_shuffled': accuracy_neural_shuffled,
        'accuracy_syn_shuffled': accuracy_syn_shuffled,
        'model_performance': model_performance,
        'parameters': par,
        'weights': weights,
        'trial_time': trial_time,
        'neuronal_pev_test': neuronal_pev_test,
        'neuronal_pref_dir_test': neuronal_pref_dir_test,
        'accuracy_rnn_start': accuracy_rnn_start,
        'accuracy_rnn_test': accuracy_rnn_test,
        'accuracy_out': accuracy_out}
    save_fn = par['save_dir'] + par['save_fn']
    pickle.dump(results, open(save_fn, 'wb') )
    print('Analysis results saved in ', save_fn)
def calculate_svms(h, syn_x, syn_u, trial_info, trial_time, num_reps = 20):
    """
    Calculates neuronal and synaptic decoding accuracies using support vector machines.
    sample is the index of the sample motion direction for each trial
    rule is the rule index for each trial
    Returns (neuronal_decoding, synaptic_decoding) as produced by svm_wraper.
    """
    lin_clf = svm.SVC(C=1, kernel='linear', decision_function_shape='ovr', shrinking=False, tol=1e-4)
    num_time_steps = len(trial_time)
    # NOTE: these pre-allocations are overwritten by the svm_wraper call below.
    neuronal_decoding = np.zeros((par['num_rules'], num_time_steps, num_reps))
    synaptic_decoding = np.zeros((par['num_rules'], num_time_steps, num_reps))
    """
    The synaptic efficacy is the product of syn_x and syn_u, will decode sample
    direction from this value
    """
    syn_efficacy = syn_x*syn_u
    if par['trial_type'] == 'DMC':
        """
        Will also calculate the category decoding accuracies, assuming the first half of
        the sample direction belong to category 1, and the second half belong to category 2
        """
        num_motion_dirs = len(np.unique(trial_info['sample']))
        sample = np.floor(trial_info['sample']/(num_motion_dirs/2)*np.ones_like(trial_info['sample']))
        rule = trial_info['rule']
    elif par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':
        """
        For ABBA/ABCA trials, will only analyze trials for which the first n-1
        test stimuli, out of n, are non-matches
        """
        ind = np.where(np.sum(trial_info['match'][:,:-1],axis=1)==0)[0]
        sample = trial_info['sample'][ind]
        rule = trial_info['rule'][ind]
        h = h[:,:,ind]
        syn_efficacy = syn_efficacy[:,:,ind]
    else:
        sample = trial_info['sample']
        rule = trial_info['rule']
    # number of unique samples
    N = len(np.unique(sample))
    neuronal_decoding, synaptic_decoding = svm_wraper(lin_clf, h, syn_efficacy, sample, rule, num_reps, N, trial_time)
    return neuronal_decoding, synaptic_decoding
def svm_wraper(lin_clf, h, syn_eff, conds, rule, num_reps, num_conds, trial_time):
    """
    Wrapper function used to decode sample direction from hidden activity (h)
    and synaptic efficacies (syn_eff).

    Returns (score_h, score_syn_eff) of shape
    (num_rules, num_receptive_fields, num_reps, num_time_steps).
    """
    train_pct = 0.75
    # Number of trials drawn per condition to balance the train/test sets.
    trials_per_cond = 25
    _, num_time_steps, num_trials = h.shape
    score_h = np.zeros((par['num_rules'], par['num_receptive_fields'], num_reps, num_time_steps))
    score_syn_eff = np.zeros((par['num_rules'], par['num_receptive_fields'], num_reps, num_time_steps))
    for r in range(par['num_rules']):
        ind_rule = np.where(rule==r)[0]
        for rep in range(num_reps):
            # Random train/test split of the trials belonging to this rule.
            q = np.random.permutation(len(ind_rule))
            i = int(np.round(len(ind_rule)*train_pct))
            train_ind = ind_rule[q[:i]]
            test_ind = ind_rule[q[i:]]
            equal_train_ind = np.zeros((num_conds*trials_per_cond), dtype = np.uint16)
            equal_test_ind = np.zeros((num_conds*trials_per_cond), dtype = np.uint16)
            for n in range(par['num_receptive_fields']):
                if par['trial_type'] == 'dualDMS':
                    current_conds = conds[:,n]
                else:
                    current_conds = np.array(conds)
                # Resample (with replacement) so every condition contributes
                # exactly trials_per_cond trials to train and test.
                for c in range(num_conds):
                    u = range(c*trials_per_cond, (c+1)*trials_per_cond)
                    # training indices for current condition number
                    ind = np.where(current_conds[train_ind] == c)[0]
                    q = np.random.randint(len(ind), size = trials_per_cond)
                    equal_train_ind[u] = train_ind[ind[q]]
                    # testing indices for current condition number
                    ind = np.where(current_conds[test_ind] == c)[0]
                    #print(len(ind), trials_per_cond, n, c)
                    q = np.random.randint(len(ind), size = trials_per_cond)
                    equal_test_ind[u] = test_ind[ind[q]]
                for t in range(num_time_steps):
                    if trial_time[t] <= par['dead_time']:
                        # no need to analyze activity during dead time
                        continue
                    score_h[r,n,rep,t] = calc_svm(lin_clf, h[:,t,:].T, current_conds, equal_train_ind, equal_test_ind)
                    score_syn_eff[r,n,rep,t] = calc_svm(lin_clf, syn_eff[:,t,:].T, current_conds, equal_train_ind, equal_test_ind)
    return score_h, score_syn_eff
def calc_svm(lin_clf, y, conds, train_ind, test_ind):
    """Fit the linear classifier on the training trials and return the fraction
    of correctly classified test trials.

    NOTE: normalization mutates *y* in place (each column is shifted to start
    at 0 and, if par['svm_normalize'] is set, rescaled to [0, 1]).
    """
    # normalize values between 0 and 1
    num_features = y.shape[1]
    for col in range(num_features):
        col_min = y[:, col].min()
        col_max = y[:, col].max()
        y[:, col] -= col_min
        if col_max > col_min and par['svm_normalize']:
            y[:, col] /= (col_max - col_min)
    lin_clf.fit(y[train_ind, :], conds[train_ind])
    predicted = lin_clf.predict(y[test_ind, :])
    score = 0
    per_trial_weight = 1 / len(test_ind)
    for trial_idx, label in zip(test_ind, predicted):
        if conds[trial_idx] == label:
            score += per_trial_weight
    return score
def lesion_weights(trial_info, h, syn_x, syn_u, weights):
    """Measure behavioral impact of lesioning individual network weights.

    For every positive output weight and every positive recurrent weight,
    zero that single entry, re-run the network, and record task accuracy.

    Parameters
    ----------
    trial_info : dict with 'neural_input', 'desired_output', 'train_mask', ...
    h, syn_x, syn_u : (n_hidden, time, trials) activity / synaptic traces
    weights : dict holding 'w_rnn' (N x N) and 'w_out' (3 x N)

    Returns
    -------
    accuracy_rnn_start : accuracy per recurrent lesion, simulated from trial start
    accuracy_rnn_test : accuracy per recurrent lesion, simulated from test onset
    accuracy_out : accuracy per output-weight lesion
    """
    N = weights['w_rnn'].shape[0]
    num_reps = 5
    accuracy_rnn_start = np.ones((N,N), dtype=np.float32)
    accuracy_rnn_test = np.ones((N,N), dtype=np.float32)
    accuracy_out = np.ones((3,N), dtype=np.float32)

    trial_time = np.arange(0,h.shape[1]*par['dt'], par['dt'])
    # Only filled by the (currently disabled) decoding analysis below.
    neuronal_decoding = np.zeros((N,N,par['num_rules'], num_reps, len(trial_time)))
    neuronal_pref_dir = np.zeros((N,N,par['n_hidden'], par['num_rules'], len(trial_time)))
    neuronal_pev = np.zeros((N,N,par['n_hidden'], par['num_rules'], len(trial_time)))

    # network inputs/outputs
    _, trial_length, batch_train_size = h.shape
    x = np.split(trial_info['neural_input'],trial_length,axis=1)
    y = trial_info['desired_output']
    train_mask = trial_info['train_mask']

    # initial state taken from the first recorded time step...
    test_onset = 1
    hidden_init = h[:,test_onset-1,:]
    syn_x_init = syn_x[:,test_onset-1,:]
    syn_u_init = syn_u[:,test_onset-1,:]

    # ...and from the onset of the test period
    test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']
    hidden_init_test = h[:,test_onset-1,:]
    syn_x_init_test = syn_x[:,test_onset-1,:]
    syn_u_init_test = syn_u[:,test_onset-1,:]
    x_test = np.split(trial_info['neural_input'][:,test_onset:,:],trial_length-test_onset,axis=1)
    y_test = trial_info['desired_output'][:,test_onset:,:]
    train_mask_test = trial_info['train_mask'][test_onset:,:]

    # shallow copy so single entries can be swapped out per lesion
    weights_new = dict(weights)

    for n1 in range(3):
        for n2 in range(N):
            if weights['w_out'][n1,n2] <= 0:
                continue
            # lesion one output weight
            q = np.ones((3,N))
            q[n1,n2] = 0
            weights_new['w_out'] = weights['w_out']*q
            # simulate network from the test-period onset
            y_hat, hidden_state_hist = run_model(x_test, y_test, hidden_init_test, syn_x_init_test, syn_u_init_test, weights_new)
            accuracy_out[n1,n2] = get_perf(y_test, y_hat, train_mask_test)

    # BUG FIX: restore the intact output weights before the recurrent-lesion
    # loop; previously the last output lesion remained in weights_new.
    weights_new['w_out'] = weights['w_out']

    for n1 in range(N):
        for n2 in range(N):
            if weights['w_rnn'][n1,n2] <= 0:
                continue
            # lesion one recurrent weight
            q = np.ones((N,N))
            q[n1,n2] = 0
            weights_new['w_rnn'] = weights['w_rnn']*q
            # BUG FIX: pass the 2-D initial synaptic states; the full 3-D
            # syn_x/syn_u arrays were passed here before, unlike every other
            # run_model call in this file.
            y_hat, hidden_state_hist = run_model(x, y, hidden_init, syn_x_init, syn_u_init, weights_new)
            accuracy_rnn_start[n1,n2] = get_perf(y, y_hat, train_mask)
            y_hat, hidden_state_hist = run_model(x_test, y_test, hidden_init_test, syn_x_init_test, syn_u_init_test, weights_new)
            accuracy_rnn_test[n1,n2] = get_perf(y_test, y_hat, train_mask_test)

            # Effectively disabled (accuracy is never < -1); kept so the
            # per-lesion decoding analysis can be re-enabled by hand.
            if accuracy_rnn_start[n1,n2] < -1:
                h_stacked = np.stack(hidden_state_hist, axis=1)
                neuronal_decoding[n1,n2,:,:,:], _ = calculate_svms(h_stacked, syn_x, syn_u, trial_info['sample'], \
                    trial_info['rule'], trial_info['match'], trial_time, num_reps = num_reps)
                neuronal_pref_dir[n1,n2,:,:], neuronal_pev[n1,n2,:,:], _, _ = calculate_sample_tuning(h_stacked, \
                    syn_x, syn_u, trial_info['sample'], trial_info['rule'], trial_info['match'], trial_time)

    return accuracy_rnn_start, accuracy_rnn_test, accuracy_out
def simulate_network(trial_info, h, syn_x, syn_u, weights, num_reps = 20):
    """
    Simulation will start from the start of the test period until the end of trial

    Re-runs the trained network from the recorded state at test onset and
    measures behavioral accuracy (a) unchanged, (b) with the initial neural
    activity shuffled across trials, and (c) with the initial synaptic
    state shuffled across trials.  Returns three (num_rules, num_reps)
    accuracy arrays in that order.
    """
    # Test-period onset (in time steps) depends on the task structure.
    if par['trial_type'] == 'dualDMS':
        test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+2*par['delay_time']+par['test_time'])//par['dt']
    elif par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA' :
        test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+5*par['ABBA_delay'])//par['dt']
    else:
        test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']
    accuracy = np.zeros((par['num_rules'], num_reps))
    accuracy_neural_shuffled = np.zeros((par['num_rules'], num_reps))
    accuracy_syn_shuffled = np.zeros((par['num_rules'], num_reps))
    _, trial_length, batch_train_size = h.shape
    test_length = trial_length - test_onset
    for r in range(par['num_rules']):
        # For ABBA/ABCA trials, will only analyze trials for which the first n-1
        # test stimuli, out of n, are non-matches
        if par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':
            trial_ind = np.where((np.sum(trial_info['match'][:,:-1],axis=1)==0)*(trial_info['rule']==r))[0]
        else:
            trial_ind = np.where(trial_info['rule']==r)[0]
        # Inputs/targets restricted to the test period and selected trials.
        train_mask = trial_info['train_mask'][test_onset:,trial_ind]
        x = np.split(trial_info['neural_input'][:,test_onset:,trial_ind],test_length,axis=1)
        y = trial_info['desired_output'][:,test_onset:,trial_ind]
        for n in range(num_reps):
            """
            Calculating behavioral accuracy without shuffling
            """
            hidden_init = h[:,test_onset-1,trial_ind]
            syn_x_init = syn_x[:,test_onset-1,trial_ind]
            syn_u_init = syn_u[:,test_onset-1,trial_ind]
            y_hat, _ = run_model(x, y, hidden_init, syn_x_init, syn_u_init, weights)
            accuracy[r,n] = get_perf(y, y_hat, train_mask)
            """
            Keep the synaptic values fixed, permute the neural activity
            """
            # One permutation per rep, reused for both shuffle conditions.
            ind_shuffle = np.random.permutation(len(trial_ind))
            hidden_init = hidden_init[:,ind_shuffle]
            y_hat, _ = run_model(x, y, hidden_init, syn_x_init, syn_u_init, weights)
            accuracy_neural_shuffled[r,n] = get_perf(y, y_hat, train_mask)
            """
            Keep the hidden values fixed, permute synaptic values
            """
            # Restore the unshuffled hidden state before shuffling synapses.
            hidden_init = h[:,test_onset-1,trial_ind]
            syn_x_init = syn_x_init[:,ind_shuffle]
            syn_u_init = syn_u_init[:,ind_shuffle]
            y_hat, _ = run_model(x, y, hidden_init, syn_x_init, syn_u_init, weights)
            accuracy_syn_shuffled[r,n] = get_perf(y, y_hat, train_mask)
    return accuracy, accuracy_neural_shuffled, accuracy_syn_shuffled
def calculate_tuning(h, syn_x, syn_u, trial_info, trial_time, calculate_test = False):
"""
Calculates neuronal and synaptic sample motion direction tuning
"""
num_time_steps = len(trial_time)
neuronal_pref_dir = np.zeros((par['n_hidden'], par['num_rules'], num_time_steps))
synaptic_pref_dir = np.zeros((par['n_hidden'], par['num_rules'], num_time_steps))
neuronal_sample_tuning = np.zeros((par['n_hidden'], par['num_rules'], par['num_motion_dirs'], num_time_steps))
neuronal_pev = np.zeros((par['n_hidden'], par['num_rules'], num_time_steps))
synaptic_pev = np.zeros((par['n_hidden'], par['num_rules'], num_time_steps))
neuronal_pref_dir_test = np.zeros((par['n_hidden'], par['num_rules'], num_time_steps))
neuronal_pev_test = np.zeros((par['n_hidden'], par['num_rules'], num_time_steps))
"""
The synaptic efficacy is the product of syn_x and syn_u, will decode sample
direction from this value
"""
syn_efficacy = syn_x*syn_u
if par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':
"""
For ABBA/ABCA trials, will only analyze trials for which the first n-1
test stimuli, out | |
= QtWidgets.QAction(self.tr("&Web Parser"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_W),
statusTip=self.tr("Settings for web parser"),
triggered=self.action_web_profile)
actions['reload'] = QtWidgets.QAction(self.tr("Reload Archive"), self,
shortcut=QtGui.QKeySequence.Refresh,
statusTip=self.tr("Reload the current Archive"),
triggered=self.action_reload)
actions['next_file'] = QtWidgets.QAction(self.tr("Next Archive"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_L),
statusTip=self.tr("Load the next archive in the folder"),
triggered=self.action_next_file)
actions['prev_file'] = QtWidgets.QAction(self.tr("Previous Archive"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_K),
statusTip=self.tr("Load the previous archive in the folder"),
triggered=self.action_prev_file)
actions['next'] = QtWidgets.QAction(self.tr("Next View"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_Space),
statusTip=self.tr("Show next image part"),
triggered=self.manager.action_next)
actions['prev'] = QtWidgets.QAction(self.tr("Previous View"), self,
shortcut=QtGui.QKeySequence("Shift+Space"),
statusTip=self.tr("Show previous image part"),
triggered=self.manager.action_prev)
actions['page'] = QtWidgets.QAction(self.tr("Select Page"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_P),
statusTip=self.tr("Select the an image"),
triggered=self.action_page)
actions['info'] = QtWidgets.QAction(self.tr("Information"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_I),
checkable=True,
statusTip=self.tr("Show informaion about image"),
triggered=self.action_info)
actions['first_image'] = QtWidgets.QAction(self.tr("First Image"), self,
shortcut=QtGui.QKeySequence.MoveToStartOfLine,
statusTip=self.tr("Show first image"),
triggered=self.manager.action_first_image)
actions['last_image'] = QtWidgets.QAction(self.tr("Last Image"), self,
shortcut=QtGui.QKeySequence.MoveToEndOfLine,
statusTip=self.tr("Show last image"),
triggered=self.manager.action_last_image)
actions['next_image'] = QtWidgets.QAction(self.tr("Next Image"), self,
shortcut=QtGui.QKeySequence.MoveToNextPage,
statusTip=self.tr("Show next image"),
triggered=self.manager.action_next_image)
actions['prev_image'] = QtWidgets.QAction(self.tr("Previous Image"), self,
shortcut=QtGui.QKeySequence.MoveToPreviousPage,
statusTip=self.tr("Show previous image"),
triggered=self.manager.action_prev_image)
actions['continuous'] = QtWidgets.QAction(self.tr("Continuous"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_C),
checkable=True,
statusTip=self.tr("Continuous Flow"),
triggered=self.action_toggle_continuous)
actions['fullscreen'] = QtWidgets.QAction(self.tr("Fullscreen"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_F),
checkable=True,
statusTip=self.tr("Toggle Fullscreen"),
triggered=self.action_toggle_fullscreen)
actions['minimize'] = QtWidgets.QAction(self.tr("Minimize"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_M),
statusTip=self.tr("Minimize Window"),
triggered=self.showMinimized)
actions['close'] = QtWidgets.QAction(self.tr("Close"), self,
shortcut=QtGui.QKeySequence(QtCore.Qt.Key_Escape),
statusTip=self.tr("Close Viewer"),
triggered=self.close)
self.writing = []
self.auto_writing = set()
self.last_farchinfo = None
actions['save'] = QtWidgets.QAction(self.tr("Save as..."), self,
shortcut=QtGui.QKeySequence.Save,
statusTip=self.tr("Close Viewer"),
triggered=self.action_save)
for i in range(1, 10):
ckey = getattr(QtCore.Qt, 'Key_%d' % i)
caction = QtWidgets.QAction(self.tr("Append current image"), self,
shortcut=QtGui.QKeySequence(ckey),
triggered=partial(self.action_save_current, i-1))
actions['append_to_%d' % i] = caction
caction = QtWidgets.QAction(self.tr("Automatically append current image"),
self,
checkable=True,
triggered=partial(self.action_save_auto, i-1))
actions['auto_%d' % i] = caction
caction = QtWidgets.QAction(self.tr("Close"), self,
triggered=partial(self.action_save_close, i-1))
actions['close_%d' % i] = caction
actions['movement'] = QtWidgets.QActionGroup(self)
actions['movement'].triggered.connect(self.action_movement)
for mover in self.manager.movers:
act = QtWidgets.QAction(self.tr(mover), actions['movement'],
checkable=True)
if mover == self.manager.mover:
act.setChecked(True)
for act in itervalues(actions):
if isinstance(act, QtWidgets.QAction):
self.addAction(act)
self.actions = actions
def load_dropped_archive(self):
try:
farch = self.dropping.pop_archive()
_, name = os.path.split(farch.path)
ntitle = '%s - %s' % (name, self.tr("Image Viewer"))
self.manager.open_archive(farch)
self.setWindowTitle(ntitle)
except WrapperIOError as err:
errormsg = html.escape(str(err)) or "Unknown Error"
errormsg = '<br/>'.join(s.strip() for s in errormsg.split('\n'))
self.label.setText(self.tr(errormsg))
self.label.resize(self.label.sizeHint())
self.label.show()
self.labeltimer.start(self.settings.longtimeout)
except Exception:
trace = traceback.format_exception(*sys.exc_info())
mask = 'Unexpected Error:<br/><pre>%s</pre>'
errmsg = html.escape('\n'.join(trace))
self.label.setText(self.tr(mask % errmsg))
self.label.resize(self.label.sizeHint())
self.label.show()
self.labeltimer.start(self.settings.longtimeout)
def load_archive(self, path, page=0):
"""
load the images in the archive given py path and show the first one.
Parameters
----------
path : the path to the archive to load
page : the page to open in the archive, default 0.
Returns
----------
success : returns ``True`` if images could be loaded and ``False``
if no images could be found in the archive.
"""
try:
farch = open_wrapper(path)
_, name = os.path.split(path)
ntitle = '%s - %s' % (name, self.tr("Image Viewer"))
self.manager.open_archive(farch, page)
self.setWindowTitle(ntitle)
return True
except WrapperIOError as err:
errormsg = text_type(err) or self.tr("Unknown Error")
errormsg = html.escape(errormsg)
self.label.setText(errormsg)
self.label.resize(self.label.sizeHint())
self.label.show()
self.labeltimer.start(self.settings.longtimeout)
return False
def hide_label(self):
self.actions['info'].setChecked(QtCore.Qt.Unchecked)
self.label.hide()
self.labeltimer.stop()
    def resize_view(self):
        # Debounced resize handler: the timer fired, so cancel it and
        # re-layout the current view once.
        self.resizetimer.stop()
        self.manager.refresh()
def contextMenuEvent(self, event):
menu = QtWidgets.QMenu(self)
menu.addAction(self.actions['open'])
sv_menu = menu.addMenu(self.tr('Save'))
for i, farch in enumerate(self.writing):
base, filename = os.path.split(farch.path)
c_menu = sv_menu.addMenu(filename)
c_append = self.actions['append_to_%d' % (i+1)]
c_auto = self.actions['auto_%d' % (i+1)]
c_auto.setChecked(farch in self.auto_writing)
c_close = self.actions['close_%d' % (i+1)]
c_menu.addAction(c_append)
c_menu.addAction(c_auto)
c_menu.addAction(c_close)
sv_menu.addAction(self.actions['save'])
menu.addAction(self.actions['prev_file'])
menu.addAction(self.actions['reload'])
menu.addAction(self.actions['next_file'])
menu.addSeparator()
menu.addAction(self.actions['info'])
menu.addAction(self.actions['page'])
menu.addAction(self.actions['first_image'])
menu.addAction(self.actions['last_image'])
menu.addAction(self.actions['prev_image'])
menu.addAction(self.actions['next_image'])
menu.addAction(self.actions['prev'])
menu.addAction(self.actions['next'])
menu.addSeparator()
mv_menu = menu.addMenu(self.tr('Movement'))
for act in self.actions['movement'].actions():
mv_menu.addAction(act)
menu.addAction(self.actions['fullscreen'])
menu.addAction(self.actions['continuous'])
menu.addAction(self.actions['minimize'])
menu.addSeparator()
menu.addAction(self.actions['webparser'])
menu.addAction(self.actions['settings'])
menu.addAction(self.actions['close'])
menu.exec_(event.globalPos())
def dragEnterEvent(self,e):
if e.mimeData().hasUrls():
e.accept()
else:
super(ImageViewer,self).dragEnterEvent(e)
def dragMoveEvent(self, e):
if e.mimeData().hasUrls():
e.setDropAction(QtCore.Qt.LinkAction)
e.accept()
else:
super(ImageViewer,self).dragMoveEvent(e)
def dropEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
url = event.mimeData().urls()[0]
path = text_type(url.toLocalFile() or url.toString())
self.dropping.set_path(path).start()
labelstr = u'Loading "%s"' % path
self.label.setText(labelstr)
self.label.resize(self.label.sizeHint())
self.label.show()
else:
super(ImageViewer,self).dropEvent(event)
    def mouseDoubleClickEvent(self,e):
        # Double click advances to the next view, same as the Space shortcut.
        self.manager.action_next()
    def resizeEvent(self,e):
        # Debounce view refreshes: restart the 100 ms timer on every real
        # resize (oldSize is invalid on the initial show event).
        if e.oldSize().isValid():
            self.resizetimer.start(100)
        super(ImageViewer,self).resizeEvent(e)
def closeEvent(self,e):
self.save_settings()
self.manager.close()
for farch in self.writing:
farch.close()
super(ImageViewer,self).closeEvent(e)
def action_open(self):
archives = ' '.join('*%s' % ext for ext in ArchiveWrapper.formats)
dialog = QtGui.QFileDialog(self)
dialog.setFileMode(dialog.ExistingFile)
dialog.setNameFilter(self.tr("Archives (%s)") % archives)
dialog.setViewMode(dialog.Detail)
infos = self.manager.page_description
if 'archpath' in infos:
path, name = os.path.split(infos['archpath'])
dialog.setDirectory(path)
if dialog.exec_():
self.load_archive(dialog.selectedFiles()[0])
def action_save(self):
if len(self.writing) >= 9:
return
archives = '*.zip'
auto_add = False
fpath = '/'
infos = self.manager.page_description
if self.last_farchinfo is not None:
fpath, auto_add = self.last_farchinfo
elif 'archpath' in infos:
fpath, name = os.path.split(infos['archpath'])
path, dummy = QtWidgets.QFileDialog.getSaveFileName(self,
directory=fpath,
filter=archives)
if path:
try:
farch = ArchiveWrapper(path, 'w')
self.writing.append(farch)
if auto_add:
self.auto_writing.add(farch)
except WrapperIOError as err:
errormsg = text_type(err) or self.tr("Unkown Error")
errormsg = html.escape(errormsg)
self.label.setText(errormsg)
self.label.resize(self.label.sizeHint())
self.label.show()
self.labeltimer.start(self.settings.longtimeout)
def action_save_current(self, archive_ind):
infos = self.manager.page_description
if archive_ind >= len(self.writing) or 'filename' not in infos:
return
base, filename = os.path.split(infos['filename'])
#remove trailing part seperated by ?
filename = filename.split('?')[0].strip()
#prepend the page number to ensure correct ordering of images
filename = u'%.3d_%s' % (infos['page'], filename)
img = self.manager.get_buffered_image(infos['page'])
farch = self.writing[archive_ind]
if isinstance(img, Image.Image) and filename not in farch:
with farch.open(filename, 'w') as fout:
img.save(fout,'jpeg',quality=self.settings.write_quality,
optimize=self.settings.write_optimize,
progressive=self.settings.write_progressive)
self.label.setText('Save "%s" to "%s"' % (filename, farch.path))
self.label.resize(self.label.sizeHint())
self.label.show()
self.labeltimer.start(self.settings.longtimeout)
def action_save_auto(self, archive_ind):
if archive_ind >= len(self.writing):
return
farch = self.writing[archive_ind]
if farch in self.auto_writing:
self.auto_writing.remove(farch)
else:
self.auto_writing.add(farch)
def action_save_close(self, archive_ind):
if archive_ind >= len(self.writing):
return
farch = self.writing.pop(archive_ind)
if farch in self.auto_writing:
self.auto_writing.remove(farch)
self.last_farchinfo = farch.path, True
else:
self.last_farchinfo = farch.path, False
farch.close()
    def action_web_profile(self):
        # Open the modal settings dialog for the web-parser profiles.
        dialog = WebProfileSettings(self)
        dialog.exec_()
def action_settings(self):
dialog = Settings(self.settings, self)
if dialog.exec_():
osettings = self.settings
self.settings = dialog.settings
self.manager.set_settings(dialog.settings)
if osettings.bgcolor != self.settings.bgcolor:
self.scene().setBackgroundBrush(self.settings.bgcolor)
def action_page(self):
manager = self.manager
if manager.imagelist:
self.pageselect.set_range(manager.page, manager.imagelist)
if self.pageselect.exec_():
manager.show_page(self.pageselect.value)
def action_page_info(self):
if self.actions['info'].isChecked():
self._update_info()
elif not self.manager.continuous:
infos = self.manager.page_description
page = infos['page']
page_count = infos['page_count']
if infos['error']:
error = html.escape(infos['error'])
infostr = u'%d/%d<br />%s' % (page+1, page_count, error)
else:
infostr = u'%d/%d' % (page+1, page_count)
self.label.setText(infostr)
self.label.resize(self.label.sizeHint())
self.label.show()
self.labeltimer.start(self.settings.shorttimeout)
for farch in self.auto_writing:
self.action_save_current(self.writing.index(farch))
    def action_status_info(self, priority):
        # if the information is open or we are loading an archive
        # we should update the info
        if self.actions['info'].isChecked() or not self.manager:
            self._update_info()
        elif priority > 0:
            # High-priority status message: show it briefly even though the
            # info overlay is not pinned open.
            self._update_info()
            self.label.show()
            # NOTE(review): this branch is only reached when the info action
            # is unchecked, so the condition below appears to be always true.
            if not self.actions['info'].isChecked():
                self.labeltimer.start(self.settings.longtimeout)
        # Auto-append the current image to each flagged write archive.
        for farch in self.auto_writing:
            self.action_save_current(self.writing.index(farch))
def action_info(self):
#show the label when info is active or no data is loaded
if not self.manager:
self._update_info()
self.label.show()
elif self.actions['info'].isChecked():
self.labeltimer.stop()
self._update_info()
self.label.show()
else:
self.label.hide()
    def action_movement(self,action):
        # Triggered by the exclusive movement QActionGroup: keep the chosen
        # entry checked and switch the manager's mover strategy by name.
        action.setChecked(True)
        self.manager.mover = action.text()
def action_toggle_fullscreen(self):
if self.isFullScreen():
self.showNormal()
else:
self.showFullScreen()
    def action_toggle_continuous(self):
        # Re-apply the current settings with the continuous-flow flag taken
        # from the checkable action.
        continuous = self.actions['continuous'].isChecked()
        self.manager.set_settings(self.settings, continuous)
def action_reload(self):
infos = self.manager.page_description
if 'page_url' in infos:
self.dropping.set_path(infos['page_url']).start()
labelstr = u'Loading "%s"' % infos['page_url']
self.label.setText(labelstr)
self.label.resize(self.label.sizeHint())
self.label.show()
elif 'archpath' in infos:
page = infos.get('page', None)
path = infos['archpath']
self.load_archive(path, page)
def action_next_file(self):
errormsg = ''
farch = self.manager.wrapper
if farch:
archlist,loadindex = farch.list_archives()
folder, name = os.path.split(farch.path)
loadindex += 1
while loadindex < len(archlist) and \
not self.load_archive(archlist[loadindex]):
loadindex += 1
if loadindex >= len(archlist):
errormsg = self.tr('No further archives in "%s"') % folder
if errormsg:
errormsg = html.escape(errormsg)
self.label.setText(errormsg)
self.label.resize(self.label.sizeHint())
self.label.show()
self.labeltimer.start(self.settings.longtimeout)
def action_prev_file(self):
errormsg = ''
farch = self.manager.wrapper
if farch:
archlist,loadindex = farch.list_archives()
folder, name = os.path.split(farch.path)
loadindex -= 1
while loadindex >= 0 and not self.load_archive(archlist[loadindex]):
loadindex -= 1
if loadindex < 0:
errormsg = self.tr('No previous archives in "%s"') % folder
if errormsg:
errormsg = html.escape(errormsg)
self.label.setText(errormsg)
self.label.resize(self.label.sizeHint())
self.label.show()
self.labeltimer.start(self.settings.longtimeout)
def save_settings(self):
isContinuous = self.actions['continuous'].isChecked()
settings = QtCore.QSettings("Caasar", "Image Viewer")
settings.beginGroup("MainWindow")
settings.setValue("fullscreen", self.isFullScreen())
settings.setValue("continuous", isContinuous)
if not self.isFullScreen():
settings.setValue("pos", self.pos())
settings.setValue("size", self.size())
settings.setValue("movement", self.manager.mover)
settings.endGroup()
settings.beginGroup("Settings")
csettings = self.settings._asdict()
for key, value in iteritems(csettings):
settings.setValue(key, value)
settings.endGroup()
settings.beginGroup("WebProfiles")
for key,val in WebWrapper.profiles.items():
values = repr(tuple(val[key] for key in WebWrapper.profile_keys))
settings.setValue(key,values)
settings.endGroup()
if self.settings.saveposition and self.manager:
infos = self.manager.page_description
settings.beginGroup("History")
if 'page_url' in infos:
settings.setValue("lastpath", infos['page_url'])
settings.setValue("lastpage", 0)
else:
settings.setValue("lastpath", infos['archpath'])
settings.setValue("lastpage", infos['page'])
settings.endGroup()
def load_settings(self):
settings = QtCore.QSettings("Caasar", "Image Viewer")
settings.beginGroup("MainWindow")
self.resize(settings.value("size",QtCore.QSize(640, 480)))
self.move(settings.value("pos", QtCore.QPoint(100, 100)))
isFullscreen = settings.value("fullscreen", 'false') == 'true'
isContinuous = settings.value("continuous", 'false') == 'true'
self.manager.mover = settings.value("movement", "")
for act in self.actions['movement'].actions():
if act.text() == self.manager.mover:
act.setChecked(True)
settings.endGroup()
settings.beginGroup("Settings")
csettings = self.settings._asdict()
for key, defvalue in iteritems(csettings):
value = settings.value(key, defvalue)
if defvalue is not None:
value = type(defvalue)(value)
csettings[key] = value
self.settings = Settings.dict2tuple(csettings)
self.manager.set_settings(self.settings, isContinuous)
settings.endGroup()
self.scene().setBackgroundBrush(self.settings.bgcolor)
settings.beginGroup("WebProfiles")
for profile in settings.allKeys():
values = literal_eval(settings.value(profile))
prof = dict(zip(WebWrapper.profile_keys, values))
if len(values) == len(WebWrapper.profile_keys):
WebWrapper.profiles[profile] = prof
settings.endGroup()
if self.settings.saveposition:
settings.beginGroup("History")
path = settings.value("lastpath",'')
page | |
n) -> SedChange
removeChange(SedModel self, string sid) -> SedChange
"""
return _libsedml.SedModel_removeChange(self, *args)
    def getElementName(self):
        """getElementName(SedModel self) -> string"""
        # SWIG wrapper: forwards to the C++ SedModel implementation.
        return _libsedml.SedModel_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedModel self) -> int"""
        # SWIG wrapper: forwards to the C++ SedModel implementation.
        return _libsedml.SedModel_getTypeCode(self)
    def hasRequiredAttributes(self):
        """hasRequiredAttributes(SedModel self) -> bool"""
        # SWIG wrapper: forwards to the C++ SedModel implementation.
        return _libsedml.SedModel_hasRequiredAttributes(self)
    def hasRequiredElements(self):
        """hasRequiredElements(SedModel self) -> bool"""
        # SWIG wrapper: forwards to the C++ SedModel implementation.
        return _libsedml.SedModel_hasRequiredElements(self)
    def setSedDocument(self, *args):
        """setSedDocument(SedModel self, SedDocument d)"""
        # SWIG wrapper: forwards to the C++ SedModel implementation.
        return _libsedml.SedModel_setSedDocument(self, *args)
    def connectToChild(self):
        """connectToChild(SedModel self)"""
        # SWIG wrapper: forwards to the C++ SedModel implementation.
        return _libsedml.SedModel_connectToChild(self)
SedModel_swigregister = _libsedml.SedModel_swigregister
# Register the SedModel proxy with the SWIG runtime type system so that
# C++ objects returned by the library are wrapped with this class.
SedModel_swigregister(SedModel)
class SedListOfModels(SedListOf):
    """Proxy of C++ SedListOfModels class"""
    # SWIG-generated attribute plumbing: merge the base class accessor
    # tables and route attribute get/set through the wrapped C++ object.
    __swig_setmethods__ = {}
    for _s in [SedListOf]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedListOfModels, name, value)
    __swig_getmethods__ = {}
    for _s in [SedListOf]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedListOfModels, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedListOfModels self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedListOfModels
        __init__(SedListOfModels self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedListOfModels
        __init__(SedListOfModels self) -> SedListOfModels
        __init__(SedListOfModels self, SedNamespaces sedns) -> SedListOfModels
        """
        # Allocate the underlying C++ object and attach it to this proxy
        # (standard SWIG boilerplate, including the broad except).
        this = _libsedml.new_SedListOfModels(*args)
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedListOfModels self) -> SedListOfModels"""
        return _libsedml.SedListOfModels_clone(self)
    def get(self, *args):
        """
        get(SedListOfModels self, unsigned int n) -> SedModel
        get(SedListOfModels self, unsigned int n) -> SedModel
        get(SedListOfModels self, string sid) -> SedModel
        get(SedListOfModels self, string sid) -> SedModel
        """
        return _libsedml.SedListOfModels_get(self, *args)
    def addModel(self, *args):
        """addModel(SedListOfModels self, SedModel m) -> int"""
        return _libsedml.SedListOfModels_addModel(self, *args)
    def getNumModels(self):
        """getNumModels(SedListOfModels self) -> unsigned int"""
        return _libsedml.SedListOfModels_getNumModels(self)
    def createModel(self):
        """createModel(SedListOfModels self) -> SedModel"""
        return _libsedml.SedListOfModels_createModel(self)
    def remove(self, *args):
        """
        remove(SedListOfModels self, unsigned int n) -> SedModel
        remove(SedListOfModels self, string sid) -> SedModel
        """
        return _libsedml.SedListOfModels_remove(self, *args)
    def getElementName(self):
        """getElementName(SedListOfModels self) -> string"""
        return _libsedml.SedListOfModels_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedListOfModels self) -> int"""
        return _libsedml.SedListOfModels_getTypeCode(self)
    def getItemTypeCode(self):
        """getItemTypeCode(SedListOfModels self) -> int"""
        return _libsedml.SedListOfModels_getItemTypeCode(self)
    # Destructor plumbing: free the C++ object when the proxy is collected.
    __swig_destroy__ = _libsedml.delete_SedListOfModels
    __del__ = lambda self : None;
SedListOfModels_swigregister = _libsedml.SedListOfModels_swigregister
# Register the proxy with the SWIG runtime type system.
SedListOfModels_swigregister(SedListOfModels)
class SedAlgorithm(SedBase):
    """Proxy of C++ SedAlgorithm class"""
    # SWIG-generated attribute plumbing: merge the base class accessor
    # tables and route attribute get/set through the wrapped C++ object.
    __swig_setmethods__ = {}
    for _s in [SedBase]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedAlgorithm, name, value)
    __swig_getmethods__ = {}
    for _s in [SedBase]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedAlgorithm, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedAlgorithm self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedAlgorithm
        __init__(SedAlgorithm self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedAlgorithm
        __init__(SedAlgorithm self) -> SedAlgorithm
        __init__(SedAlgorithm self, SedNamespaces sedns) -> SedAlgorithm
        __init__(SedAlgorithm self, SedAlgorithm orig) -> SedAlgorithm
        """
        # Allocate the underlying C++ object and attach it to this proxy
        # (standard SWIG boilerplate, including the broad except).
        this = _libsedml.new_SedAlgorithm(*args)
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedAlgorithm self) -> SedAlgorithm"""
        return _libsedml.SedAlgorithm_clone(self)
    # Destructor plumbing: free the C++ object when the proxy is collected.
    __swig_destroy__ = _libsedml.delete_SedAlgorithm
    __del__ = lambda self : None;
    def getKisaoID(self):
        """getKisaoID(SedAlgorithm self) -> string"""
        return _libsedml.SedAlgorithm_getKisaoID(self)
    def isSetKisaoID(self):
        """isSetKisaoID(SedAlgorithm self) -> bool"""
        return _libsedml.SedAlgorithm_isSetKisaoID(self)
    def setKisaoID(self, *args):
        """setKisaoID(SedAlgorithm self, string kisaoID) -> int"""
        return _libsedml.SedAlgorithm_setKisaoID(self, *args)
    def unsetKisaoID(self):
        """unsetKisaoID(SedAlgorithm self) -> int"""
        return _libsedml.SedAlgorithm_unsetKisaoID(self)
    def getListOfAlgorithmParameters(self):
        """getListOfAlgorithmParameters(SedAlgorithm self) -> SedListOfAlgorithmParameters"""
        return _libsedml.SedAlgorithm_getListOfAlgorithmParameters(self)
    def getAlgorithmParameter(self, *args):
        """
        getAlgorithmParameter(SedAlgorithm self, unsigned int n) -> SedAlgorithmParameter
        getAlgorithmParameter(SedAlgorithm self, unsigned int n) -> SedAlgorithmParameter
        getAlgorithmParameter(SedAlgorithm self, string sid) -> SedAlgorithmParameter
        getAlgorithmParameter(SedAlgorithm self, string sid) -> SedAlgorithmParameter
        """
        return _libsedml.SedAlgorithm_getAlgorithmParameter(self, *args)
    def addAlgorithmParameter(self, *args):
        """addAlgorithmParameter(SedAlgorithm self, SedAlgorithmParameter sap) -> int"""
        return _libsedml.SedAlgorithm_addAlgorithmParameter(self, *args)
    def getNumAlgorithmParameters(self):
        """getNumAlgorithmParameters(SedAlgorithm self) -> unsigned int"""
        return _libsedml.SedAlgorithm_getNumAlgorithmParameters(self)
    def createAlgorithmParameter(self):
        """createAlgorithmParameter(SedAlgorithm self) -> SedAlgorithmParameter"""
        return _libsedml.SedAlgorithm_createAlgorithmParameter(self)
    def removeAlgorithmParameter(self, *args):
        """
        removeAlgorithmParameter(SedAlgorithm self, unsigned int n) -> SedAlgorithmParameter
        removeAlgorithmParameter(SedAlgorithm self, string sid) -> SedAlgorithmParameter
        """
        return _libsedml.SedAlgorithm_removeAlgorithmParameter(self, *args)
    def getElementName(self):
        """getElementName(SedAlgorithm self) -> string"""
        return _libsedml.SedAlgorithm_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedAlgorithm self) -> int"""
        return _libsedml.SedAlgorithm_getTypeCode(self)
    def hasRequiredAttributes(self):
        """hasRequiredAttributes(SedAlgorithm self) -> bool"""
        return _libsedml.SedAlgorithm_hasRequiredAttributes(self)
    def hasRequiredElements(self):
        """hasRequiredElements(SedAlgorithm self) -> bool"""
        return _libsedml.SedAlgorithm_hasRequiredElements(self)
    def setSedDocument(self, *args):
        """setSedDocument(SedAlgorithm self, SedDocument d)"""
        return _libsedml.SedAlgorithm_setSedDocument(self, *args)
    def connectToChild(self):
        """connectToChild(SedAlgorithm self)"""
        return _libsedml.SedAlgorithm_connectToChild(self)
SedAlgorithm_swigregister = _libsedml.SedAlgorithm_swigregister
# Register the proxy with the SWIG runtime type system.
SedAlgorithm_swigregister(SedAlgorithm)
class SedAlgorithmParameter(SedBase):
    """Proxy of C++ SedAlgorithmParameter class"""
    # SWIG boilerplate: merge the attribute dispatch tables inherited from
    # SedBase and route attribute access through the generated helpers.
    __swig_setmethods__ = {}
    for _s in [SedBase]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedAlgorithmParameter, name, value)
    __swig_getmethods__ = {}
    for _s in [SedBase]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedAlgorithmParameter, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedAlgorithmParameter self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedAlgorithmParameter
        __init__(SedAlgorithmParameter self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedAlgorithmParameter
        __init__(SedAlgorithmParameter self) -> SedAlgorithmParameter
        __init__(SedAlgorithmParameter self, SedNamespaces sedns) -> SedAlgorithmParameter
        __init__(SedAlgorithmParameter self, SedAlgorithmParameter orig) -> SedAlgorithmParameter
        """
        this = _libsedml.new_SedAlgorithmParameter(*args)
        # SWIG ownership bookkeeping for the underlying C++ pointer
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedAlgorithmParameter self) -> SedAlgorithmParameter"""
        return _libsedml.SedAlgorithmParameter_clone(self)
    __swig_destroy__ = _libsedml.delete_SedAlgorithmParameter
    __del__ = lambda self : None;
    # Accessors for the kisaoID attribute (KiSAO term identifying the
    # algorithm parameter).
    def getKisaoID(self):
        """getKisaoID(SedAlgorithmParameter self) -> string"""
        return _libsedml.SedAlgorithmParameter_getKisaoID(self)
    def isSetKisaoID(self):
        """isSetKisaoID(SedAlgorithmParameter self) -> bool"""
        return _libsedml.SedAlgorithmParameter_isSetKisaoID(self)
    def setKisaoID(self, *args):
        """setKisaoID(SedAlgorithmParameter self, string kisaoID) -> int"""
        return _libsedml.SedAlgorithmParameter_setKisaoID(self, *args)
    def unsetKisaoID(self):
        """unsetKisaoID(SedAlgorithmParameter self) -> int"""
        return _libsedml.SedAlgorithmParameter_unsetKisaoID(self)
    # Accessors for the value attribute (parameter value as a string).
    def getValue(self):
        """getValue(SedAlgorithmParameter self) -> string"""
        return _libsedml.SedAlgorithmParameter_getValue(self)
    def isSetValue(self):
        """isSetValue(SedAlgorithmParameter self) -> bool"""
        return _libsedml.SedAlgorithmParameter_isSetValue(self)
    def setValue(self, *args):
        """setValue(SedAlgorithmParameter self, string value) -> int"""
        return _libsedml.SedAlgorithmParameter_setValue(self, *args)
    def unsetValue(self):
        """unsetValue(SedAlgorithmParameter self) -> int"""
        return _libsedml.SedAlgorithmParameter_unsetValue(self)
    def getElementName(self):
        """getElementName(SedAlgorithmParameter self) -> string"""
        return _libsedml.SedAlgorithmParameter_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedAlgorithmParameter self) -> int"""
        return _libsedml.SedAlgorithmParameter_getTypeCode(self)
    def hasRequiredAttributes(self):
        """hasRequiredAttributes(SedAlgorithmParameter self) -> bool"""
        return _libsedml.SedAlgorithmParameter_hasRequiredAttributes(self)
    def setSedDocument(self, *args):
        """setSedDocument(SedAlgorithmParameter self, SedDocument d)"""
        return _libsedml.SedAlgorithmParameter_setSedDocument(self, *args)
# SWIG boilerplate: register the proxy class with the C extension.
SedAlgorithmParameter_swigregister = _libsedml.SedAlgorithmParameter_swigregister
SedAlgorithmParameter_swigregister(SedAlgorithmParameter)
class SedListOfAlgorithmParameters(SedListOf):
    """Proxy of C++ SedListOfAlgorithmParameters class"""
    # SWIG boilerplate: merge inherited attribute dispatch tables and route
    # attribute access through the generated helpers.
    __swig_setmethods__ = {}
    for _s in [SedListOf]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedListOfAlgorithmParameters, name, value)
    __swig_getmethods__ = {}
    for _s in [SedListOf]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedListOfAlgorithmParameters, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedListOfAlgorithmParameters self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedListOfAlgorithmParameters
        __init__(SedListOfAlgorithmParameters self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedListOfAlgorithmParameters
        __init__(SedListOfAlgorithmParameters self) -> SedListOfAlgorithmParameters
        __init__(SedListOfAlgorithmParameters self, SedNamespaces sedns) -> SedListOfAlgorithmParameters
        """
        this = _libsedml.new_SedListOfAlgorithmParameters(*args)
        # SWIG ownership bookkeeping for the underlying C++ pointer
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedListOfAlgorithmParameters self) -> SedListOfAlgorithmParameters"""
        return _libsedml.SedListOfAlgorithmParameters_clone(self)
    def get(self, *args):
        """
        get(SedListOfAlgorithmParameters self, unsigned int n) -> SedAlgorithmParameter
        get(SedListOfAlgorithmParameters self, unsigned int n) -> SedAlgorithmParameter
        get(SedListOfAlgorithmParameters self, string sid) -> SedAlgorithmParameter
        get(SedListOfAlgorithmParameters self, string sid) -> SedAlgorithmParameter
        """
        # Overloaded on the C++ side: lookup by index or by SId string.
        return _libsedml.SedListOfAlgorithmParameters_get(self, *args)
    def addAlgorithmParameter(self, *args):
        """addAlgorithmParameter(SedListOfAlgorithmParameters self, SedAlgorithmParameter ap) -> int"""
        return _libsedml.SedListOfAlgorithmParameters_addAlgorithmParameter(self, *args)
    def getNumAlgorithmParameters(self):
        """getNumAlgorithmParameters(SedListOfAlgorithmParameters self) -> unsigned int"""
        return _libsedml.SedListOfAlgorithmParameters_getNumAlgorithmParameters(self)
    def createAlgorithmParameter(self):
        """createAlgorithmParameter(SedListOfAlgorithmParameters self) -> SedAlgorithmParameter"""
        return _libsedml.SedListOfAlgorithmParameters_createAlgorithmParameter(self)
    def remove(self, *args):
        """
        remove(SedListOfAlgorithmParameters self, unsigned int n) -> SedAlgorithmParameter
        remove(SedListOfAlgorithmParameters self, string sid) -> SedAlgorithmParameter
        """
        return _libsedml.SedListOfAlgorithmParameters_remove(self, *args)
    def getElementName(self):
        """getElementName(SedListOfAlgorithmParameters self) -> string"""
        return _libsedml.SedListOfAlgorithmParameters_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedListOfAlgorithmParameters self) -> int"""
        return _libsedml.SedListOfAlgorithmParameters_getTypeCode(self)
    def getItemTypeCode(self):
        """getItemTypeCode(SedListOfAlgorithmParameters self) -> int"""
        return _libsedml.SedListOfAlgorithmParameters_getItemTypeCode(self)
    __swig_destroy__ = _libsedml.delete_SedListOfAlgorithmParameters
    __del__ = lambda self : None;
# SWIG boilerplate: register the proxy class with the C extension.
SedListOfAlgorithmParameters_swigregister = _libsedml.SedListOfAlgorithmParameters_swigregister
SedListOfAlgorithmParameters_swigregister(SedListOfAlgorithmParameters)
class SedSimulation(SedBase):
    """Proxy of C++ SedSimulation class"""
    # SWIG boilerplate: merge inherited attribute dispatch tables and route
    # attribute access through the generated helpers.
    __swig_setmethods__ = {}
    for _s in [SedBase]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedSimulation, name, value)
    __swig_getmethods__ = {}
    for _s in [SedBase]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedSimulation, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedSimulation self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedSimulation
        __init__(SedSimulation self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedSimulation
        __init__(SedSimulation self) -> SedSimulation
        __init__(SedSimulation self, SedNamespaces sedns) -> SedSimulation
        __init__(SedSimulation self, SedSimulation orig) -> SedSimulation
        """
        this = _libsedml.new_SedSimulation(*args)
        # SWIG ownership bookkeeping for the underlying C++ pointer
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedSimulation self) -> SedSimulation"""
        return _libsedml.SedSimulation_clone(self)
    __swig_destroy__ = _libsedml.delete_SedSimulation
    __del__ = lambda self : None;
    # Accessors for the id attribute.
    def getId(self):
        """getId(SedSimulation self) -> string"""
        return _libsedml.SedSimulation_getId(self)
    def isSetId(self):
        """isSetId(SedSimulation self) -> bool"""
        return _libsedml.SedSimulation_isSetId(self)
    def setId(self, *args):
        """setId(SedSimulation self, string id) -> int"""
        return _libsedml.SedSimulation_setId(self, *args)
    def unsetId(self):
        """unsetId(SedSimulation self) -> int"""
        return _libsedml.SedSimulation_unsetId(self)
    # Accessors for the name attribute.
    def getName(self):
        """getName(SedSimulation self) -> string"""
        return _libsedml.SedSimulation_getName(self)
    def isSetName(self):
        """isSetName(SedSimulation self) -> bool"""
        return _libsedml.SedSimulation_isSetName(self)
    def setName(self, *args):
        """setName(SedSimulation self, string name) -> int"""
        return _libsedml.SedSimulation_setName(self, *args)
    def unsetName(self):
        """unsetName(SedSimulation self) -> int"""
        return _libsedml.SedSimulation_unsetName(self)
    # Accessors for the single child SedAlgorithm element.
    def getAlgorithm(self):
        """getAlgorithm(SedSimulation self) -> SedAlgorithm"""
        return _libsedml.SedSimulation_getAlgorithm(self)
    def createAlgorithm(self):
        """createAlgorithm(SedSimulation self) -> SedAlgorithm"""
        return _libsedml.SedSimulation_createAlgorithm(self)
    def isSetAlgorithm(self):
        """isSetAlgorithm(SedSimulation self) -> bool"""
        return _libsedml.SedSimulation_isSetAlgorithm(self)
    def setAlgorithm(self, *args):
        """setAlgorithm(SedSimulation self, SedAlgorithm algorithm) -> int"""
        return _libsedml.SedSimulation_setAlgorithm(self, *args)
    def unsetAlgorithm(self):
        """unsetAlgorithm(SedSimulation self) -> int"""
        return _libsedml.SedSimulation_unsetAlgorithm(self)
    def getElementName(self):
        """getElementName(SedSimulation self) -> string"""
        return _libsedml.SedSimulation_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedSimulation self) -> int"""
        return _libsedml.SedSimulation_getTypeCode(self)
    def hasRequiredAttributes(self):
        """hasRequiredAttributes(SedSimulation self) -> bool"""
        return _libsedml.SedSimulation_hasRequiredAttributes(self)
    def hasRequiredElements(self):
        """hasRequiredElements(SedSimulation self) -> bool"""
        return _libsedml.SedSimulation_hasRequiredElements(self)
    def setSedDocument(self, *args):
        """setSedDocument(SedSimulation self, SedDocument d)"""
        return _libsedml.SedSimulation_setSedDocument(self, *args)
    def connectToChild(self):
        """connectToChild(SedSimulation self)"""
        return _libsedml.SedSimulation_connectToChild(self)
# SWIG boilerplate: register the proxy class with the C extension.
SedSimulation_swigregister = _libsedml.SedSimulation_swigregister
SedSimulation_swigregister(SedSimulation)
class SedListOfSimulations(SedListOf):
"""Proxy of C++ SedListOfSimulations class"""
__swig_setmethods__ = {}
for _s in [SedListOf]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda | |
# TyranicMoron/dotfiles (repository marker left by a scrape; commented out so the file stays parseable)
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# This script checks stream status of any channel on any servers
# listed in the "plugins.var.python.twitch.servers" setting. When you
# switch to a buffer it will display updated information about the stream
# in the title bar. Typing '/twitch' in a buffer will also fetch updated
# information. '/whois nick' will look up user info and display it in the
# current buffer.
#
# https://github.com/mumixam/weechat-twitch
#
# settings:
# plugins.var.python.twitch.servers (default: twitch)
# plugins.var.python.twitch.prefix_nicks (default: 1)
# plugins.var.python.twitch.debug (default: 0)
# plugins.var.python.twitch.ssl_verify (default: 1)
# plugins.var.python.twitch.notice_notify_block (default: 1)
# plugins.var.python.twitch.client_id (default: awtv6n371jb7uayyc4jaljochyjbfxs)
# plugins.var.python.twitch.token (default: "")
#
# # History:
#
# 2020-07-27,
# v0.9: added support for Oauth token to support twitch APIs requirement -mumixam
# fix bug for when api returns null for game_id -mas90
#
# 2019-10-13, mumixam
# v0.8: changed input modifier hooks to use irc_in2_* instead
# added setting 'plugins.var.python.twitch.notice_notify_block'
# added setting 'plugins.var.python.twitch.client_id'
#
# 2019-09-21, mumixam
# v0.7: updated script to use current api
# 2019-03-03,
# v0.6: added support for CLEARMSG -MentalFS
# fixed issue with /whois -mumixam
# 2018-06-03, mumixam
# v0.5: enable curl verbose mode when debug is active, add option to disable ssl/tls verification,
# if stream title contains newline char replace it with space
# 2017-11-02, mumixam
# v0.4: added debug mode for API calls, minor bugfixes
# 2017-06-10, mumixam
# v0.3: fixed whois output of utf8 display names
# 2016-11-03, mumixam
# v0.2: added detailed /help
# 2016-10-30, mumixam
# v0.1: script added to weechat.org
# Script registration metadata (consumed by weechat.register()).
SCRIPT_NAME = "twitch"
SCRIPT_AUTHOR = "mumixam"
SCRIPT_VERSION = "0.9"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "twitch.tv Chat Integration"

# Option defaults: name -> (default value, help text).
# NOTE(review): curlopt below reads OPTIONS[...][0] (tuple element) while the
# callbacks read OPTIONS[...] directly -- presumably the config-load code
# (outside this chunk) replaces the tuples with plain strings; confirm there.
OPTIONS={
    'servers': ('twitch','Name of server(s) which script will be active on, space seperated'),
    'prefix_nicks': ('1','Prefix nicks based on ircv3 tags for mods/subs, This can be cpu intensive on very active chats [1 for enabled, 0 for disabled]'),
    'debug': ('0','Debug mode'),
    'ssl_verify': ('1', 'Verify SSL/TLS certs'),
    'notice_notify_block': ('1', 'Changes notify level of NOTICEs to low'),
    'client_id': ('awtv6n371jb7uayyc4jaljochyjbfxs', 'Twitch App ClientID'),
    'token': ('', 'Twitch User Token')
}
import weechat
import json
from calendar import timegm
from datetime import datetime, timedelta
import time
import string
import ast
# Default curl options for weechat.hook_process_hashtable() API requests.
# The Helix API requires both an OAuth bearer token and a Client-ID header.
curlopt = {
    "httpheader": "\n".join([
        "Authorization: Bearer "+OPTIONS['token'][0],
        "Client-ID: "+OPTIONS['client_id'][0],
    ]),
    "timeout": "5",
    "verbose": "0",  # per the header notes, debug mode enables curl verbose output
    "ssl_verifypeer": "1",
    "ssl_verifyhost": "2"
}

# Lookup caches to avoid repeated API round-trips:
# game_id -> game name, and user id -> user name.
gameid_cache = {}
uid_cache = {}
def days_hours_minutes(td):
    """Format a datetime.timedelta as a compact age string, e.g. '1d 2h 3m'.

    Zero-valued units are omitted; a zero or sub-minute delta yields ''.

    :param td: datetime.timedelta to format
    :return: str such as '1d 2h 3m', '5h', '42m' or ''
    """
    # td.seconds is only the sub-day remainder, so days are read separately.
    hours = td.seconds // 3600
    minutes = td.seconds // 60 % 60  # renamed from 'min', which shadowed the builtin
    parts = []
    if td.days:
        parts.append('%dd' % td.days)
    if hours:
        parts.append('%dh' % hours)
    if minutes:
        parts.append('%dm' % minutes)
    return ' '.join(parts)
def twitch_main(data, buffer, args):
    """/twitch command and buffer-switch callback: refresh stream status.

    Fires an asynchronous Helix /streams request; the response is handled by
    stream_api(), which updates the buffer title.
    """
    # args == 'bs' means a buffer switch: keep the cached title status so the
    # "Title is ..." line is not reprinted; otherwise clear it for a fresh fetch.
    if not args == 'bs':
        weechat.buffer_set(buffer, 'localvar_set_tstatus', '')
    username = weechat.buffer_get_string(buffer, 'short_name').replace('#', '')
    server = weechat.buffer_get_string(buffer, 'localvar_server')
    type = weechat.buffer_get_string(buffer, 'localvar_type')
    # Only act on channel buffers belonging to the configured twitch servers.
    if not (server in OPTIONS['servers'].split() and type == 'channel'):
        return weechat.WEECHAT_RC_OK
    url = 'https://api.twitch.tv/helix/streams?user_login=' + username
    weechat.hook_process_hashtable(
        "url:" + url, curlopt, 7 * 1000, "stream_api", buffer)
    return weechat.WEECHAT_RC_OK
def makeutf8(data):
    """Round-trip *data* through UTF-8, returning a native str.

    Kept as an encode/decode pair (rather than a no-op) so behaviour matches
    the original Python-2/3 compatibility shim exactly.
    """
    encoded = data.encode('utf8')
    if isinstance(encoded, str):
        return encoded
    return str(encoded, 'utf8')
def stream_api(data, command, rc, stdout, stderr):
    """hook_process callback: render Helix /streams data into the buffer title.

    ``data`` is the buffer pointer the request was made for, ``stdout`` the
    JSON response body. Builds an OFFLINE/LIVE status line (with game, viewer
    count, uptime and channel-mode flags) and sets it as the buffer title.
    """
    try:
        jsonDict = json.loads(stdout.strip())
    except Exception as e:
        weechat.prnt(data, '%stwitch.py: error communicating with twitch api' % weechat.prefix('error'))
        # NOTE(review): OPTIONS['debug'] holds a string at runtime; the string
        # '0' is truthy, so this may fire even with debug disabled -- confirm
        # how the (unseen) config loader normalizes option values.
        if OPTIONS['debug']:
            weechat.prnt(data,'%stwitch.py: return code: %s' % (weechat.prefix('error'),rc))
            weechat.prnt(data,'%stwitch.py: stdout: %s' % (weechat.prefix('error'),stdout))
            weechat.prnt(data,'%stwitch.py: stderr: %s' % (weechat.prefix('error'),stderr))
            weechat.prnt(data,'%stwitch.py: exception: %s' % (weechat.prefix('error'),e))
        return weechat.WEECHAT_RC_OK
    currentbuf = weechat.current_buffer()
    title_fg = weechat.color(
        weechat.config_color(weechat.config_get("weechat.bar.title.color_fg")))
    title_bg = weechat.color(
        weechat.config_color(weechat.config_get("weechat.bar.title.color_bg")))
    pcolor = weechat.color('chat_prefix_network')
    ccolor = weechat.color('chat')
    red = weechat.color('red')
    blue = weechat.color('blue')
    green = weechat.color('green')
    ptime = time.strftime("%H:%M:%S")
    # Channel-mode flags previously stored on the buffer as local variables.
    subs = weechat.buffer_get_string(data, 'localvar_subs')
    r9k = weechat.buffer_get_string(data, 'localvar_r9k')
    slow = weechat.buffer_get_string(data, 'localvar_slow')
    emote = weechat.buffer_get_string(data, 'localvar_emote')
    if not 'data' in jsonDict.keys():
        weechat.prnt(data, 'twitch.py: Error with twitch API (data key missing from json)')
        if OPTIONS['debug']:
            weechat.prnt(data, 'twitch.py: %s' % stdout.strip())
        return weechat.WEECHAT_RC_OK
    if not jsonDict['data']:
        # An empty 'data' array means the stream is offline.
        line = "STREAM: %sOFFLINE%s %sCHECKED AT: (%s)" % (
            red, title_fg, blue, ptime)
        if subs:
            line += " %s[SUBS]" % title_fg
        if r9k:
            line += " %s[R9K]" % title_fg
        if slow:
            line += " %s[SLOW@%s]" % (title_fg, slow)
        if emote:
            line += " %s[EMOTE]" % title_fg
        weechat.buffer_set(data, "title", line)
    else:
        currenttime = time.time()
        # Helix wraps the stream object in a one-element list.
        if len(jsonDict['data']) == 1:
            jsonDict['data'] = jsonDict['data'][0]
        output = 'STREAM: %sLIVE%s' % (green, title_fg)
        if 'game_id' in jsonDict['data']:
            if jsonDict['data']['game_id']:
                game = jsonDict['data']['game_id']
                game_id = game
                # Show the cached game name if known; otherwise the raw id is
                # shown now and replaced by game_api() once resolved.
                if game in gameid_cache:
                    game = gameid_cache[game]
                output += ' <%s> with' % game
            else:
                game_id = None
        else:
            game_id = None
        if 'viewer_count' in jsonDict['data']:
            viewers = jsonDict['data']['viewer_count']
            output += ' %s viewers started' % viewers
        if 'started_at' in jsonDict['data']:
            # started_at is ISO-8601 with a trailing 'Z'; strptime's %Z needs
            # a zone name, hence the Z -> GMT substitution.
            createtime = jsonDict['data']['started_at'].replace('Z', 'GMT')
            starttime = timegm(
                time.strptime(createtime, '%Y-%m-%dT%H:%M:%S%Z'))
            dur = timedelta(seconds=currenttime - starttime)
            uptime = days_hours_minutes(dur)
            output += ' %s ago' % uptime
        if 'title' in jsonDict['data']:
            # Per the v0.5 history note, newlines in titles are replaced with
            # spaces; the ascii variant is stored for change detection only.
            titleutf8=jsonDict['data']['title'].replace('\n',' ').encode('utf8')
            titleascii=jsonDict['data']['title'].encode('ascii','replace')
            if not isinstance(titleutf8, str):
                titleascii=str(titleascii,'utf8')
                titleutf8=str(titleutf8,'utf8')
            oldtitle = weechat.buffer_get_string(data, 'localvar_tstatus')
            # Only print "Title is ..." when the title actually changed.
            if not oldtitle == titleascii:
                weechat.prnt(data, '%s--%s Title is "%s"' %
                             (pcolor, ccolor, titleutf8))
                weechat.buffer_set(data, 'localvar_set_tstatus', titleascii)
        output += ' (%s)' % ptime
        if subs:
            output += " %s[SUBS]" % title_fg
        if r9k:
            output += " %s[R9K]" % title_fg
        if slow:
            output += " %s[SLOW@%s]" % (title_fg, slow)
        if emote:
            output += " %s[EMOTE]" % title_fg
        weechat.buffer_set(data, "title", output)
        # Resolve an unknown game_id asynchronously via the /games endpoint.
        if game_id is not None and not game_id in gameid_cache:
            url = 'https://api.twitch.tv/helix/games?id=' + game_id
            weechat.hook_process_hashtable(
                "url:" + url, curlopt, 7 * 1000, "game_api", data)
    return weechat.WEECHAT_RC_OK
def game_api(data, command, rc, stdout, stderr):
    """hook_process callback: resolve a Helix game id to its name.

    Replaces the '<game_id>' placeholder that stream_api() put into the
    buffer title with the real game name and caches the mapping.
    ``data`` is the buffer pointer.
    """
    try:
        jsonDict = json.loads(stdout.strip())
    except Exception as e:
        weechat.prnt(data, '%stwitch.py: error communicating with twitch api' % weechat.prefix('error'))
        # NOTE(review): OPTIONS['debug'] is a string at runtime; '0' is truthy
        # -- confirm against the config loader (see stream_api).
        if OPTIONS['debug']:
            weechat.prnt(data,'%stwitch.py: return code: %s' % (weechat.prefix('error'),rc))
            weechat.prnt(data,'%stwitch.py: stdout: %s' % (weechat.prefix('error'),stdout))
            weechat.prnt(data,'%stwitch.py: stderr: %s' % (weechat.prefix('error'),stderr))
            weechat.prnt(data,'%stwitch.py: exception: %s' % (weechat.prefix('error'),e))
        return weechat.WEECHAT_RC_OK
    if 'data' in jsonDict.keys():
        if not jsonDict['data']:
            return weechat.WEECHAT_RC_OK
        if len(jsonDict['data']) == 1:
            jsonDict['data'] = jsonDict['data'][0]
            old_title = weechat.buffer_get_string(data, "title")
            id = jsonDict['data']['id']
            name = makeutf8(jsonDict['data']['name'])
            # Swap the raw id for the resolved name in the visible title.
            new_title = old_title.replace('<{}>'.format(id),'<{}>'.format(name))
            weechat.buffer_set(data, "title", new_title)
            gameid_cache[id] = name
    return weechat.WEECHAT_RC_OK
def channel_api(data, command, rc, stdout, stderr):
    """hook_process callback chain backing the /whois lookup.

    ``data`` is the buffer pointer to print to. The callback is re-entered:
    first with user info (prints display name / created date / bio, then
    requests the follower count), then with the follows responses — the
    request URL in ``command`` ('to_id' vs 'from_id') tells the stages apart.
    """
    try:
        jsonDict = json.loads(stdout.strip())
    except Exception as e:
        weechat.prnt(data, '%stwitch.py: error communicating with twitch api' % weechat.prefix('error'))
        if OPTIONS['debug']:
            # BUG FIX: 'data' is the buffer pointer (a plain string), not a
            # dict -- data['buffer'] raised TypeError. Print to 'data'
            # directly, matching the same branch in stream_api()/game_api().
            weechat.prnt(data,'%stwitch.py: return code: %s' % (weechat.prefix('error'),rc))
            weechat.prnt(data,'%stwitch.py: stdout: %s' % (weechat.prefix('error'),stdout))
            weechat.prnt(data,'%stwitch.py: stderr: %s' % (weechat.prefix('error'),stderr))
            weechat.prnt(data,'%stwitch.py: exception: %s' % (weechat.prefix('error'),e))
        return weechat.WEECHAT_RC_OK
    currentbuf = weechat.current_buffer()
    pcolor = weechat.color('chat_prefix_network')
    ccolor = weechat.color('chat')
    dcolor = weechat.color('chat_delimiters')
    ncolor = weechat.color('chat_nick')
    ul = weechat.color("underline")
    rul = weechat.color("-underline")
    pformat = weechat.config_string(
        weechat.config_get("weechat.look.prefix_network"))
    if 'total' in jsonDict:
        # A follows response; the user id is the last URL query parameter.
        uid = command.split('=')[-1]
        name = 'WHOIS'
        if 'to_id' in command:
            # Followers of the user; chain the 'following' request next.
            followers = jsonDict['total']
            if uid in uid_cache:
                name = uid_cache[uid]
            output = '%s%s %s[%s%s%s]%s %sFollowers%s: %s' % (
                pcolor, pformat, dcolor, ncolor, name, dcolor, ccolor, ul, rul, followers)
            weechat.prnt(data, makeutf8(output))
            url = 'https://api.twitch.tv/helix/users/follows?from_id=' + uid
            url_hook = weechat.hook_process_hashtable(
                "url:" + url, curlopt, 7 * 1000, "channel_api", data)
            return weechat.WEECHAT_RC_OK
        if 'from_id' in command:
            # Channels the user follows; end of the chain.
            following = jsonDict['total']
            if uid in uid_cache:
                name = uid_cache[uid]
            output = '%s%s %s[%s%s%s]%s %sFollowing%s: %s' % (
                pcolor, pformat, dcolor, ncolor, name, dcolor, ccolor, ul, rul, following)
            weechat.prnt(data, makeutf8(output))
            return weechat.WEECHAT_RC_OK
    # NOTE(review): this branch matches the legacy (kraken-style) users
    # response shape; the 8-field check presumably guards against partial
    # records -- confirm against the request issued by the /whois handler.
    if ('users' in jsonDict) and jsonDict['users'] and len(jsonDict['users'][0]) == 8:
        dname = jsonDict['users'][0]['display_name']
        name = jsonDict['users'][0]['name']
        create = jsonDict['users'][0]['created_at'].split('T')[0]
        status = jsonDict['users'][0]['bio']
        uid = jsonDict['users'][0]['_id']
        uid_cache[uid] = name
        output = '%s%s %s[%s%s%s]%s %sDisplay Name%s: %s' % (
            pcolor, pformat, dcolor, ncolor, name, dcolor, ccolor, ul, rul, dname)
        output += '\n%s%s %s[%s%s%s]%s %sAccount Created%s: %s' % (
            pcolor, pformat, dcolor, ncolor, name, dcolor, ccolor, ul, rul, create)
        if status:
            output += '\n%s%s %s[%s%s%s]%s %sBio%s: %s' % (
                pcolor, pformat, dcolor, ncolor, name, dcolor, ccolor, ul, rul, status)
        weechat.prnt(data, makeutf8(output))
        # Chain the follower-count lookup for this user.
        url = 'https://api.twitch.tv/helix/users/follows?to_id=' + uid
        url_hook = weechat.hook_process_hashtable(
            "url:" + url, curlopt, 7 * 1000, "channel_api", data)
    else:
        weechat.prnt(data, 'Error: No Such User')
    return weechat.WEECHAT_RC_OK
def twitch_clearchat(data, modifier, modifier_data, string):
mp = weechat.info_get_hashtable(
'irc_message_parse', {"message": string})
server = modifier_data
user = mp['text']
channel | |
predetermined entity with its Spawner. This is supposed to be used
with things like grenades and Molotov cocktails that fly in the arc and
explode upon hitting the ground.
"""
def __init__(self, *args, spawned_item='flame', target_y=None,
             explosion_sound=None, **kwargs):
    """
    :param spawned_item: spawner ID of the entity created on detonation
    :param target_y: y coordinate at which to detonate; if None, a random
        point 2..12 cells below the first observed position is chosen
    :param explosion_sound: optional sound ID played on detonation
    """
    super().__init__(*args, **kwargs)
    self.spawned_item = spawned_item
    self.target_y = target_y
    self.explosion_sound = explosion_sound
    # Detonation is driven by watching the owner's own movement events.
    self.dispatcher.register_listener(self, 'ecs_move')
def on_event(self, event):
    # React only to the owner's own movement.
    if event.event_type == 'ecs_move' and event.event_value[0] == self.owner.id:
        if not self.target_y:
            # First move with no preset target: pick a random landing height
            # below the launch point.
            self.target_y = self.owner.position.y + randint(2, 12)
        if self.owner.position.y >= self.target_y:
            # Reached (or passed) the landing height: detonate.
            if self.explosion_sound:
                self.dispatcher.add_event(BearEvent('play_sound',
                                                    self.explosion_sound))
            # TODO: remove flame sound if I add other grenades
            self.dispatcher.add_event(BearEvent('play_sound',
                                                'molotov_fire'))
            # Spawn the payload at the owner's center, then remove the owner.
            self.owner.spawner.spawn(self.spawned_item,
                                     (round(self.owner.widget.width/2),
                                      round(self.owner.widget.height/2)))
            self.owner.destructor.destroy()
def __repr__(self):
    """Serialize this component's state on top of the base component's JSON."""
    serialized = loads(super().__repr__())
    serialized['spawned_item'] = self.spawned_item
    serialized['target_y'] = self.target_y
    return dumps(serialized)
class SoundDestructorComponent(DestructorComponent):
    # A DestructorComponent that also emits a 'set_bg_sound' event when the
    # owner is destroyed, switching the background sound to ``bg_sound``.
    def __init__(self, *args, bg_sound='supercop_bg', **kwargs):
        super().__init__(*args, **kwargs)
        # Sound ID to set as the background track on destruction.
        self.bg_sound = bg_sound
    def destroy(self):
        # Emit the event first, while the component is still fully alive.
        self.dispatcher.add_event(BearEvent('set_bg_sound', self.bg_sound))
        super().destroy()
class HealthComponent(Component):
    """
    A component that monitors owner's health and processes its changes.

    Listens for 'brut_damage' and 'brut_heal' events addressed to the owner
    and keeps hitpoints clamped to [0, max_hitpoints]. Subclasses implement
    the reaction to HP changes in process_hitpoint_update().
    """
    def __init__(self, *args, hitpoints=3, **kwargs):
        super().__init__(*args, name='health', **kwargs)
        self.dispatcher.register_listener(self, ('brut_damage', 'brut_heal'))
        # ``hitpoints`` is both the starting and the maximum HP.
        self.max_hitpoints = hitpoints
        self._hitpoints = hitpoints
    def on_event(self, event):
        # Only react to events addressed to this component's owner.
        if event.event_type == 'brut_damage' and \
                event.event_value[0] == self.owner.id:
            self.hitpoints -= event.event_value[1]
        elif event.event_type == 'brut_heal' and \
                event.event_value[0] == self.owner.id:
            self.hitpoints += event.event_value[1]
    @property
    def hitpoints(self):
        return self._hitpoints
    @hitpoints.setter
    def hitpoints(self, value):
        # Validate, clamp to [0, max_hitpoints], then notify the subclass hook.
        if not isinstance(value, int):
            raise BearECSException(f'Attempting to set hitpoints of {self.owner.id} to non-integer {value}')
        self._hitpoints = value
        if self._hitpoints < 0:
            self._hitpoints = 0
        if self._hitpoints > self.max_hitpoints:
            self._hitpoints = self.max_hitpoints
        self.process_hitpoint_update()
    def process_hitpoint_update(self):
        """
        React to a hitpoint change; called after every (clamped) assignment.

        Subclasses must override this to implement death, visuals, sounds, etc.
        """
        raise NotImplementedError('HP update processing should be overridden')
    def __repr__(self):
        # NOTE: max_hitpoints is not serialized; on load, current HP becomes
        # the new maximum (matches the __init__ signature above).
        return dumps({'class': self.__class__.__name__,
                      'hitpoints': self.hitpoints})
class SwitchHealthComponent(HealthComponent):
    """
    A health component for various switches, triggers and so on.

    When damaged (ie collided into by a combat projectile), switches its state
    and orders the widget to change. Does not work as an actual damageable
    item.

    This logic is not implemented in CollisionComponent because the item needs
    that one for correct interaction with walkers and such. OTOH, I don't see
    any need for destructible switches, so they aren't likely to need a
    regular HealthComponent.

    Expects owner's widget component to be SwitchWidgetComponent
    """
    def __init__(self, *args,
                 initial_state=False,
                 on_event_type='brut_change_config',
                 on_event_value=(None, None),
                 on_sound='switch_on',
                 on_widget='wall_switch_on',
                 off_event_type='brut_change_config',
                 off_event_value=(None, None),
                 off_sound='switch_off',
                 off_widget='wall_switch_off',
                 **kwargs):
        super().__init__(*args, **kwargs)
        if not isinstance(initial_state, bool):
            raise BearException(f'{type(initial_state)} used as initial_state for SwitchHealthComponent instead of bool')
        self.current_state = initial_state
        # Event emitted, sound played and widget image shown for each state.
        self.on_event_type = on_event_type
        self.on_event_value = on_event_value
        self.on_sound = on_sound
        self.on_widget = on_widget
        self.off_event_type = off_event_type
        self.off_event_value = off_event_value
        self.off_sound = off_sound
        self.off_widget = off_widget
        # Also follow config changes so several switches stay in sync
        # ('brut_damage'/'brut_heal' are already registered by the parent).
        self.dispatcher.register_listener(self, 'brut_change_config')
    def on_event(self, event):
        # Overrides (does not extend) the parent handler: damage toggles the
        # switch instead of reducing HP; heal events are ignored.
        if event.event_type == 'brut_damage' and \
                event.event_value[0] == self.owner.id:
            return self.trigger()
        elif event.event_type == 'brut_change_config' and \
                event.event_value[0] == self.on_event_value[0]:
            # Assumes the event_value is always (str, bool). It not necessarily
            # is, but that's the kind of option switches are for.
            if event.event_value[1]:
                self.switch_on()
            else:
                self.switch_off()
    def trigger(self):
        # Toggle to the opposite state, returning the emitted events.
        if self.current_state:
            return self.switch_off()
        else:
            return self.switch_on()
    def switch_on(self):
        self.current_state = True
        self.owner.widget.switch_to_image(self.on_widget)
        return [BearEvent(self.on_event_type, self.on_event_value),
                BearEvent('play_sound', self.on_sound)]
    def switch_off(self):
        self.current_state = False
        self.owner.widget.switch_to_image(self.off_widget)
        return [BearEvent(self.off_event_type, self.off_event_value),
                BearEvent('play_sound', self.off_sound)]
    def __repr__(self):
        d = loads(super().__repr__())
        # Current state is saved as initial_state so it survives reload.
        d['initial_state'] = self.current_state
        d['on_event_type'] = self.on_event_type
        d['on_event_value'] = self.on_event_value
        d['on_sound'] = self.on_sound
        d['on_widget'] = self.on_widget
        d['off_event_type'] = self.off_event_type
        d['off_event_value'] = self.off_event_value
        d['off_sound'] = self.off_sound
        d['off_widget'] = self.off_widget
        return dumps(d)
class DestructorHealthComponent(HealthComponent):
    """
    A HealthComponent that destroys its owner when HP drops to zero.

    If the owner has no DestructorComponent, reaching zero HP silently does
    nothing.
    """
    def process_hitpoint_update(self):
        if self.hitpoints != 0:
            return
        if hasattr(self.owner, 'destructor'):
            self.owner.destructor.destroy()
class SpawnerDestructorHealthComponent(HealthComponent):
    """
    A HealthComponent that, at zero HP, spawns an item and then destroys
    its owner.
    """
    def __init__(self, *args, spawned_item='pistol', relative_pos=(0, 0),
                 **kwargs):
        super().__init__(*args, **kwargs)
        # What to spawn on death, and where (relative to the owner).
        self.spawned_item = spawned_item
        self.relative_pos = relative_pos
    def process_hitpoint_update(self):
        if self.hitpoints != 0:
            return
        # Spawn the drop before the owner entity is removed.
        self.owner.spawner.spawn(self.spawned_item, self.relative_pos)
        self.owner.destructor.destroy()
class CharacterHealthComponent(HealthComponent):
    """
    Health component for characters (both playable and NPCs). Upon death,
    creates a corpse and drops whatever the character had in his hands.

    Expects owner to have SpawnerComponent, HandInterfaceComponent and
    DestructorComponent
    """
    def __init__(self, *args, corpse=None, heal_sounds=('bandage', ),
                 hit_sounds=None, death_sounds=None,
                 score=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Spawner ID of the corpse entity created on death.
        self.corpse_type = corpse
        self.hit_sounds = hit_sounds
        self.heal_sounds = heal_sounds
        self.death_sounds = death_sounds
        # Previous HP value, used to tell hits from heals.
        self.last_hp = self.hitpoints
        # Score value dropped as a pickup on death, if any.
        self.score = score
    def process_hitpoint_update(self):
        # Non-lethal hit: play a random hit sound.
        if 0 < self.hitpoints < self.last_hp and self.hit_sounds:
            self.last_hp = self.hitpoints
            self.dispatcher.add_event(BearEvent('play_sound',
                                                choice(self.hit_sounds)))
        # Heal: play a random heal sound.
        elif 0 < self.last_hp < self.hitpoints and self.heal_sounds:
            self.last_hp = self.hitpoints
            self.dispatcher.add_event(BearEvent('play_sound',
                                                choice(self.heal_sounds)))
        if self.hitpoints == 0:
            # Death sequence; order matters: corpse and drops must be spawned
            # before the owner entity is destroyed.
            self.owner.spawner.spawn(self.corpse_type,
                                     relative_pos=(0, self.owner.widget.height - 9))
            # Drop non-fist items, if any
            self.owner.hands.drop('right', restore_fist=True)
            self.owner.hands.drop('left', restore_fist=True)
            EntityTracker().entities[self.owner.hands.left_item].destructor.destroy()
            EntityTracker().entities[self.owner.hands.right_item].destructor.destroy()
            # Dump score ball, if any
            if self.score:
                self.dispatcher.add_event(BearEvent('play_sound', 'coin_drop'))
                self.owner.spawner.spawn('score_pickup', (randint(-3, 6),
                                                          self.owner.widget.height-2),
                                         score=self.score)
            if self.death_sounds:
                # NOTE(review): 'cop_1' looks like the player entity's id --
                # background music is silenced on its death; confirm.
                if self.owner.id == 'cop_1':
                    self.dispatcher.add_event(BearEvent('set_bg_sound', None))
                self.dispatcher.add_event(BearEvent('play_sound',
                                                    choice(self.death_sounds)))
            self.owner.destructor.destroy()
    def __repr__(self):
        # NOTE: sounds and score are not serialized, only the corpse type.
        d = loads(super().__repr__())
        d['corpse'] = self.corpse_type
        return dumps(d)
class VisualDamageHealthComponent(HealthComponent):
    """
    A health component for non-active objects.

    Tells the owner's widget to switch image upon reaching certain amounts of
    HP. ``widgets_dict`` maps int HP thresholds to image IDs: while HP is not
    less than a threshold but less than the next one (in increasing order),
    that threshold's image is shown.

    If HP reaches zero and object has a Destructor component, it is destroyed
    """
    def __init__(self, *args, widgets_dict=None, hit_sounds=(), **kwargs):
        super().__init__(*args, **kwargs)
        self.widgets_dict = OrderedDict()
        self.hit_sounds = hit_sounds
        # None sentinel instead of a mutable {} default argument.
        if widgets_dict is None:
            widgets_dict = {}
        # Int conversion useful when loading from JSON, where dict keys get
        # converted to str. Sorting with key=int (rather than lexically) keeps
        # thresholds in numeric order even for str keys ('10' would otherwise
        # sort before '2'), which the lookup below relies on.
        for x in sorted(widgets_dict.keys(), key=int):
            self.widgets_dict[int(x)] = widgets_dict[x]
    def process_hitpoint_update(self):
        if self.hit_sounds:
            self.dispatcher.add_event(BearEvent('play_sound',
                                                choice(self.hit_sounds)))
        if self.hitpoints == 0 and hasattr(self.owner, 'destructor'):
            self.owner.destructor.destroy()
        # Ascending scan: the last threshold not exceeding current HP wins.
        for x in self.widgets_dict:
            if self.hitpoints >= x:
                self.owner.widget.switch_to_image(self.widgets_dict[x])
    def __repr__(self):
        d = loads(super().__repr__())
        d['widgets_dict'] = self.widgets_dict
        return dumps(d)
class PowerInteractionComponent(Component):
    """
    Responsible for the interaction with energy projectiles.

    The idea is that while these projectiles are harmful for the characters
    (which turns eg pairs of spikes into reliable defensive tools), other
    objects can react to them differently: spikes are ignoring them, healing
    ray emitters are powered by them, etc. This is the component responsible
    for those behaviours
    """
    def __init__(self, *args, powered=False, action_cooldown=0.1, **kwargs):
        super().__init__(*args, name='powered', **kwargs)
        self.powered = powered
        # Minimum delay between take_action() calls while powered, in seconds.
        self.action_cooldown = action_cooldown
        # Time accumulated towards the next action.
        self.have_waited = 0
        self.dispatcher.register_listener(self, 'tick')
    def get_power(self):
        """Called when an energy projectile powers this object."""
        if not self.powered:
            # Should charge after being powered even if it had collected some
            # charge before
            self.have_waited = 0
        self.powered = True
    def take_action(self, *args, **kwargs):
        raise NotImplementedError('Power interaction behaviours should be overridden')
    def on_event(self, event):
        # While powered, accumulate tick time and fire take_action() once per
        # cooldown interval (a long tick may trigger several actions).
        if self.powered and event.event_type == 'tick':
            self.have_waited += event.event_value
            while self.have_waited >= self.action_cooldown:
                self.take_action()
                self.have_waited -= self.action_cooldown
    def __repr__(self):
        d = loads(super().__repr__())
        d.update({'powered': self.powered,
                  'action_cooldown': self.action_cooldown})
        return dumps(d)
class SciencePropPowerInteractionComponent(PowerInteractionComponent):
    """
    A decorative prop that does nothing useful: it only toggles its
    widget between powered and unpowered images (with a sound cue).

    Expects the owner's WidgetComponent to be a SwitchWidgetComponent.
    """

    def get_power(self):
        super().get_power()
        self.owner.widget.switch_to_image('powered')

    def take_action(self):
        # Revert to the idle look and announce it audibly.
        self.owner.widget.switch_to_image('unpowered')
        self.dispatcher.add_event(BearEvent('play_sound', 'blue_machine'))
        self.powered = False
class SpikePowerInteractionComponent(PowerInteractionComponent):
"""
Power interaction component for spikes.
They spam sparks towards any entities with PowerInteractionComponent
within range
"""
def __init__(self, *args, range=40, **kwargs):
    """
    :param range: maximum distance (in position-coordinate units) at
        which powered machines are targeted.
        NOTE(review): the parameter name shadows the builtin ``range``
        but is part of the keyword interface, so it is preserved.
    """
    super().__init__(*args, **kwargs)
    self.range = range
    # Maps target entity ID -> (x, y) point sparks are aimed at.
    self.targets = {}
    # IDs of tracked targets; used for random choice and deduplication.
    self.target_names = []
    self.dispatcher.register_listener(self, ('ecs_add', 'ecs_destroy'))
def get_power(self):
    """Become powered and show the 'powered' image on the owner widget."""
    super().get_power()
    self.owner.widget.switch_to_image('powered')
def on_event(self, event):
    """Track potential spark targets via ECS add/destroy events,
    in addition to the base class's tick handling."""
    # NOTE(review): the return value of super().on_event is captured but
    # never returned or used — confirm whether that is intentional.
    r = super().on_event(event)
    if event.event_type == 'ecs_add':
        entity = EntityTracker().entities[event.event_value[0]]
        # Ignore entities without power interaction, and duplicates.
        if not hasattr(entity, 'powered') or entity.id in self.target_names:
            return
        if event.event_value[0] == self.owner.id:
            # On deployment, look for nearby machines
            powered = EntityTracker().filter_entities(lambda x: hasattr(x, 'powered'))
            for machine in powered:
                dx = self.owner.position.x - machine.position.x
                dy = self.owner.position.y - machine.position.y
                dist = sqrt(dx ** 2 + dy ** 2)
                # Target every in-range powered machine except ourselves.
                if dist <= self.range and machine.id != self.owner.id and \
                        machine.id not in self.target_names:
                    self.targets[machine.id] = (machine.position.pos[0],
                                                machine.position.pos[1])
                    self.target_names.append(machine.id)
        else:
            # Some other powered entity was added: target it if in range.
            dx = self.owner.position.x - entity.position.x
            dy = self.owner.position.y - entity.position.y
            dist = sqrt(dx ** 2 + dy ** 2)
            if dist <= self.range and entity.id not in self.target_names:
                # Aim at the entity's horizontal midpoint, near its base
                # (the -12 offset presumably lands on the machine body —
                # TODO confirm against the widget art).
                self.targets[entity.id] = (entity.position.pos[0] + entity.widget.width // 2,
                                           entity.position.pos[1] + entity.widget.height - 12)
                self.target_names.append(entity.id)
    elif event.event_type == 'ecs_destroy':
        # Stop tracking entities that leave the ECS.
        if event.event_value in self.targets:
            self.target_names.remove(event.event_value)
            del self.targets[event.event_value]
def take_action(self, *args, **kwargs):
if self.targets:
target = self.targets[choice(self.target_names)]
dx = target[0] - self.owner.position.x - 2
dy = target[1] - self.owner.position.y - 7
dx_sign = abs(dx) // dx if dx != 0 else 0
dy_sign = abs(dy) // dy if dy != 0 else 0
# Trivially proven from:
# 1) V**2 = vx**2 + vy | |
options.unused_without_snapshot:
warn('building --without-snapshot is no longer possible')
o['variables']['want_separate_host_toolset'] = int(cross_compiling)
if options.without_node_snapshot or options.node_builtin_modules_path:
o['variables']['node_use_node_snapshot'] = 'false'
else:
o['variables']['node_use_node_snapshot'] = b(
not cross_compiling and not options.shared)
if options.without_node_code_cache or options.node_builtin_modules_path:
o['variables']['node_use_node_code_cache'] = 'false'
else:
# TODO(refack): fix this when implementing embedded code-cache when cross-compiling.
o['variables']['node_use_node_code_cache'] = b(
not cross_compiling and not options.shared)
if target_arch == 'arm':
configure_arm(o,options)
elif target_arch in ('mips', 'mipsel', 'mips64el'):
configure_mips(o, target_arch,options)
if flavor == 'aix':
o['variables']['node_target_type'] = 'static_library'
if flavor != 'linux' and (options.enable_pgo_generate or options.enable_pgo_use):
raise Exception(
'The pgo option is supported only on linux.')
if flavor == 'linux':
if options.enable_pgo_generate or options.enable_pgo_use:
version_checked = (5, 4, 1)
if not gcc_version_ge(version_checked):
version_checked_str = ".".join(map(str, version_checked))
raise Exception(
'The options --enable-pgo-generate and --enable-pgo-use '
'are supported for gcc and gxx %s or newer only.' % (version_checked_str))
if options.enable_pgo_generate and options.enable_pgo_use:
raise Exception(
'Only one of the --enable-pgo-generate or --enable-pgo-use options '
'can be specified at a time. You would like to use '
'--enable-pgo-generate first, profile node, and then recompile '
'with --enable-pgo-use')
o['variables']['enable_pgo_generate'] = b(options.enable_pgo_generate)
o['variables']['enable_pgo_use'] = b(options.enable_pgo_use)
if flavor != 'linux' and (options.enable_lto):
raise Exception(
'The lto option is supported only on linux.')
if flavor == 'linux':
if options.enable_lto:
version_checked = (5, 4, 1)
if not gcc_version_ge(version_checked):
version_checked_str = ".".join(map(str, version_checked))
raise Exception(
'The option --enable-lto is supported for gcc and gxx %s'
' or newer only.' % (version_checked_str))
o['variables']['enable_lto'] = b(options.enable_lto)
if flavor in ('solaris', 'mac', 'linux', 'freebsd'):
use_dtrace = not options.without_dtrace
# Don't enable by default on linux and freebsd
if flavor in ('linux', 'freebsd'):
use_dtrace = options.with_dtrace
if flavor == 'linux':
if options.systemtap_includes:
o['include_dirs'] += [options.systemtap_includes]
o['variables']['node_use_dtrace'] = b(use_dtrace)
elif options.with_dtrace:
raise Exception(
'DTrace is currently only supported on SunOS, MacOS or Linux systems.')
else:
o['variables']['node_use_dtrace'] = 'false'
if options.node_use_large_pages or options.node_use_large_pages_script_lld:
warn('''The `--use-largepages` and `--use-largepages-script-lld` options
have no effect during build time. Support for mapping to large pages is
now a runtime option of Node.js. Run `node --use-largepages` or add
`--use-largepages` to the `NODE_OPTIONS` environment variable once
Node.js is built to enable mapping to large pages.''')
if options.no_ifaddrs:
o['defines'] += ['SUNOS_NO_IFADDRS']
# By default, enable ETW on Windows.
if flavor == 'win':
o['variables']['node_use_etw'] = b(not options.without_etw)
elif options.with_etw:
raise Exception('ETW is only supported on Windows.')
else:
o['variables']['node_use_etw'] = 'false'
o['variables']['node_with_ltcg'] = b(options.with_ltcg)
if flavor != 'win' and options.with_ltcg:
raise Exception('Link Time Code Generation is only supported on Windows.')
if options.tag:
o['variables']['node_tag'] = '-' + options.tag
else:
o['variables']['node_tag'] = ''
o['variables']['node_release_urlbase'] = options.release_urlbase or ''
if options.v8_options:
o['variables']['node_v8_options'] = options.v8_options.replace('"', '\\"')
if options.enable_static:
o['variables']['node_target_type'] = 'static_library'
o['variables']['node_debug_lib'] = b(options.node_debug_lib)
if options.debug_nghttp2:
o['variables']['debug_nghttp2'] = 1
else:
o['variables']['debug_nghttp2'] = 'false'
if options.experimental_quic:
if options.shared_openssl:
raise Exception('QUIC requires a modified version of OpenSSL and '
'cannot be enabled when using --shared-openssl.')
o['variables']['experimental_quic'] = 1
else:
o['variables']['experimental_quic'] = 'false'
o['variables']['node_no_browser_globals'] = b(options.no_browser_globals)
o['variables']['node_shared'] = b(options.shared)
node_module_version = getmoduleversion.get_version(node_version_h)
if options.dest_os == 'android':
shlib_suffix = 'so'
elif sys.platform == 'darwin':
shlib_suffix = '%s.dylib'
elif sys.platform.startswith('aix'):
shlib_suffix = '%s.a'
else:
shlib_suffix = 'so.%s'
if '%s' in shlib_suffix:
shlib_suffix %= node_module_version
o['variables']['node_module_version'] = int(node_module_version)
o['variables']['shlib_suffix'] = shlib_suffix
if options.linked_module:
o['variables']['library_files'] = options.linked_module
o['variables']['asan'] = int(options.enable_asan or 0)
if options.coverage:
o['variables']['coverage'] = 'true'
else:
o['variables']['coverage'] = 'false'
if options.shared:
o['variables']['node_target_type'] = 'shared_library'
elif options.enable_static:
o['variables']['node_target_type'] = 'static_library'
else:
o['variables']['node_target_type'] = 'executable'
if options.node_builtin_modules_path:
print('Warning! Loading builtin modules from disk is for development')
o['variables']['node_builtin_modules_path'] = options.node_builtin_modules_path
#####
def configure_napi(output, node_napi_h):
    """Record the N-API build version (read from node_napi.h) in the
    gyp variables."""
    output['variables']['napi_build_version'] = \
        getnapibuildversion.get_napi_version(node_napi_h)
def configure_library(options, lib, output, pkgname=None):
    """Wire up gyp variables and linker flags for a --shared-<lib> dependency.

    Include paths and libraries come from the explicit --shared-<lib>-*
    options when given, falling back to pkg-config output otherwise.
    """
    shared_lib = 'shared_' + lib
    use_shared = getattr(options, shared_lib)
    output['variables']['node_' + shared_lib] = b(use_shared)
    if not use_shared:
        return
    (pkg_libs, pkg_cflags, pkg_libpath, _) = pkg_config(pkgname or lib)
    includes = options.__dict__[shared_lib + '_includes']
    if includes:
        output['include_dirs'] += [includes]
    elif pkg_cflags:
        # pkg-config reports include dirs as -I flags; strip them down
        # to bare, non-empty paths for gyp.
        output['include_dirs'] += [
            flag for flag in (f.strip() for f in pkg_cflags.split('-I')) if flag]
    # libpath needs to be provided ahead of libraries
    libpath = options.__dict__[shared_lib + '_libpath']
    if libpath:
        if flavor == 'win':
            output.setdefault(
                'msvs_settings', {'VCLinkerTool': {'AdditionalOptions': []}})
            output['msvs_settings']['VCLinkerTool']['AdditionalOptions'] += [
                '/LIBPATH:%s' % libpath]
        else:
            output['libraries'] += ['-L%s' % libpath]
    elif pkg_libpath:
        output['libraries'] += [pkg_libpath]
    default_libs = getattr(options, shared_lib + '_libname')
    default_libs = ['-l{0}'.format(l) for l in default_libs.split(',')]
    if default_libs:
        output['libraries'] += default_libs
    elif pkg_libs:
        output['libraries'] += pkg_libs.split()
def configure_v8(o, options):
    """Translate the V8-related configure options into gyp variables."""
    v = o['variables']
    v['v8_enable_lite_mode'] = 1 if options.v8_lite_mode else 0
    v['v8_enable_gdbjit'] = 1 if options.gdb else 0
    v['v8_no_strict_aliasing'] = 1  # Work around compiler bugs.
    v['v8_optimized_debug'] = 0 if options.v8_non_optimized_debug else 1
    v['dcheck_always_on'] = 1 if options.v8_with_dchecks else 0
    v['v8_enable_object_print'] = 1 if options.v8_enable_object_print else 0
    v['v8_random_seed'] = 0  # Use a random seed for hash tables.
    v['v8_promise_internal_field_count'] = 1  # Promise internal field for async hooks.
    v['v8_use_siphash'] = 0 if options.without_siphash else 1
    v['v8_enable_pointer_compression'] = 1 if options.enable_pointer_compression else 0
    v['v8_enable_31bit_smis_on_64bit_arch'] = 1 if options.enable_pointer_compression else 0
    v['v8_trace_maps'] = 1 if options.trace_maps else 0
    v['node_use_v8_platform'] = b(not options.without_v8_platform)
    v['node_use_bundled_v8'] = b(not options.without_bundled_v8)
    v['force_dynamic_crt'] = 1 if options.shared else 0
    v['node_enable_d8'] = b(options.enable_d8)
    if options.enable_d8:
        v['test_isolation_mode'] = 'noop'  # Needed by d8.gyp.
    if options.without_bundled_v8 and options.enable_d8:
        raise Exception('--enable-d8 is incompatible with --without-bundled-v8.')
def configure_openssl(o, options):
    """Translate the OpenSSL-related configure options into gyp variables.

    Validates mutually exclusive options; `error(...)` presumably aborts
    the configure run (NOTE(review): confirm it does not return).
    """
    variables = o['variables']
    variables['node_use_openssl'] = b(not options.without_ssl)
    variables['node_shared_openssl'] = b(options.shared_openssl)
    variables['openssl_is_fips'] = b(options.openssl_is_fips)
    variables['openssl_fips'] = ''
    if options.openssl_no_asm:
        variables['openssl_no_asm'] = 1
    if options.without_ssl:
        # Every SSL-dependent option is incompatible with --without-ssl.
        def without_ssl_error(option):
            error('--without-ssl is incompatible with %s' % option)
        if options.shared_openssl:
            without_ssl_error('--shared-openssl')
        if options.openssl_no_asm:
            without_ssl_error('--openssl-no-asm')
        if options.openssl_fips:
            without_ssl_error('--openssl-fips')
        if options.openssl_default_cipher_list:
            without_ssl_error('--openssl-default-cipher-list')
        if options.experimental_quic:
            without_ssl_error('--experimental-quic')
        return
    if options.use_openssl_ca_store:
        o['defines'] += ['NODE_OPENSSL_CERT_STORE']
    if options.openssl_system_ca_path:
        variables['openssl_system_ca_path'] = options.openssl_system_ca_path
    variables['node_without_node_options'] = b(options.without_node_options)
    if options.without_node_options:
        o['defines'] += ['NODE_WITHOUT_NODE_OPTIONS']
    if options.openssl_default_cipher_list:
        variables['openssl_default_cipher_list'] = \
            options.openssl_default_cipher_list
    if not options.shared_openssl and not options.openssl_no_asm:
        is_x86 = 'x64' in variables['target_arch'] or 'ia32' in variables['target_arch']
        # Building the bundled OpenSSL asm on x86 needs a recent assembler; see
        # blob/OpenSSL_1_1_0-stable/crypto/modes/asm/aesni-gcm-x86_64.pl#L52-L69
        openssl110_asm_supported = \
            ('gas_version' in variables and StrictVersion(variables['gas_version']) >= StrictVersion('2.23')) or \
            ('xcode_version' in variables and StrictVersion(variables['xcode_version']) >= StrictVersion('5.0')) or \
            ('llvm_version' in variables and StrictVersion(variables['llvm_version']) >= StrictVersion('3.3')) or \
            ('nasm_version' in variables and StrictVersion(variables['nasm_version']) >= StrictVersion('2.10'))
        if is_x86 and not openssl110_asm_supported:
            error('''Did not find a new enough assembler, install one or build with
--openssl-no-asm.
Please refer to BUILDING.md''')
    elif options.openssl_no_asm:
        warn('''--openssl-no-asm will result in binaries that do not take advantage
of modern CPU cryptographic instructions and will therefore be slower.
Please refer to BUILDING.md''')
    if options.openssl_no_asm and options.shared_openssl:
        error('--openssl-no-asm is incompatible with --shared-openssl')
    # NOTE(review): the `== ''` arm makes an explicitly empty --openssl-fips
    # value an error too — confirm this is the intended trigger condition.
    if options.openssl_fips or options.openssl_fips == '':
        error('FIPS is not supported in this version of Node.js')
    configure_library(options, 'openssl', o)
def configure_static(o, options):
    """Add static-linking flags; refused (with a warning) on mac."""
    if not (options.fully_static or options.partly_static):
        return
    if flavor == 'mac':
        warn("Generation of static executable will not work on OSX "
             "when using the default compilation environment")
        return
    if options.fully_static:
        o['libraries'] += ['-static']
    elif options.partly_static:
        o['libraries'] += ['-static-libgcc', '-static-libstdc++']
        if options.enable_asan:
            o['libraries'] += ['-static-libasan']
def write(filename, data, options):
    """Write *data* to *filename*, logging the file name in verbose mode.

    :param filename: path of the file to (over)write
    :param data: full text content to write
    :param options: parsed configure options (consulted by print_verbose)
    """
    print_verbose('creating %s' % filename, options)
    # 'w' (not 'w+') is sufficient: the file is only written, never read back.
    with open(filename, 'w+') as f:
        f.write(data)
def glob_to_var(dir_base, dir_sub, patch_dir):
    """Collect C/C++ source and header files from dir_base/dir_sub.

    Returns paths relative to dir_base using '/' separators (the output
    is consumed by gyp). If patch_dir contains a file of the same name,
    that "floating patch" is used instead of the original. Only the top
    level of dir_sub is scanned: the walk deliberately stops after the
    first directory.

    :param dir_base: base directory of the scan
    :param dir_sub: subdirectory (relative to dir_base) whose files are listed
    :param patch_dir: optional directory (relative to dir_base) with overrides
    :return: list of '/'-separated relative file paths
    """
    sources = []  # renamed from `list`, which shadowed the builtin
    dir_all = '%s/%s' % (dir_base, dir_sub)
    for path, dirs, files in os.walk(dir_all):
        for file in files:
            if file.endswith(('.cpp', '.c', '.h')):
                # srcfile uses "slash" as dir separator as its output is consumed by gyp
                srcfile = '%s/%s' % (dir_sub, file)
                if patch_dir:
                    patchfile = '%s/%s/%s' % (dir_base, patch_dir, file)
                    if os.path.isfile(patchfile):
                        srcfile = '%s/%s' % (patch_dir, file)
                        info('Using floating patch "%s" from "%s"' % (patchfile, dir_base))
                sources.append(srcfile)
        # Deliberate: do not recurse into subdirectories.
        break
    return sources
do_not_edit = '# Do not edit. Generated by the configure script.\n'
def configure_intl(o,options,icu_versions,icu_current_ver_dep):
auto_downloads = nodedownload.parse(options.download_list)
def icu_download(path):
depFile = icu_current_ver_dep
with open(depFile) as f:
icus = json.load(f)
# download ICU, if needed
if not os.access(options.download_path, os.W_OK):
error('''Cannot write to desired download path.
Either create it or verify permissions.''')
attemptdownload = nodedownload.candownload(auto_downloads, "icu")
for icu in icus:
url = icu['url']
(expectHash, hashAlgo, allAlgos) = nodedownload.findHash(icu)
if not expectHash:
error('''Could not find a hash to verify ICU download.
%s may be incorrect.
For the entry %s,
Expected one of these keys: %s''' % (depFile, url, ' '.join(allAlgos)))
local = url.split('/')[-1]
targetfile = os.path.join(options.download_path, local)
if not os.path.isfile(targetfile):
if attemptdownload:
nodedownload.retrievefile(url, targetfile)
else:
print('Re-using existing %s' % targetfile)
if os.path.isfile(targetfile):
print('Checking file integrity with %s:\r' % hashAlgo)
gotHash = nodedownload.checkHash(targetfile, hashAlgo)
print('%s: | |
<reponame>pcguruuu/git-git.nvaccess.org-nvda
# -*- coding: UTF-8 -*-
#NVDAObjects/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2014 NV Access Limited, <NAME>, <NAME>, <NAME>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Module that contains the base NVDA object type"""
from new import instancemethod
import time
import re
import weakref
from logHandler import log
import review
import eventHandler
from displayModel import DisplayModelTextInfo
import baseObject
import speech
import api
import textInfos.offsets
import config
import controlTypes
import appModuleHandler
import treeInterceptorHandler
import braille
import globalPluginHandler
class NVDAObjectTextInfo(textInfos.offsets.OffsetsTextInfo):
	"""A default TextInfo enabling text review of widgets that don't support text content.
	The L{NVDAObject.basicText} attribute is exposed as the story text.
	"""
	# Synthesized text has no sensible on-screen location.
	locationText=None

	def _get_unit_mouseChunk(self):
		# The whole story is treated as a single chunk for mouse tracking.
		return textInfos.UNIT_STORY

	def _getStoryText(self):
		return self.obj.basicText

	def _getStoryLength(self):
		# Delegate through _getStoryText so subclass overrides are honoured.
		return len(self._getStoryText())

	def _getTextRange(self,start,end):
		return self._getStoryText()[start:end]
class InvalidNVDAObject(RuntimeError):
	"""Raised during NVDAObject construction to signal that the object is invalid.
	For NVDA's purposes such an object should be considered non-existent;
	L{DynamicNVDAObjectType} catches this exception and returns C{None}.
	"""
class DynamicNVDAObjectType(baseObject.ScriptableObject.__class__):
	"""Metaclass that constructs NVDAObject instances.
	Construction picks the best API-level class, instantiates it, then mutates
	the instance's class into a dynamically created type that combines the API
	class with any overlay classes chosen by the object, its app module and
	running global plugins. Dynamic types are cached per bases tuple.
	"""
	# Cache of dynamically created classes, keyed by their tuple of bases.
	_dynamicClassCache={}

	def __call__(self,chooseBestAPI=True,**kwargs):
		if chooseBestAPI:
			APIClass=self.findBestAPIClass(kwargs)
			# No suitable API class: no object can be created for these kwargs.
			if not APIClass: return None
		else:
			APIClass=self
		# Instantiate the requested class.
		try:
			obj=APIClass.__new__(APIClass,**kwargs)
			obj.APIClass=APIClass
			if isinstance(obj,self):
				obj.__init__(**kwargs)
		except InvalidNVDAObject, e:
			# Construction declared the object invalid; treat as non-existent.
			log.debugWarning("Invalid NVDAObject: %s" % e, stack_info=True)
			return None
		clsList = []
		if "findOverlayClasses" in APIClass.__dict__:
			obj.findOverlayClasses(clsList)
		else:
			clsList.append(APIClass)
		# Allow app modules to choose overlay classes.
		appModule=obj.appModule
		# optimisation: The base implementation of chooseNVDAObjectOverlayClasses does nothing,
		# so only call this method if it's been overridden.
		if appModule and not hasattr(appModule.chooseNVDAObjectOverlayClasses, "_isBase"):
			appModule.chooseNVDAObjectOverlayClasses(obj, clsList)
		# Allow global plugins to choose overlay classes.
		for plugin in globalPluginHandler.runningPlugins:
			if "chooseNVDAObjectOverlayClasses" in plugin.__class__.__dict__:
				plugin.chooseNVDAObjectOverlayClasses(obj, clsList)
		# Determine the bases for the new class.
		bases=[]
		for index in xrange(len(clsList)):
			# A class doesn't need to be a base if it is already implicitly included by being a superclass of a previous base.
			if index==0 or not issubclass(clsList[index-1],clsList[index]):
				bases.append(clsList[index])
		# Construct the new class.
		if len(bases) == 1:
			# We only have one base, so there's no point in creating a dynamic type.
			newCls=bases[0]
		else:
			bases=tuple(bases)
			newCls=self._dynamicClassCache.get(bases,None)
			if not newCls:
				name="Dynamic_%s"%"".join([x.__name__ for x in clsList])
				newCls=type(name,bases,{})
				self._dynamicClassCache[bases]=newCls
		oldMro=frozenset(obj.__class__.__mro__)
		# Mutate obj into the new class.
		obj.__class__=newCls
		# Initialise the overlay classes.
		for cls in reversed(newCls.__mro__):
			if cls in oldMro:
				# This class was part of the initially constructed object, so its constructor would have been called.
				continue
			initFunc=cls.__dict__.get("initOverlayClass")
			if initFunc:
				initFunc(obj)
			# Bind gestures specified on the class.
			try:
				obj.bindGestures(getattr(cls, "_%s__gestures" % cls.__name__))
			except AttributeError:
				# The class declares no __gestures mapping; nothing to bind.
				pass
		# Allow app modules to make minor tweaks to the instance.
		if appModule and hasattr(appModule,"event_NVDAObject_init"):
			appModule.event_NVDAObject_init(obj)
		return obj

	@classmethod
	def clearDynamicClassCache(cls):
		"""Clear the dynamic class cache.
		This should be called when a plugin is unloaded so that any used overlay classes in the unloaded plugin can be garbage collected.
		"""
		cls._dynamicClassCache.clear()
class NVDAObject(baseObject.ScriptableObject):
"""NVDA's representation of a single control/widget.
Every widget, regardless of how it is exposed by an application or the operating system, is represented by a single NVDAObject instance.
This allows NVDA to work with all widgets in a uniform way.
An NVDAObject provides information about the widget (e.g. its name, role and value),
as well as functionality to manipulate it (e.g. perform an action or set focus).
Events for the widget are handled by special event methods on the object.
Commands triggered by input from the user can also be handled by special methods called scripts.
See L{ScriptableObject} for more details.
The only attribute that absolutely must be provided is L{processID}.
However, subclasses should provide at least the L{name} and L{role} attributes in order for the object to be meaningful to the user.
Attributes such as L{parent}, L{firstChild}, L{next} and L{previous} link an instance to other NVDAObjects in the hierarchy.
In order to facilitate access to text exposed by a widget which supports text content (e.g. an editable text control),
a L{textInfos.TextInfo} should be implemented and the L{TextInfo} attribute should specify this class.
There are two main types of NVDAObject classes:
* API classes, which provide the core functionality to work with objects exposed using a particular API (e.g. MSAA/IAccessible).
* Overlay classes, which supplement the core functionality provided by an API class to handle a specific widget or type of widget.
Most developers need only be concerned with overlay classes.
The overlay classes to be used for an instance are determined using the L{findOverlayClasses} method on the API class.
An L{AppModule} can also choose overlay classes for an instance using the L{AppModule.chooseNVDAObjectOverlayClasses} method.
"""
__metaclass__=DynamicNVDAObjectType
cachePropertiesByDefault = True
#: The TextInfo class this object should use to provide access to text.
#: @type: type; L{textInfos.TextInfo}
TextInfo=NVDAObjectTextInfo
@classmethod
def findBestAPIClass(cls,kwargs,relation=None):
	"""
	Find the highest-level API class this object can be created with, updating kwargs along the way.
	@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
	@type relation: string
	@param kwargs: the arguments necessary to construct an object of the class this method was called on; updated in place.
	@type kwargs: dictionary
	@returns: the best API class, or None if only the abstract NVDAObject base would remain.
	@rtype: L{DynamicNVDAObjectType}
	"""
	newAPIClass=cls
	if 'getPossibleAPIClasses' in newAPIClass.__dict__:
		for possibleAPIClass in newAPIClass.getPossibleAPIClasses(kwargs,relation=relation):
			# Every candidate must be able to translate the current kwargs
			# into its own constructor kwargs.
			if 'kwargsFromSuper' not in possibleAPIClass.__dict__:
				log.error("possible API class %s does not implement kwargsFromSuper"%possibleAPIClass)
				continue
			# Recurse into the first candidate that accepts these kwargs.
			if possibleAPIClass.kwargsFromSuper(kwargs,relation=relation):
				return possibleAPIClass.findBestAPIClass(kwargs,relation=relation)
	# NVDAObject itself is never a usable API class.
	return newAPIClass if newAPIClass is not NVDAObject else None
@classmethod
def getPossibleAPIClasses(cls,kwargs,relation=None):
	"""
	Generate all possible API classes (in priority order) that inherit directly from the class this was called on.
	@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
	@type relation: string
	@param kwargs: the arguments necessary to construct an object of the class this method was called on.
	@type kwargs: dictionary
	@returns: a generator of API classes
	@rtype: generator
	"""
	# Imported here rather than at module level — presumably to avoid a
	# circular import at load time; confirm before moving it.
	import NVDAObjects.window
	yield NVDAObjects.window.Window
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
	"""
	Determine whether this class can be instantiated from the given super kwargs.
	If so, update kwargs in place with everything needed to instantiate this class and return True.
	If this class cannot be instantiated, return False and leave kwargs untouched.
	@param relation: why this class is being instantiated: parent, focus, foreground etc.
	@type relation: string
	@param kwargs: the kwargs for constructing this class's super class.
	@type kwargs: dict
	@rtype: boolean
	"""
	# API subclasses must override this (see findBestAPIClass).
	raise NotImplementedError
def findOverlayClasses(self, clsList):
	"""Choose overlay classes which should be added to this object's class structure after the object has been initially instantiated.
	After an NVDAObject class (normally an API-level class) is instantiated, this method is called on the instance to choose appropriate overlay classes.
	This method may use properties, etc. on the instance to make this choice.
	The object's class structure is then mutated to contain these classes.
	L{initOverlayClass} is then called for each class which was not part of the initially instantiated object.
	This process allows an NVDAObject to be dynamically created using the most appropriate NVDAObject subclass at each API level.
	Classes should be listed with subclasses first. That is, subclasses should generally call super and then append their own classes to the list.
	For example: when called on an IAccessible NVDAObject, the list might contain DialogIaccessible (a subclass of IAccessible) and Edit (a subclass of Window).
	@param clsList: The list of classes, which will be modified by this method if appropriate.
	@type clsList: list of L{NVDAObject}
	"""
	# Base implementation: the generic NVDAObject is always the last resort.
	clsList.append(NVDAObject)
beTransparentToMouse=False #:If true then NVDA will never consider the mouse to be on this object, rather it will be on an ancestor.
@staticmethod
def objectFromPoint(x,y):
	"""Retrieve an NVDAObject representing the control at the given screen coordinates.
	@param x: the x coordinate.
	@type x: int
	@param y: the y coordinate.
	@type y: int
	@return: the object at the given coordinates, or None if none could be determined.
	@rtype: L{NVDAObject}
	"""
	kwargs={}
	APIClass=NVDAObject.findBestAPIClass(kwargs,relation=(x,y))
	if not APIClass:
		return None
	return APIClass(chooseBestAPI=False,**kwargs)
@staticmethod
def objectWithFocus():
	"""Retrieve the object with focus according to the Operating System.
	This differs from NVDA's focus object, which reflects NVDA's own notion of focus.
	@return: the object with focus, or None if it could not be determined.
	@rtype: L{NVDAObject}
	"""
	kwargs={}
	APIClass=NVDAObject.findBestAPIClass(kwargs,relation="focus")
	if not APIClass:
		return None
	obj=APIClass(chooseBestAPI=False,**kwargs)
	if not obj:
		return None
	# Honour focus redirection (e.g. a container delegating its focus).
	return obj.focusRedirect or obj
@staticmethod
def objectInForeground():
	"""Retrieve the current foreground control according to the Operating System.
	This differs from NVDA's foreground object, which reflects NVDA's own notion of the foreground.
	@return: the foreground object, or None if it could not be determined.
	@rtype: L{NVDAObject}
	"""
	kwargs={}
	APIClass=NVDAObject.findBestAPIClass(kwargs,relation="foreground")
	if not APIClass:
		return None
	return APIClass(chooseBestAPI=False,**kwargs)
def __init__(self):
	"""Initialise per-instance state; overlay/API subclasses must call this."""
	super(NVDAObject,self).__init__()
	self._mouseEntered=False #:True if the mouse has entered this object (for use in L{event_mouseMoved})
	self.textRepresentationLineLength=None #:If an integer greater than 0 then lines of text in this object are always this long.
def _isEqual(self,other):
	"""Calculate whether this object is equal to another object. Used by L{NVDAObject.__eq__}.
	@param other: the other object to compare with.
	@type other: L{NVDAObject}
	@return: True if equal, False otherwise.
	@rtype: boolean
	"""
	# Base implementation: objects of the same exact type compare equal;
	# API subclasses refine this with API-specific identity checks.
	return True
def __eq__(self,other):
	"""Objects are equal when identical, or of the same exact type and
	L{NVDAObject._isEqual} reports them equal.
	"""
	return self is other or (
		type(self) is type(other) and self._isEqual(other))
def __ne__(self,other):
	"""The inverse of L{NVDAObject.__eq__}."""
	return not self.__eq__(other)
focusRedirect=None #: Another object which should be treeted as the focus if focus is ever given to this object.
def | |
#!/usr/bin/env python
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2008-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
import os
import glob
import platform
import ctypes
import ctypes.util
from ctypes import c_char_p
from ctypes import c_void_p
from ctypes import c_long
from ctypes import c_longlong
from ctypes import POINTER
from ctypes import pointer
from .drmaa2_ctypes import drmaa2_error
from .drmaa2_ctypes import drmaa2_bool
from .drmaa2_ctypes import drmaa2_capability
from .drmaa2_ctypes import drmaa2_string
from .drmaa2_ctypes import drmaa2_string_list
from .drmaa2_ctypes import drmaa2_list
from .drmaa2_ctypes import drmaa2_listtype
from .drmaa2_ctypes import drmaa2_list_entryfree
from .drmaa2_ctypes import drmaa2_dict
from .drmaa2_ctypes import drmaa2_dict_entryfree
from .drmaa2_ctypes import drmaa2_j
from .drmaa2_ctypes import drmaa2_j_list
from .drmaa2_ctypes import drmaa2_jarray
from .drmaa2_ctypes import drmaa2_jinfo
from .drmaa2_ctypes import drmaa2_jtemplate
from .drmaa2_ctypes import drmaa2_jstate
from .drmaa2_ctypes import drmaa2_r
from .drmaa2_ctypes import drmaa2_r_list
from .drmaa2_ctypes import drmaa2_rinfo
from .drmaa2_ctypes import drmaa2_rtemplate
from .drmaa2_ctypes import drmaa2_slotinfo
from .drmaa2_ctypes import drmaa2_queueinfo
from .drmaa2_ctypes import drmaa2_queueinfo_list
from .drmaa2_ctypes import drmaa2_machineinfo
from .drmaa2_ctypes import drmaa2_machineinfo_list
from .drmaa2_ctypes import drmaa2_jsession
from .drmaa2_ctypes import drmaa2_rsession
from .drmaa2_ctypes import drmaa2_msession
from .drmaa2_ctypes import drmaa2_sudo
from .drmaa2_ctypes import drmaa2_notification
from .drmaa2_ctypes import drmaa2_version
from .drmaa2_ctypes import drmaa2_time
from .drmaa2_ctypes import drmaa2_callback
from .byte_string import ByteString
from .log_manager import LogManager
from .singleton import Singleton
from .drmaa2_exceptions import Drmaa2Exception
class LibraryManager(Singleton):
"""
Singleton class for loading and keeping reference to the
underlying C library.
"""
logger = LogManager.get_instance().get_logger('LibraryManager')
__instance = None
def __init__(self):
    """
    Constructor.

    Implements the singleton: the first construction loads the DRMAA2 C
    library and registers itself as the shared instance; subsequent
    constructions return early and reuse the already-initialised state.

    >>> lm = LibraryManager()
    """
    if LibraryManager.__instance:
        # Already initialised by an earlier construction.
        return
    LibraryManager.__instance = self
    # Handle to the loaded DRMAA2 C shared library (ctypes CDLL).
    self.drmaa2_library = self.__load_drmaa2_library()
def get_drmaa2_library(self):
    """
    Get reference to the DRMAA2 C library.

    :returns: the ctypes handle loaded by the constructor

    >>> drmaa2_lib = LibraryManager.get_instance().get_drmaa2_library()
    """
    return self.drmaa2_library
def to_py_string(self, ctypes_string):
py_string = ByteString(ctypes_string.value).decode()
self.drmaa2_library.drmaa2_string_free(pointer(ctypes_string))
return py_string
@classmethod
def get_drms_name(cls):
"""
Retrieve DRMS name.
:returns: DRMS name
>>> print(LibraryManager.get_drms_name())
Univa Grid Engine
"""
lm = LibraryManager.get_instance()
ctypes_string = lm.get_drmaa2_library().drmaa2_get_drms_name();
return lm.to_py_string(ctypes_string)
@classmethod
def get_drmaa_name(cls):
"""
Retrieve DRMAA name.
:returns: DRMAA name
>>> print(LibraryManager.get_drmaa_name())
Univa Grid Engine Drmaa V2
"""
lm = LibraryManager.get_instance()
ctypes_string = lm.get_drmaa2_library().drmaa2_get_drmaa_name();
return lm.to_py_string(ctypes_string)
@classmethod
def drmaa_supports(cls, capability):
"""
Check whether the library supports given capability/
:returns: True if capability is supported, false otherwise.
>>> print(LibraryManager.drmaa_supports(Capability.JT_EMAIL))
True
"""
lm = LibraryManager.get_instance()
c = capability
return lm.get_drmaa2_library().drmaa2_supports(int(c)) > 0;
@classmethod
def __load_drmaa2_library(cls):
cls.logger.debug('Loading DRMAA2 library')
libc_name = ctypes.util.find_library('c')
libc = ctypes.CDLL(libc_name)
libc.memcmp.argtypes = (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)
lib_path = 'libdrmaa2.so'
drmaa2_lib = None
try:
SGE_ROOT = os.environ['SGE_ROOT']
p = os.popen(SGE_ROOT + '/util/arch')
try:
SGE_ARCH = p.read().rstrip()
finally:
p.close()
lib_dir = SGE_ROOT + '/drmaa/lib/' + SGE_ARCH
cls.logger.debug('Looking for DRMAA2 library under %s' % lib_dir)
lib_paths = glob.glob(lib_dir + '/libdrmaa2.so')
if not lib_paths:
lib_paths = glob.glob(lib_dir + '/libdrmaa2.dylib')
if len(lib_paths):
lib_path = lib_paths[0]
else:
cls.logger.warn('Could not find DRMAA2 library under %s' % lib_dir)
except KeyError:
cls.logger.debug('SGE_ROOT is not defined')
cls.logger.debug('Library path: %s' % lib_path)
try:
drmaa2_lib = ctypes.cdll.LoadLibrary(str(lib_path))
except OSError:
raise Drmaa2Exception('Could not load DRMAA2 library.')
cls.logger.debug("Initializing DRMAA2 library")
drmaa2_lib.drmaa2_string_free.restype = None
drmaa2_lib.drmaa2_string_free.argtypes = [POINTER(drmaa2_string)]
drmaa2_lib.drmaa2_list_create.restype = drmaa2_list
drmaa2_lib.drmaa2_list_create.argtypes = [drmaa2_listtype, drmaa2_list_entryfree]
drmaa2_lib.drmaa2_list_free.restype = None
drmaa2_lib.drmaa2_list_free.argtypes = [POINTER(drmaa2_list)]
drmaa2_lib.drmaa2_list_get.restype = c_void_p
drmaa2_lib.drmaa2_list_get.argtypes = [drmaa2_list, c_long]
drmaa2_lib.drmaa2_list_add.restype = drmaa2_error
drmaa2_lib.drmaa2_list_add.argtypes = [drmaa2_list, c_void_p]
drmaa2_lib.drmaa2_list_del.restype = drmaa2_error
drmaa2_lib.drmaa2_list_del.argtypes = [drmaa2_list, c_long]
drmaa2_lib.drmaa2_list_size.restype = c_long
drmaa2_lib.drmaa2_list_size.argtypes = [drmaa2_list]
drmaa2_lib.drmaa2_lasterror.restype = drmaa2_error
drmaa2_lib.drmaa2_lasterror.argtypes = []
drmaa2_lib.drmaa2_lasterror_text.restype = drmaa2_string
drmaa2_lib.drmaa2_lasterror_text.argtypes = []
# UGE-specific
drmaa2_lib.uge_drmaa2_list_free_root.restype = None
drmaa2_lib.uge_drmaa2_list_free_root.argtypes = [POINTER(drmaa2_list)]
drmaa2_lib.uge_drmaa2_list_set.restype = drmaa2_error
drmaa2_lib.uge_drmaa2_list_set.argtypes = [drmaa2_list, c_long, c_void_p]
drmaa2_lib.uge_vi_impl_spec_get.restype = drmaa2_dict
drmaa2_lib.uge_vi_impl_spec_get.argtypes = [POINTER(drmaa2_version)]
drmaa2_lib.drmaa2_dict_create.restype = drmaa2_dict
drmaa2_lib.drmaa2_dict_create.argtypes = [drmaa2_dict_entryfree]
drmaa2_lib.drmaa2_dict_free.restype = None
drmaa2_lib.drmaa2_dict_free.argtypes = [POINTER(drmaa2_dict)]
drmaa2_lib.drmaa2_dict_list.restype = drmaa2_string_list
drmaa2_lib.drmaa2_dict_list.argtypes = [drmaa2_dict]
drmaa2_lib.drmaa2_dict_has.restype = drmaa2_bool
drmaa2_lib.drmaa2_dict_has.argtypes = [drmaa2_dict, c_char_p]
drmaa2_lib.drmaa2_dict_get.restype = c_char_p
drmaa2_lib.drmaa2_dict_get.argtypes = [drmaa2_dict, c_char_p]
drmaa2_lib.drmaa2_dict_del.restype = drmaa2_error
drmaa2_lib.drmaa2_dict_del.argtypes = [drmaa2_dict, c_char_p]
drmaa2_lib.drmaa2_dict_set.restype = drmaa2_error
drmaa2_lib.drmaa2_dict_set.argtypes = [drmaa2_dict, c_char_p]
drmaa2_lib.drmaa2_jinfo_create.restype = POINTER(drmaa2_jinfo)
drmaa2_lib.drmaa2_jinfo_create.argtypes = []
drmaa2_lib.drmaa2_jinfo_free.restype = None
drmaa2_lib.drmaa2_jinfo_free.argtypes = [POINTER(POINTER(drmaa2_jinfo))]
drmaa2_lib.drmaa2_slotinfo_free.restype = None
drmaa2_lib.drmaa2_slotinfo_free.argtypes = [POINTER(POINTER(drmaa2_slotinfo))]
drmaa2_lib.drmaa2_rinfo_create.restype = POINTER(drmaa2_rinfo)
drmaa2_lib.drmaa2_rinfo_create.argtypes = []
drmaa2_lib.drmaa2_rinfo_free.restype = None
drmaa2_lib.drmaa2_rinfo_free.argtypes = [POINTER(POINTER(drmaa2_rinfo))]
drmaa2_lib.drmaa2_jtemplate_create.restype = POINTER(drmaa2_jtemplate)
drmaa2_lib.drmaa2_jtemplate_create.argtypes = []
drmaa2_lib.drmaa2_jtemplate_free.restype = None
drmaa2_lib.drmaa2_jtemplate_free.argtypes = [POINTER(POINTER(drmaa2_jtemplate))]
drmaa2_lib.drmaa2_rtemplate_create.restype = POINTER(drmaa2_rtemplate)
drmaa2_lib.drmaa2_rtemplate_create.argtypes = []
drmaa2_lib.drmaa2_rtemplate_free.restype = None
drmaa2_lib.drmaa2_rtemplate_free.argtypes = [POINTER(POINTER(drmaa2_rtemplate))]
drmaa2_lib.drmaa2_queueinfo_free.restype = None
drmaa2_lib.drmaa2_queueinfo_free.argtypes = [POINTER(POINTER(drmaa2_queueinfo))]
drmaa2_lib.drmaa2_machineinfo_free.restype = None
drmaa2_lib.drmaa2_machineinfo_free.argtypes = [POINTER(POINTER(drmaa2_machineinfo))]
drmaa2_lib.drmaa2_notification_free.restype = None
drmaa2_lib.drmaa2_notification_free.argtypes = [POINTER(POINTER(drmaa2_notification))]
drmaa2_lib.drmaa2_version_free.restype = None
drmaa2_lib.drmaa2_version_free.argtypes = [POINTER(POINTER(drmaa2_version))]
drmaa2_lib.drmaa2_jtemplate_impl_spec.restype = drmaa2_string_list
drmaa2_lib.drmaa2_jtemplate_impl_spec.argtypes = []
drmaa2_lib.drmaa2_jinfo_impl_spec.restype = drmaa2_string_list
drmaa2_lib.drmaa2_jinfo_impl_spec.argtypes = []
drmaa2_lib.drmaa2_rtemplate_impl_spec.restype = drmaa2_string_list
drmaa2_lib.drmaa2_rtemplate_impl_spec.argtypes = []
drmaa2_lib.drmaa2_rinfo_impl_spec.restype = drmaa2_string_list
drmaa2_lib.drmaa2_rinfo_impl_spec.argtypes = []
drmaa2_lib.drmaa2_queueinfo_impl_spec.restype = drmaa2_string_list
drmaa2_lib.drmaa2_queueinfo_impl_spec.argtypes = []
drmaa2_lib.drmaa2_machineinfo_impl_spec.restype = drmaa2_string_list
drmaa2_lib.drmaa2_machineinfo_impl_spec.argtypes = []
drmaa2_lib.drmaa2_notification_impl_spec.restype = drmaa2_string_list
drmaa2_lib.drmaa2_notification_impl_spec.argtypes = []
drmaa2_lib.drmaa2_version_impl_spec.restype = drmaa2_string_list
drmaa2_lib.drmaa2_version_impl_spec.argtypes = []
drmaa2_lib.drmaa2_get_instance_value.restype = drmaa2_string
drmaa2_lib.drmaa2_get_instance_value.argtypes = [c_void_p, c_char_p]
drmaa2_lib.drmaa2_describe_attribute.restype = drmaa2_string
drmaa2_lib.drmaa2_describe_attribute.argtypes = [c_void_p, c_char_p]
drmaa2_lib.drmaa2_set_instance_value.restype = drmaa2_error
drmaa2_lib.drmaa2_set_instance_value.argtypes = [c_void_p, c_char_p, c_char_p]
drmaa2_lib.drmaa2_jsession_free.restype = None
drmaa2_lib.drmaa2_jsession_free.argtypes = [POINTER(POINTER(drmaa2_jsession))]
drmaa2_lib.drmaa2_rsession_free.restype = None
drmaa2_lib.drmaa2_rsession_free.argtypes = [POINTER(POINTER(drmaa2_rsession))]
drmaa2_lib.drmaa2_msession_free.restype = None
drmaa2_lib.drmaa2_msession_free.argtypes = [POINTER(POINTER(drmaa2_msession))]
drmaa2_lib.drmaa2_j_free.restype = None
drmaa2_lib.drmaa2_j_free.argtypes = [POINTER(POINTER(drmaa2_j))]
drmaa2_lib.drmaa2_jarray_free.restype = None
drmaa2_lib.drmaa2_jarray_free.argtypes = [POINTER(POINTER(drmaa2_jarray))]
drmaa2_lib.drmaa2_r_free.restype = None
drmaa2_lib.drmaa2_r_free.argtypes = [POINTER(POINTER(drmaa2_r))]
drmaa2_lib.drmaa2_rsession_get_contact.restype = drmaa2_string
drmaa2_lib.drmaa2_rsession_get_contact.argtypes = [drmaa2_rsession]
drmaa2_lib.drmaa2_rsession_get_session_name.restype = drmaa2_string
drmaa2_lib.drmaa2_rsession_get_session_name.argtypes = [drmaa2_rsession]
drmaa2_lib.drmaa2_rsession_get_reservation.restype = POINTER(drmaa2_r)
drmaa2_lib.drmaa2_rsession_get_reservation.argtypes = [POINTER(drmaa2_rsession), drmaa2_string]
drmaa2_lib.drmaa2_rsession_request_reservation.restype = POINTER(drmaa2_r)
drmaa2_lib.drmaa2_rsession_request_reservation.argtypes = [POINTER(drmaa2_rsession), POINTER(drmaa2_rtemplate)]
drmaa2_lib.drmaa2_rsession_request_reservation_as.restype = POINTER(drmaa2_r)
drmaa2_lib.drmaa2_rsession_request_reservation_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_rsession),
POINTER(drmaa2_rtemplate)]
drmaa2_lib.drmaa2_rsession_get_reservations.restype = drmaa2_r_list
drmaa2_lib.drmaa2_rsession_get_reservations.argtypes = [POINTER(drmaa2_rsession)]
drmaa2_lib.drmaa2_r_get_id.restype = drmaa2_string
drmaa2_lib.drmaa2_r_get_id.argtypes = [drmaa2_r]
drmaa2_lib.drmaa2_r_get_session_name.restype = drmaa2_string
drmaa2_lib.drmaa2_r_get_session_name.argtypes = [drmaa2_r]
# drmaa2_lib.drmaa2_r_get_reservation_template.restype = POINTER(drmaa2_rtemplate)
# drmaa2_lib.drmaa2_r_get_reservation_template.argtypes = [POINTER(drmaa2_r)]
drmaa2_lib.drmaa2_r_get_rtemplate.restype = POINTER(drmaa2_rtemplate)
drmaa2_lib.drmaa2_r_get_rtemplate.argtypes = [POINTER(drmaa2_r)]
drmaa2_lib.drmaa2_r_get_info.restype = POINTER(drmaa2_rinfo)
drmaa2_lib.drmaa2_r_get_info.argtypes = [POINTER(drmaa2_r)]
drmaa2_lib.drmaa2_r_terminate.restype = drmaa2_error
drmaa2_lib.drmaa2_r_terminate.argtypes = [POINTER(drmaa2_r)]
drmaa2_lib.drmaa2_r_terminate_as.restype = drmaa2_error
drmaa2_lib.drmaa2_r_terminate_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_r)]
drmaa2_lib.drmaa2_jarray_get_id.restype = drmaa2_string
drmaa2_lib.drmaa2_jarray_get_id.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jarray_get_jobs.restype = drmaa2_j_list
drmaa2_lib.drmaa2_jarray_get_jobs.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jarray_get_session_name.restype = drmaa2_string
drmaa2_lib.drmaa2_jarray_get_session_name.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jarray_get_jtemplate.restype = POINTER(drmaa2_jtemplate)
drmaa2_lib.drmaa2_jarray_get_jtemplate.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jarray_suspend.restype = drmaa2_error
drmaa2_lib.drmaa2_jarray_suspend.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jarray_resume.restype = drmaa2_error
drmaa2_lib.drmaa2_jarray_resume.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jarray_hold.restype = drmaa2_error
drmaa2_lib.drmaa2_jarray_hold.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jarray_release.restype = drmaa2_error
drmaa2_lib.drmaa2_jarray_release.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jarray_terminate.restype = drmaa2_error
drmaa2_lib.drmaa2_jarray_terminate.argtypes = [POINTER(drmaa2_jarray)]
drmaa2_lib.drmaa2_jsession_get_contact.restype = drmaa2_string
drmaa2_lib.drmaa2_jsession_get_contact.argtypes = [POINTER(drmaa2_jsession)]
drmaa2_lib.drmaa2_jsession_get_session_name.restype = drmaa2_string
drmaa2_lib.drmaa2_jsession_get_session_name.argtypes = [POINTER(drmaa2_jsession)]
drmaa2_lib.drmaa2_jsession_get_job_categories.restype = drmaa2_string_list
drmaa2_lib.drmaa2_jsession_get_job_categories.argtypes = [POINTER(drmaa2_jsession)]
drmaa2_lib.drmaa2_jsession_get_jobs.restype = drmaa2_j_list
drmaa2_lib.drmaa2_jsession_get_jobs.argtypes = [POINTER(drmaa2_jsession), POINTER(drmaa2_jinfo)]
drmaa2_lib.drmaa2_jsession_get_job_array.restype = POINTER(drmaa2_jarray)
drmaa2_lib.drmaa2_jsession_get_job_array.argtypes = [POINTER(drmaa2_jsession), drmaa2_string]
drmaa2_lib.drmaa2_jsession_run_job.restype = POINTER(drmaa2_j)
drmaa2_lib.drmaa2_jsession_run_job.argtypes = [POINTER(drmaa2_jsession), POINTER(drmaa2_jtemplate)]
drmaa2_lib.drmaa2_jsession_run_job_as.restype = POINTER(drmaa2_j)
drmaa2_lib.drmaa2_jsession_run_job_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_jsession),
POINTER(drmaa2_jtemplate)]
drmaa2_lib.drmaa2_jsession_run_bulk_jobs.restype = POINTER(drmaa2_jarray)
drmaa2_lib.drmaa2_jsession_run_bulk_jobs.argtypes = [POINTER(drmaa2_jsession), POINTER(drmaa2_jtemplate),
c_longlong, c_longlong, c_longlong, c_longlong]
drmaa2_lib.drmaa2_jsession_run_bulk_jobs_as.restype = POINTER(drmaa2_jarray)
drmaa2_lib.drmaa2_jsession_run_bulk_jobs_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_jsession),
POINTER(drmaa2_jtemplate), c_longlong, c_longlong,
c_longlong, c_longlong]
drmaa2_lib.drmaa2_jsession_wait_any_started.restype = POINTER(drmaa2_j)
drmaa2_lib.drmaa2_jsession_wait_any_started.argtypes = [POINTER(drmaa2_jsession), drmaa2_j_list, drmaa2_time]
drmaa2_lib.drmaa2_jsession_wait_any_terminated.restype = POINTER(drmaa2_j)
drmaa2_lib.drmaa2_jsession_wait_any_terminated.argtypes = [POINTER(drmaa2_jsession), drmaa2_j_list, drmaa2_time]
drmaa2_lib.drmaa2_jsession_wait_all_started.restype = drmaa2_j_list
drmaa2_lib.drmaa2_jsession_wait_all_started.argtypes = [POINTER(drmaa2_jsession), drmaa2_j_list, drmaa2_time]
drmaa2_lib.drmaa2_jsession_wait_all_terminated.restype = drmaa2_j_list
drmaa2_lib.drmaa2_jsession_wait_all_terminated.argtypes = [POINTER(drmaa2_jsession), drmaa2_j_list, drmaa2_time]
drmaa2_lib.drmaa2_j_suspend.restype = drmaa2_error
drmaa2_lib.drmaa2_j_suspend.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_suspend_as.restype = drmaa2_error
drmaa2_lib.drmaa2_j_suspend_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_resume.restype = drmaa2_error
drmaa2_lib.drmaa2_j_resume.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_resume_as.restype = drmaa2_error
drmaa2_lib.drmaa2_j_resume_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_hold.restype = drmaa2_error
drmaa2_lib.drmaa2_j_hold.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_hold_as.restype = drmaa2_error
drmaa2_lib.drmaa2_j_hold_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_release.restype = drmaa2_error
drmaa2_lib.drmaa2_j_release.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_release_as.restype = drmaa2_error
drmaa2_lib.drmaa2_j_release_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_terminate.restype = drmaa2_error
drmaa2_lib.drmaa2_j_terminate.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_terminate_forced.restype = drmaa2_error
drmaa2_lib.drmaa2_j_terminate_forced.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_terminate_as.restype = drmaa2_error
drmaa2_lib.drmaa2_j_terminate_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_j), drmaa2_bool]
drmaa2_lib.drmaa2_j_terminate_all.restype = drmaa2_error
drmaa2_lib.drmaa2_j_terminate_all.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_terminate_forced_all.restype = drmaa2_error
drmaa2_lib.drmaa2_j_terminate_forced_all.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_terminate_all_as.restype = drmaa2_error
drmaa2_lib.drmaa2_j_terminate_all_as.argtypes = [POINTER(drmaa2_sudo), POINTER(drmaa2_j), drmaa2_bool]
drmaa2_lib.drmaa2_j_reap.restype = drmaa2_error
drmaa2_lib.drmaa2_j_reap.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_get_id.restype = drmaa2_string
drmaa2_lib.drmaa2_j_get_id.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_get_jtemplate.restype = POINTER(drmaa2_jtemplate)
drmaa2_lib.drmaa2_j_get_jtemplate.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_get_state.restype = drmaa2_jstate
drmaa2_lib.drmaa2_j_get_state.argtypes = [POINTER(drmaa2_j), POINTER(drmaa2_string)]
drmaa2_lib.drmaa2_j_get_info.restype = POINTER(drmaa2_jinfo)
drmaa2_lib.drmaa2_j_get_info.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_j_wait_started.restype = drmaa2_error
drmaa2_lib.drmaa2_j_wait_started.argtypes = [POINTER(drmaa2_j), drmaa2_time]
drmaa2_lib.drmaa2_j_wait_terminated.restype = drmaa2_error
drmaa2_lib.drmaa2_j_wait_terminated.argtypes = [POINTER(drmaa2_j)]
drmaa2_lib.drmaa2_msession_get_all_reservations.restype = drmaa2_r_list
drmaa2_lib.drmaa2_msession_get_all_reservations.argtypes = [POINTER(drmaa2_msession)]
drmaa2_lib.drmaa2_msession_get_all_jobs.restype = drmaa2_j_list
drmaa2_lib.drmaa2_msession_get_all_jobs.argtypes = [POINTER(drmaa2_msession), POINTER(drmaa2_jinfo)]
drmaa2_lib.drmaa2_msession_get_all_queues.restype = drmaa2_queueinfo_list
drmaa2_lib.drmaa2_msession_get_all_queues.argtypes = [POINTER(drmaa2_msession), drmaa2_string_list]
drmaa2_lib.drmaa2_msession_get_all_machines.restype = drmaa2_machineinfo_list
drmaa2_lib.drmaa2_msession_get_all_machines.argtypes = [POINTER(drmaa2_msession), drmaa2_string_list]
drmaa2_lib.drmaa2_get_drms_name.restype = drmaa2_string
drmaa2_lib.drmaa2_get_drms_name.argtypes = []
drmaa2_lib.drmaa2_get_drms_version.restype = POINTER(drmaa2_version)
drmaa2_lib.drmaa2_get_drms_version.argtypes = []
drmaa2_lib.drmaa2_get_drmaa_name.restype = drmaa2_string
drmaa2_lib.drmaa2_get_drmaa_name.argtypes = []
drmaa2_lib.drmaa2_get_drmaa_version.restype = POINTER(drmaa2_version)
drmaa2_lib.drmaa2_get_drmaa_version.argtypes = []
drmaa2_lib.drmaa2_supports.restype = drmaa2_bool
drmaa2_lib.drmaa2_supports.argtypes = [drmaa2_capability]
drmaa2_lib.drmaa2_create_jsession.restype = POINTER(drmaa2_jsession)
drmaa2_lib.drmaa2_create_jsession.argtypes = [c_char_p, c_char_p]
drmaa2_lib.drmaa2_create_jsession_as.restype = POINTER(drmaa2_jsession)
drmaa2_lib.drmaa2_create_jsession_as.argtypes = [POINTER(drmaa2_sudo), c_char_p, c_char_p]
drmaa2_lib.drmaa2_create_rsession.restype = POINTER(drmaa2_rsession)
drmaa2_lib.drmaa2_create_rsession.argtypes = [c_char_p, c_char_p]
drmaa2_lib.drmaa2_create_rsession_as.restype = POINTER(drmaa2_rsession)
drmaa2_lib.drmaa2_create_rsession_as.argtypes = [POINTER(drmaa2_sudo), c_char_p, c_char_p]
drmaa2_lib.drmaa2_open_jsession.restype = POINTER(drmaa2_jsession)
drmaa2_lib.drmaa2_open_jsession.argtypes = [c_char_p]
drmaa2_lib.drmaa2_open_rsession.restype = POINTER(drmaa2_rsession)
drmaa2_lib.drmaa2_open_rsession.argtypes = [c_char_p]
drmaa2_lib.drmaa2_open_msession.restype = POINTER(drmaa2_msession)
drmaa2_lib.drmaa2_open_msession.argtypes = [c_char_p]
drmaa2_lib.drmaa2_close_jsession.restype = drmaa2_error
| |
<reponame>joaofbsm/upgraded-guacamole
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Feature extractor and model builder"""
from __future__ import print_function
import os
import sys
import MySQLdb
import pandas as pd
import numpy as np
from tqdm import tqdm
#==================================FUNCTIONS==================================#
def onehot_champions(match, db):
    """One-hot encode the champions picked by each team.

    match: DataFrame whose first 5 rows are the blue team and rows 5-10 the
    red team, each carrying a "championId" column.
    db: open DB connection with a Champion table.
    Returns a 1-D numpy array: blue one-hot vector followed by red one-hot.
    """
    champion_ids = pd.read_sql("SELECT id FROM Champion", db)
    # Map champion id -> slot in the one-hot vector (table row order).
    position = {cid: pos for pos, cid in enumerate(champion_ids["id"])}
    n_champions = len(position)
    encoded = np.zeros(2 * n_champions)
    for offset, team in ((0, match[:5]), (n_champions, match[5:10])):
        for _, player in team.iterrows():
            encoded[offset + position[player["championId"]]] = 1
    return encoded
def onehot_spells(match, db):
    """Tally summoner-spell picks per team over the nine standard spells.

    Both of each player's spells are counted, so a slot can exceed 1.
    Returns a 1-D array: blue-team counts followed by red-team counts.
    """
    spell_ids = pd.read_sql("SELECT id FROM SummonerSpell "
                            "WHERE name='Barrier' OR name='Cleanse' "
                            "OR name='Exhaust' OR name='Flash' OR name='Ghost' "
                            "OR name='Heal' OR name='Ignite' OR name='Smite' "
                            "OR name='Teleport'", db)
    # Map spell id -> slot in the count vector (query row order).
    position = {sid: pos for pos, sid in enumerate(spell_ids["id"])}
    n_spells = len(position)
    counts = np.zeros(2 * n_spells)
    for offset, team in ((0, match[:5]), (n_spells, match[5:10])):
        for _, player in team.iterrows():
            counts[offset + position[player["spell1Id"]]] += 1
            counts[offset + position[player["spell2Id"]]] += 1
    return counts
def onehot_summoner_masteries_team(match, db, cursor):
    """Accumulate mastery ranks per team into two 45-slot vectors.

    For each player, the masteries taken in this match (Summoner's Rift
    only, mapId = 11) are summed by rank into the slot given by the Mastery
    table ordering. Returns the blue vector followed by the red vector
    (length 90).
    """
    mastery_ids = pd.read_sql("SELECT id FROM Mastery", db)
    # Map mastery id -> slot in the 45-entry vector (table row order).
    position = {mid: pos for pos, mid in enumerate(mastery_ids["id"])}
    get_summoner_masteries = ("SELECT M.masteryId, M.rank "
                              "FROM MatchParticipant P, MatchMastery M, "
                              "MatchDetail D, MatchPlayer PL "
                              "WHERE PL.summonerId = %s "
                              "AND P._match_id = %s "
                              "AND PL._participant_id = P._id "
                              "AND P._id = M._participant_id "
                              "AND P._match_id = D.matchId AND D.mapId = 11 "
                              "ORDER BY P._match_id, PL.summonerId")
    team_vectors = [np.zeros(45), np.zeros(45)]
    for vector, team in zip(team_vectors, (match[:5], match[5:10])):
        for _, player in team.iterrows():
            cursor.execute(get_summoner_masteries, (player["summonerId"],
                                                    player["matchId"]))
            for mastery_id, rank in list(cursor):
                vector[position[mastery_id]] += rank
    return np.concatenate(team_vectors)
def dmg_types_team(match, db):
    """Sum each team's champion damage profile (attack, defense, magic).

    Returns a length-6 array: blue totals followed by red totals.
    """
    profiles = pd.read_sql("SELECT _champion_id, attack, defense, magic "
                           "FROM ChampionInfo "
                           "ORDER BY _champion_id", db)
    # champion id -> [attack, defense, magic]
    profiles = profiles.set_index("_champion_id").T.to_dict("list")
    totals = np.zeros((2, 3))
    for row, team in enumerate((match[:5], match[5:10])):
        for _, player in team.iterrows():
            totals[row] += profiles[player["championId"]]
    return totals.ravel()
def dmg_types_percent_team(match, db):
    """Compute each team's physical/magic damage split as percentages.

    The ratio is rounded to 5 decimals before being scaled to a percentage.
    Returns [blue_attack%, blue_magic%, red_attack%, red_magic%].
    """
    profiles = pd.read_sql("SELECT _champion_id, attack, magic "
                           "FROM ChampionInfo "
                           "ORDER BY _champion_id", db)
    # champion id -> [attack, magic]
    profiles = profiles.set_index("_champion_id").T.to_dict("list")
    percentages = []
    for team in (match[:5], match[5:10]):
        team_dmg = np.zeros((2))
        for _, player in team.iterrows():
            team_dmg += profiles[player["championId"]]
        team_dmg = 100 * np.around(np.divide(team_dmg, np.sum(team_dmg)),
                                   decimals=5)
        percentages.append(team_dmg)
    return np.concatenate(percentages)
def mastery_scores_team(match, cursor):
    """Sum each team's summoner mastery scores.

    Returns a length-2 array [blue_total, red_total], or None as soon as
    any player has no SummonerMasteries row.
    """
    get_mastery_scores = ("SELECT mastery "
                          "FROM SummonerMasteries "
                          "WHERE summId = %s")
    totals = np.zeros(2)
    for slot, team in enumerate((match[:5], match[5:10])):
        for _, player in team.iterrows():
            cursor.execute(get_mastery_scores, [player["summonerId"]])
            rows = list(cursor)
            if not rows:
                # Missing data for any player invalidates the feature.
                return None
            totals[slot] += rows[0][0]
    return totals
def champion_masteries_team(match, cursor):
    """Sum each team's champion-specific mastery points.

    Returns a length-2 array [blue_total, red_total], or None as soon as
    any (summoner, champion) pair has no SummonerChampMasteries row.
    """
    get_champion_masteries = ("SELECT mastery "
                              "FROM SummonerChampMasteries "
                              "WHERE summId = %s AND championId = %s")
    totals = np.zeros(2)
    for slot, team in enumerate((match[:5], match[5:10])):
        for _, player in team.iterrows():
            cursor.execute(get_champion_masteries, (player["summonerId"],
                                                    player["championId"]))
            rows = list(cursor)
            if not rows:
                # Missing data for any player invalidates the feature.
                return None
            totals[slot] += rows[0][0]
    return totals
def champion_masteries_summoner(match, cursor):
    """Collect each player's champion mastery points, blue team then red.

    Returns a length-10 array in player order (5 blue, then 5 red), or
    None as soon as any player has no SummonerChampMasteries row for the
    champion they are playing.
    """
    get_champion_masteries = ("SELECT mastery "
                              "FROM SummonerChampMasteries "
                              "WHERE summId = %s AND championId = %s")
    per_player = np.zeros(10)
    # Rows 0-4 are the blue team, rows 5-9 the red team.
    for slot, (_, player) in enumerate(match[:10].iterrows()):
        cursor.execute(get_champion_masteries, (player["summonerId"],
                                                player["championId"]))
        rows = list(cursor)
        if not rows:
            # Missing data for any player invalidates the feature.
            return None
        per_player[slot] = rows[0][0]
    return per_player
def summoner_wins_and_rate_team(match, cursor):
    """Aggregate overall win counts and pooled win rates per team.

    For each player, (wins, losses) is looked up in SummonerHistory.
    Players with no history row are skipped. The rate is the team's pooled
    win percentage: 100 * total_wins / (total_wins + total_losses).

    Returns a length-4 array [blue_rate, blue_wins, red_rate, red_wins].
    """
    get_history = ("SELECT wins, losses "
                   "FROM SummonerHistory "
                   "WHERE summId = %s")
    rates = [np.zeros(1), np.zeros(1)]
    win_counts = [np.zeros(1), np.zeros(1)]
    for slot, team in enumerate((match[:5], match[5:10])):
        total = np.zeros(1)
        for _, player in team.iterrows():
            cursor.execute(get_history, [player["summonerId"]])
            rows = list(cursor)
            if not rows:
                # BUG FIX: the original did list(cursor)[0] before checking,
                # which raised IndexError on an empty result set and made
                # the skip path unreachable. Check first, then unpack.
                continue
            wins, losses = rows[0]
            total += wins + losses
            win_counts[slot] += wins
        if total > 0:
            rates[slot] = (win_counts[slot] / (total * 1.0)) * 100
    return np.concatenate((rates[0], win_counts[0], rates[1], win_counts[1]))
def champion_wins_and_rate_team(match, cursor):
    """Aggregate champion-specific win counts and pooled win rates per team.

    For each player, (wins, losses) on their current champion is looked up
    in SummonerChampHistory. Players with no history row are skipped. The
    rate is the team's pooled win percentage:
    100 * total_wins / (total_wins + total_losses).

    Returns a length-4 array [blue_rate, blue_wins, red_rate, red_wins].
    """
    get_history = ("SELECT wins, losses "
                   "FROM SummonerChampHistory "
                   "WHERE summId = %s AND championId = %s")
    rates = [np.zeros(1), np.zeros(1)]
    win_counts = [np.zeros(1), np.zeros(1)]
    for slot, team in enumerate((match[:5], match[5:10])):
        total = np.zeros(1)
        for _, player in team.iterrows():
            cursor.execute(get_history, (player["summonerId"],
                                         player["championId"]))
            rows = list(cursor)
            if not rows:
                # BUG FIX: the original did list(cursor)[0] before checking,
                # which raised IndexError on an empty result set and made
                # the skip path unreachable. Check first, then unpack.
                continue
            wins, losses = rows[0]
            total += wins + losses
            win_counts[slot] += wins
        if total > 0:
            rates[slot] = (win_counts[slot] / (total * 1.0)) * 100
    return np.concatenate((rates[0], win_counts[0], rates[1], win_counts[1]))
def team_features_zero_to_ten(match, cursor):
    """Sum per-team timeline deltas over minutes 0-10 of the match.

    Feature order: creeps/min, damage taken/min, gold/min, xp/min.
    Returns a length-8 array (blue totals then red totals), or None as soon
    as any player has no timeline rows for this match.
    """
    get_features = ("SELECT PL.summonerId, PTD._type, PTD.zeroToTen "
                    "FROM MatchParticipant PA, MatchPlayer PL, "
                    "MatchParticipantTimeline PT, "
                    "MatchParticipantTimelineData PTD "
                    "WHERE PL.summonerId = %s AND PA._match_id = %s "
                    "AND PL._participant_id = PA._id "
                    "AND PA._id = PT._participant_id "
                    "AND PT._id = PTD._timeline_id")
    # Timeline delta type -> output slot; unknown types are ignored.
    feature_slot = {"creepsPerMinDeltas": 0,
                    "damageTakenPerMinDeltas": 1,
                    "goldPerMinDeltas": 2,
                    "xpPerMinDeltas": 3}
    team_totals = [np.zeros(4), np.zeros(4)]
    for totals, team in zip(team_totals, (match[:5], match[5:10])):
        for _, player in team.iterrows():
            cursor.execute(get_features, (player["summonerId"],
                                          player["matchId"]))
            rows = list(cursor)
            if not rows:
                return None
            for _, delta_type, value in rows:
                if delta_type in feature_slot:
                    totals[feature_slot[delta_type]] += value
    return np.concatenate(team_totals)
def team_features_zero_to_twenty(match, cursor):
    """Sum per-team timeline deltas over minutes 0-20 of the match.

    Feature order: creeps/min, damage taken/min, gold/min, xp/min, each the
    sum of the 0-10 and 10-20 minute deltas. Returns a length-8 array
    (blue totals then red totals), or None as soon as any player has no
    timeline rows for this match.
    """
    get_features = ("SELECT PL.summonerId, PTD._type, PTD.zeroToTen, "
                    "PTD.tenToTwenty "
                    "FROM MatchParticipant PA, MatchPlayer PL, "
                    "MatchParticipantTimeline PT, "
                    "MatchParticipantTimelineData PTD "
                    "WHERE PL.summonerId = %s AND PA._match_id = %s "
                    "AND PL._participant_id = PA._id "
                    "AND PA._id = PT._participant_id "
                    "AND PT._id = PTD._timeline_id")
    # Timeline delta type -> output slot; unknown types are ignored.
    feature_slot = {"creepsPerMinDeltas": 0,
                    "damageTakenPerMinDeltas": 1,
                    "goldPerMinDeltas": 2,
                    "xpPerMinDeltas": 3}
    team_totals = [np.zeros(4), np.zeros(4)]
    for totals, team in zip(team_totals, (match[:5], match[5:10])):
        for _, player in team.iterrows():
            cursor.execute(get_features, (player["summonerId"],
                                          player["matchId"]))
            rows = list(cursor)
            if not rows:
                return None
            for row in rows:
                delta_type = row[1]
                if delta_type in feature_slot:
                    totals[feature_slot[delta_type]] += row[2] + row[3]
    return np.concatenate(team_totals)
def team_features_zero_to_thirty(match, cursor):
get_features = ("SELECT PL.summonerId, PTD._type, PTD.zeroToTen, "
"PTD.tenToTwenty, PTD.twentyToThirty "
"FROM MatchParticipant PA, MatchPlayer PL, "
"MatchParticipantTimeline PT, "
"MatchParticipantTimelineData PTD "
"WHERE PL.summonerId = %s AND PA._match_id = %s "
"AND PL._participant_id = PA._id "
"AND | |
is, the
extrapolation is considered valid for all temperatures and pressures.
It is not guaranteed that a method will work or give an accurate
prediction simply because this method considers the method valid.
Parameters
----------
T : float
Temperature at which to test the method, [K]
P : float
Pressure at which to test the method, [Pa]
method : str
Name of the method to test
Returns
-------
validity : bool
Whether or not a method is valid
'''
validity = True
if method == COOLPROP:
validity = PhaseSI('T', T, 'P', P, self.CASRN) in ['gas', 'supercritical_gas', 'supercritical', 'supercritical_liquid']
elif method in self.tabular_data:
if not self.tabular_extrapolation_permitted:
Ts, Ps, properties = self.tabular_data[method]
if T < Ts[0] or T > Ts[-1] or P < Ps[0] or P > Ps[-1]:
validity = False
else:
raise Exception('Method not valid')
return validity
# Method-name constants for the liquid-mixture viscosity mixing rules.
LALIBERTE_MU = 'Laliberte'
MIXING_LOG_MOLAR = 'Logarithmic mixing, molar'
MIXING_LOG_MASS = 'Logarithmic mixing, mass'
SIMPLE = 'Simple'
viscosity_liquid_mixture_methods = [LALIBERTE_MU, MIXING_LOG_MOLAR, MIXING_LOG_MASS, SIMPLE]
'''Holds all mixing rules available for the :obj:`ViscosityLiquidMixture`
class, for use in iterating over them.'''
class ViscosityLiquidMixture(MixtureProperty):
    '''Class for dealing with the viscosity of a liquid mixture as a
    function of temperature, pressure, and composition.

    Consists of one electrolyte-specific method, and logarithmic rules based
    on either mole fractions or mass fractions.

    Preferred method is :obj:`mixing_logarithmic <chemicals.utils.mixing_logarithmic>` with mole
    fractions, or **Laliberte** if the mixture is aqueous and has electrolytes.

    Parameters
    ----------
    CASs : list[str], optional
        The CAS numbers of all species in the mixture
    ViscosityLiquids : list[ViscosityLiquid], optional
        ViscosityLiquid objects created for all species in the mixture, [-]
    MWs : list[float], optional
        Molecular weights of all species in the mixture, [g/mol]
    correct_pressure_pure : bool, optional
        Whether to try to use the better pressure-corrected pure component
        models or to use only the T-only dependent pure species models, [-]

    Notes
    -----
    To iterate over all methods, use the list stored in
    :obj:`viscosity_liquid_mixture_methods`.

    **LALIBERTE_MU**:
        Electrolyte model equation with coefficients; see
        :obj:`thermo.electrochem.Laliberte_viscosity` for more details.
    **MIXING_LOG_MOLAR**:
        Logarithmic mole fraction mixing rule described in
        :obj:`chemicals.utils.mixing_logarithmic`.
    **MIXING_LOG_MASS**:
        Logarithmic mass fraction mixing rule described in
        :obj:`chemicals.utils.mixing_logarithmic`.
    **SIMPLE**:
        Linear mole fraction mixing rule described in
        :obj:`mixing_simple <chemicals.utils.mixing_simple>`.

    See Also
    --------
    :obj:`thermo.electrochem.Laliberte_viscosity`

    References
    ----------
    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
       New York: McGraw-Hill Professional, 2000.
    '''
    name = 'liquid viscosity'
    units = 'Pa*s'
    property_min = 0
    '''Minimum valid value of liquid viscosity.'''
    property_max = 2E8
    '''Maximum valid value of liquid viscosity. Generous limit, as
    the value is that of bitumen in a Pitch drop experiment.'''
    # Preference order used to pick the default method in load_all_methods.
    ranked_methods = [LALIBERTE_MU, MIXING_LOG_MOLAR, MIXING_LOG_MASS, SIMPLE]
    # Names used by the generic MixtureProperty machinery to locate the
    # pure-component property objects and their expected types.
    pure_references = ('ViscosityLiquids',)
    pure_reference_types = (ViscosityLiquid, )
    def __init__(self, CASs=[], ViscosityLiquids=[], MWs=[],
                 correct_pressure_pure=True):
        # NOTE(review): mutable default arguments are shared between calls;
        # this is safe only as long as they are never mutated in place.
        self.CASs = CASs
        # `pure_objs` is an alias required by the MixtureProperty base class.
        self.ViscosityLiquids = self.pure_objs = ViscosityLiquids
        self.MWs = MWs
        self._correct_pressure_pure = correct_pressure_pure
        self.Tmin = None
        '''Minimum temperature at which no method can calculate the
        liquid viscosity under.'''
        self.Tmax = None
        '''Maximum temperature at which no method can calculate the
        liquid viscosity above.'''
        # Precompute per-method data, set Tmin/Tmax and the default method,
        # then cache any polynomial-fit coefficients for the fast path.
        self.load_all_methods()
        self.set_poly_fit_coeffs()
    def load_all_methods(self):
        r'''Method to initialize the object by precomputing any values which
        may be used repeatedly and by retrieving mixture-specific variables.
        All data are stored as attributes. This method also sets :obj:`Tmin`,
        :obj:`Tmax`, and :obj:`all_methods` as a set of methods which should
        work to calculate the property.

        Called on initialization only. See the source code for the variables at
        which the coefficients are stored. The coefficients can safely be
        altered once the class is initialized. This method can be called again
        to reset the parameters.
        '''
        methods = [MIXING_LOG_MOLAR, MIXING_LOG_MASS, SIMPLE]
        # The Laliberte electrolyte model only applies to aqueous mixtures:
        # water ('7732-18-5') plus at least one solute must be present.
        if len(self.CASs) > 1 and '7732-18-5' in self.CASs:
            # All solutes (everything except water).
            wCASs = [i for i in self.CASs if i != '7732-18-5']
            Laliberte_data = electrochem.Laliberte_data
            laliberte_incomplete = False
            v1s, v2s, v3s, v4s, v5s, v6s = [], [], [], [], [], []
            for CAS in wCASs:
                if CAS in Laliberte_data.index:
                    dat = Laliberte_data.loc[CAS].values
                    # Columns 12-17 hold the viscosity coefficients v1..v6;
                    # a NaN in the first one marks the row as unusable.
                    if isnan(dat[12]):
                        laliberte_incomplete = True
                        break
                    v1s.append(float(dat[12]))
                    v2s.append(float(dat[13]))
                    v3s.append(float(dat[14]))
                    v4s.append(float(dat[15]))
                    v5s.append(float(dat[16]))
                    v6s.append(float(dat[17]))
                else:
                    # A single missing solute disqualifies the whole model.
                    laliberte_incomplete = True
                    break
            if not laliberte_incomplete:
                methods.append(LALIBERTE_MU)
                self.wCASs = wCASs
                # Index of water in the composition vectors, used by
                # `calculate` to drop water's mass fraction.
                self.index_w = self.CASs.index('7732-18-5')
                self.Laliberte_v1s = v1s
                self.Laliberte_v2s = v2s
                self.Laliberte_v3s = v3s
                self.Laliberte_v4s = v4s
                self.Laliberte_v5s = v5s
                self.Laliberte_v6s = v6s
        self.all_methods = all_methods = set(methods)
        Tmins = [i.Tmin for i in self.ViscosityLiquids if i.Tmin]
        Tmaxs = [i.Tmax for i in self.ViscosityLiquids if i.Tmax]
        if Tmins:
            # Lower bound: every pure component must be valid.
            self.Tmin = max(Tmins)
        if Tmaxs:
            # NOTE(review): this takes the max of the pure-component Tmaxs;
            # a conservative bound (all components valid) would be min(Tmaxs).
            # Confirm this matches the intent documented on `Tmax`.
            self.Tmax = max(Tmaxs)
        # Pick the highest-ranked available method as the default.
        for m in self.ranked_methods:
            if m in all_methods:
                self.method = m
                break
    def calculate(self, T, P, zs, ws, method):
        r'''Method to calculate viscosity of a liquid mixture at
        temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
        `ws` with a given method.

        This method has no exception handling; see :obj:`mixture_property <thermo.utils.MixtureProperty.mixture_property>`
        for that.

        Parameters
        ----------
        T : float
            Temperature at which to calculate the property, [K]
        P : float
            Pressure at which to calculate the property, [Pa]
        zs : list[float]
            Mole fractions of all species in the mixture, [-]
        ws : list[float]
            Weight fractions of all species in the mixture, [-]
        method : str
            Name of the method to use

        Returns
        -------
        mu : float
            Viscosity of the liquid mixture, [Pa*s]
        '''
        if method == LALIBERTE_MU:
            # Laliberte takes solute mass fractions only; copy `ws` so the
            # caller's list is untouched, then drop water's entry.
            ws = list(ws) ; ws.pop(self.index_w)
            return Laliberte_viscosity(T, ws, self.wCASs)
        if self._correct_pressure_pure:
            mus = []
            for obj in self.ViscosityLiquids:
                mu = obj.TP_dependent_property(T, P)
                # Fall back to the T-only model when the (T, P) model fails.
                if mu is None:
                    mu = obj.T_dependent_property(T)
                mus.append(mu)
        else:
            if self.locked:
                # Fast path: evaluate the stored ln(mu) polynomial fits
                # directly instead of calling into each pure object.
                poly_fit_data = self.poly_fit_data
                Tmins, Tmaxs, coeffs = poly_fit_data[0], poly_fit_data[3], poly_fit_data[6]
                mus = []
                for i in range(len(zs)):
                    if T < Tmins[i]:
                        # Linear extrapolation below the fitted range.
                        mu = (T - Tmins[i])*poly_fit_data[1][i] + poly_fit_data[2][i]
                    elif T > Tmaxs[i]:
                        # Linear extrapolation above the fitted range.
                        mu = (T - Tmaxs[i])*poly_fit_data[4][i] + poly_fit_data[5][i]
                    else:
                        # Horner evaluation of the polynomial in T.
                        mu = 0.0
                        for c in coeffs[i]:
                            mu = mu*T + c
                    # The fits are in ln(mu); exponentiate to get Pa*s.
                    mus.append(exp(mu))
            else:
                mus = [i.T_dependent_property(T) for i in self.ViscosityLiquids]
        if method == MIXING_LOG_MOLAR:
            # exp(sum(z_i * ln(mu_i)))
            ln_mu = 0.0
            for i in range(len(zs)):
                ln_mu += zs[i]*log(mus[i])
            return exp(ln_mu)
        elif method == MIXING_LOG_MASS:
            # exp(sum(w_i * ln(mu_i)))
            ln_mu = 0.0
            for i in range(len(ws)):
                ln_mu += ws[i]*log(mus[i])
            return exp(ln_mu)
        elif method == SIMPLE:
            # Linear mole-fraction average.
            mu = 0.0
            for i in range(len(zs)):
                mu += zs[i]*mus[i]
            return mu
        else:
            raise Exception('Method not valid')
    def test_method_validity(self, T, P, zs, ws, method):
        r'''Method to test the validity of a specified method for the given
        conditions. If **Laliberte** is applicable, all other methods are
        returned as inapplicable. Otherwise, there are no checks or strict
        ranges of validity.

        Parameters
        ----------
        T : float
            Temperature at which to check method validity, [K]
        P : float
            Pressure at which to check method validity, [Pa]
        zs : list[float]
            Mole fractions of all species in the mixture, [-]
        ws : list[float]
            Weight fractions of all species in the mixture, [-]
        method : str
            Method name to use

        Returns
        -------
        validity : bool
            Whether or not a specified method is valid
        '''
        if LALIBERTE_MU in self.all_methods:
            # If everything is an electrolyte, accept only it as a method
            if method in self.all_methods:
                return method == LALIBERTE_MU
        if method in self.all_methods:
            return True
        else:
            raise ValueError('Method not valid')
# String identifiers for the gas-mixture viscosity mixing rules used by
# ViscosityGasMixture below (SIMPLE is shared with the liquid-mixture rules).
BROKAW = 'Brokaw'
HERNING_ZIPPERER = 'Herning-Zipperer'
WILKE = 'Wilke'
# Master list of the available mixing rules, in no particular order.
viscosity_gas_mixture_methods = [BROKAW, HERNING_ZIPPERER, WILKE, SIMPLE]
'''Holds all mixing rules available for the :obj:`ViscosityGasMixture`
class, for use in iterating over them.'''
class ViscosityGasMixture(MixtureProperty):
'''Class for dealing with the viscosity of a gas mixture as a
function of temperature, pressure, and composition.
Consists of three gas viscosity specific mixing rules and a mole-weighted
simple mixing rule.
Prefered method is :obj:`Brokaw <chemicals.viscosity.Brokaw>`.
Parameters
----------
MWs : list[float], optional
Molecular weights of all species in the mixture, [g/mol]
molecular_diameters : list[float], optional
Lennard-Jones molecular diameters, [Angstrom]
Stockmayers : list[float], optional
Lennard-Jones depth of potential-energy minimum over k
or epsilon_k, [K]
CASs : list[str], optional
The CAS numbers of all species in the mixture
ViscosityGases : list[ViscosityGas], optional
ViscosityGas objects created for all species in the mixture, [-]
correct_pressure_pure : bool, optional
Whether to try to use the better pressure-corrected pure component
models or to use only the T-only dependent pure species models, [-]
Notes
-----
    To iterate over all methods, use the list stored in
    :obj:`viscosity_gas_mixture_methods`.
**Brokaw**:
Mixing rule described in :obj:`Brokaw <chemicals.viscosity.Brokaw>`.
**Herning-Zipperer**:
Mixing rule described in | |
+ self.start)
class BoolGenerator(RandomGenerator):
    """Draw booleans which are True with probability 'rate'.
    - float rate: truth's rate, defaults to 0.5
    """
    DIRS = { 'rate':float }
    def __init__(self, att, params=None):
        RandomGenerator.__init__(self, att, params)
        rate = self.params.get('rate', 0.5)
        assert 0.0 <= rate <= 1.0, \
            "%s: rate %s not in [0,1]" % (self, rate)
        self.rate = rate
        self.cleanParams(BoolGenerator.DIRS)
    def genData(self):
        # degenerate rates need no random draw at all
        if self.rate == 0.0:
            return False
        if self.rate == 1.0:
            return True
        return self._rand.random() < self.rate
class FloatGenerator(RandomGenerator):
    """Generate floats under several classical distributions.
    The distribution is chosen with directive 'float' and tuned with
    parameters 'alpha' and 'beta'.
    - str sub: subtype of random generator
    - float alpha, beta: parameters
    """
    DIRS = { 'float':str, 'alpha':float, 'beta':float }
    def __init__(self, att, params=None):
        RandomGenerator.__init__(self, att, params)
        self.sub = self.params.get('float', 'uniform')
        # a bare or empty 'float' directive means the uniform default
        if self.sub == True or self.sub == '':
            self.sub = 'uniform'
        self.alpha = self.params.get('alpha', 0.0)
        self.beta = self.params.get('beta', 1.0)
        r, a, b, s = self._rand, self.alpha, self.beta, self.sub
        # single-parameter distributions must not be given 'beta'
        if self.sub in [ 'exp', 'pareto' ]:
            assert not 'beta' in self.params, \
                "{0}: unexpected 'beta' for float generator '{1}'".\
                format(self, s)
        # dispatch table: genData is bound to the sampler for subtype s,
        # or left as None when the subtype is unknown
        samplers = {
            'gauss':    lambda: r.gauss(a, b),
            'beta':     lambda: r.betavariate(a, b),
            'exp':      lambda: r.expovariate(a),
            'gamma':    lambda: r.gammavariate(a, b),
            'log':      lambda: r.lognormvariate(a, b),
            'norm':     lambda: r.normalvariate(a, b),
            'pareto':   lambda: r.paretovariate(a),
            'uniform':  lambda: r.uniform(a, b),
            'vonmises': lambda: r.vonmisesvariate(a, b),
            'weibull':  lambda: r.weibullvariate(a, b),
        }
        self.genData = samplers.get(s)
        assert self.genData, "%s: unexpected float generator '%s'" % (self, s)
        self.cleanParams(FloatGenerator.DIRS)
from fractions import gcd
import math
class IntGenerator(RandomGenerator):
    """Generate integers, possibly mangled and offset.
    - int offset: generate between offset and offset+size-1
    - int shift, step, xor: mangling parameters
      return offset + (shift + step * (i "^" xor)) % size
    - str sub: serial, serand, uniform, power & scale
      . serial: a counter
      . serand: serial up to size, then random
    - uniform, power and scale are random generators
    - power & scale use either parameter 'alpha' or 'rate' to define
      their skewness.
    """
    # 60 handy primes for step mangling, about every 10,000,000
    PRIMES = [ 1234567129, 1244567413, 1254567911, 1264567631, 1274567381,
               1284567247, 1294567787, 1304567897, 1314568139, 1324568251,
               1334568007, 1344567943, 1354567987, 1364568089, 1374568339,
               1384568699, 1394567981, 1404568153, 1414568359, 1424568473,
               1434567973, 1444568269, 1454567999, 1464568463, 1474568531,
               1484568011, 1494568219, 1504568887, 1514568533, 1524567899,
               1534568531, 1544568271, 1554568441, 1564568519, 1574568419,
               1584567949, 1594568149, 1604568283, 1614568231, 1624568417,
               1634568427, 1644568397, 1654568557, 1664568677, 1674568109,
               1684568321, 1694568241, 1704567959, 1714568899, 1724568239,
               1734567899, 1744567901, 1754567891, 1764567913, 1774567901,
               1784567899, 1794567911, 1804567907, 1814567891, 1824567893 ]
    # Accepted directives and their expected types.
    DIRS = { 'sub':str, 'mangle':bool,
             'size':int, 'offset':int, 'step':int, 'shift':int, 'xor':int,
             'alpha':float, 'rate':float }
    # constructor
    def __init__(self, att, params=None):
        RandomGenerator.__init__(self, att, params)
        # set generator subtype depending on attribute:
        # unique attributes default to 'serial', others to 'uniform'
        self.sub = self.params.get('sub')
        if not self.sub:
            self.sub = 'serial' if att is not None and att.isUnique() else \
                       'uniform'
        # {'x','y'} does not work with 2.6
        assert self.sub in ['serial', 'serand', 'uniform', 'power', 'scale'], \
            "%s: invalid int generator '%s'" % (self, self.sub)
        # set offset from different sources, in decreasing precedence:
        # first check for explicit directives
        if 'offset' in self.params:
            self.offset = self.params['offset']
        # then PK or FK information
        elif att is not None and att.isPK and opts.offset:
            self.offset = opts.offset
        elif att is not None and att.FK:
            # a FK inherits the offset of the PK it references
            fk = att.FK.getPK()
            self.offset = \
                fk.params.get('offset', opts.offset if opts.offset else 1)
        else:
            self.offset = 1
        # scale & power: 'alpha' and 'rate' are mutually exclusive skewness
        # parameters, only meaningful for those two subtypes
        if 'alpha' in self.params or 'rate' in self.params:
            assert self.sub in ['power', 'scale'], \
                "{0}: unexpected 'alpha'/'beta' for int generator '{1}'". \
                format(self, self.sub)
            assert not ('alpha' in self.params and 'rate' in self.params), \
                "%s: not both 'alpha' and 'rate' for '%s'" % (self, self.sub)
        if 'alpha' in self.params:
            self.alpha, self.rate = float(self.params['alpha']), None
        elif 'rate' in self.params:
            self.alpha, self.rate = None, float(self.params['rate'])
        else:
            self.alpha, self.rate = None, None
        # set step, shift & xor...
        self.shift, self.xor, self.step = 0, 0, 1
        self.mangle = self.params.get('mangle', False)
        # step: explicit directive, else a random big prime when mangling,
        # else the identity step of 1
        self.step = self.params['step'] if 'step' in self.params else \
                    IntGenerator.PRIMES[random.randrange(0, \
                        len(IntGenerator.PRIMES))] if self.mangle else \
                    1
        assert self.step != 0, "%s: 'step' must not be zero" % self
        # shift/xor stay None until setSize decides their final value
        self.shift = self.params['shift'] if 'shift' in self.params else None
        self.xor = self.params['xor'] if 'xor' in self.params else None
        self.mask = 0
        # set size if explicit, other will have to be set later.
        if 'size' in self.params:
            self.setSize(self.params['size'])
        elif att is not None and att.size is not None:
            self.setSize(att.size) # possibly computed from table & mult
        else: # later? when??
            self.size = None
        self.cleanParams(IntGenerator.DIRS)
    #
    # generated size
    #
    def setSize(self, size):
        # NOTE(review): `long` exists only in Python 2; on Python 3 a
        # non-int size would raise NameError here instead of the intended
        # AssertionError — confirm which Python versions must be supported.
        assert (isinstance(size, int) or isinstance(size, long)) and size > 0, \
            "%s: 'size' %s must be > 0" % (self, size)
        self.size = size
        # shortcut if nothing to generate...
        if size <= 1:
            self.shift = 0
            return
        # adjust step, xor, shift depending on size:
        # step must be coprime with size for the affine map to be a bijection
        # NOTE(review): `gcd` is imported from `fractions` at file level;
        # fractions.gcd was removed in Python 3.9 (math.gcd is the modern
        # equivalent) — verify against the supported Python versions.
        if self.step != 1 and gcd(size, self.step) != 1:
            # very unlikely for big primes steps
            sys.stderr.write("{0}: step {1} ignored for size {2}\n".
                             format(self.att, self.step, size))
            self.step = 1
        # randomize xor/shift only when mangling was requested
        if self.xor is None:
            self.xor = random.randrange(1, 1000*size) if self.mangle else 0
        if self.shift is None:
            self.shift = random.randrange(0, size) if self.mangle else 0
        if self.xor != 0:
            # note: int.bit_length available from 2.7 & 3.1
            # mask ends up as the largest power of two <= size
            m = 1
            while m <= self.size:
                m *= 2
            self.mask = int(m/2) # ???
        # get generator parameters, which may depend on size
        if self.sub == 'power' or self.sub == 'scale':
            if self.rate is not None:
                # derive alpha from the requested rate and the size
                assert 0.0 < self.rate and self.rate < 1.0, \
                    "%s: rate %s not in (0,1)" % (self, self.rate)
                if self.sub == 'power':
                    self.alpha = - math.log(size) / math.log(self.rate)
                else: # self.sub == 'scale':
                    self.alpha = self.rate * (size - 1.0) / (1.0 - self.rate)
            elif self.alpha is None:
                self.alpha = 1.0
            assert self.alpha > 0, \
                "%s: 'alpha' %f not > 0" % (self, self.alpha)
        else: # should not get there
            assert self.alpha is None, \
                "%s: useless 'alpha' %s set" % (self, self.alpha)
    # generate an integer
    def genData(self):
        assert self.size is not None and self.size > 0, \
            "%s: cannot draw from empty set" % self
        # update counter
        self.gens += 1
        # set base in 0..size-1 depending on generator type
        if self.size == 1:
            return self.offset
        # NOTE(review): the assert message reads "shift is set" but fires
        # when shift is NOT set (i.e. setSize was never called).
        assert self.shift is not None, "%s: shift is set" % self
        if self.sub == 'serial' or \
           self.sub == 'serand' and self.gens - 1 < self.size:
            # plain counter, wrapping at size
            base = (self.gens - 1) % self.size
        elif self.sub == 'uniform' or self.sub == 'serand':
            base = int(self._rand.randrange(0, self.size))
        elif self.sub == 'power':
            base = int(self.size * self._rand.random() ** self.alpha)
        else: # self.sub == 'scale':
            v = self._rand.random()
            base = int(self.size * (v / ((1 - self.alpha ) * v + self.alpha)))
        assert 0 <= base and base < self.size, \
            "%s: base %d not in [0,%d)" % (self, base, self.size)
        # return possibly mangled result
        if self.xor != 0:
            # non linear step: apply xor to the largest possible power of 2
            m = self.mask
            while m > 0:
                if m & self.size != 0 and m & base == 0:
                    base = ((base ^ self.xor) & (m - 1)) | (base & ~ (m - 1))
                    break
                m = int(m/2)
        # then linear step:
        return self.offset + (self.shift + self.step * base) % self.size
# ??? This could also be based on FloatGenerator? '4.2 days' is okay for pg.
class IntervalGenerator(IntGenerator):
    """Generate time intervals on top of the integer generator.
    - str unit: time unit for the interval, default is 's' (seconds)
    """
    DIRS = { 'unit':str }
    def __init__(self, att, params=None):
        IntGenerator.__init__(self, att, params)
        self.unit = self.params.get('unit', 's')
        self.cleanParams(IntervalGenerator.DIRS)
    def genData(self):
        # draw a plain integer, then let the target database render it
        # as an interval literal in the configured unit
        # ??? maybe it should not depend on db?
        value = super(IntervalGenerator, self).genData()
        return db.intervalValue(value, self.unit)
from datetime import date, datetime, | |
deaths.
If no character is provided, the timeline of all registered characters in the server will be shown.
Characters must be registered in order to see their timelines.
- 🌟 Indicates level ups
- 💀 Indicates deaths
"""
permissions = ctx.bot_permissions
if not permissions.embed_links:
await ctx.send("Sorry, I need `Embed Links` permission for this command.")
return
if ctx.is_private:
user_servers = self.bot.get_user_guilds(ctx.author.id)
user_worlds = self.bot.get_user_worlds(ctx.author.id)
else:
user_servers = [ctx.guild]
user_worlds = [self.bot.tracked_worlds.get(ctx.guild.id)]
if user_worlds[0] is None:
await ctx.send("This server is not tracking any tibia worlds.")
return
c = userDatabase.cursor()
entries = []
author = None
author_icon = discord.Embed.Empty
count = 0
now = time.time()
per_page = 20 if ctx.long else 5
await ctx.channel.trigger_typing()
try:
if name is None:
title = "Timeline"
c.execute("SELECT name, user_id, world, char_deaths.level as level, killer, 'death' AS `type`, date, "
"vocation "
"FROM char_deaths, chars WHERE char_id = id AND char_deaths.level >= ? "
"UNION "
"SELECT name, user_id, world, char_levelups.level as level, null, 'levelup' AS `type`, date, "
"vocation "
"FROM char_levelups, chars WHERE char_id = id AND char_levelups.level >= ? "
"ORDER BY date DESC", (config.announce_threshold, config.announce_threshold))
while True:
row = c.fetchone()
if row is None:
break
user = self.bot.get_member(row["user_id"], user_servers)
if user is None:
continue
if row["world"] not in user_worlds:
continue
count += 1
row["time"] = get_time_diff(dt.timedelta(seconds=now - row["date"]))
row["user"] = user.display_name
row["voc_emoji"] = get_voc_emoji(row["vocation"])
if row["type"] == "death":
row["emoji"] = config.death_emoji
entries.append("{emoji}{voc_emoji} {name} (**@{user}**) - At level **{level}** by {killer} - "
"*{time} ago*".format(**row))
else:
row["emoji"] = config.levelup_emoji
entries.append("{emoji}{voc_emoji} {name} (**@{user}**) - Level **{level}** - *{time} ago*"
.format(**row))
if count >= 200:
break
else:
c.execute("SELECT id, name, user_id, vocation FROM chars WHERE name LIKE ?", (name,))
result = c.fetchone()
if result is None:
await ctx.send("I don't have a character with that name registered.")
return
# If user doesn't share a server with the owner, don't display it
owner = self.bot.get_member(result["user_id"], user_servers)
if owner is None:
await ctx.send("I don't have a character with that name registered.")
return
author = owner.display_name
author_icon = owner.avatar_url
name = result["name"]
emoji = get_voc_emoji(result["vocation"])
title = f"{emoji} {name} timeline"
c.execute("SELECT level, killer, 'death' AS `type`, date "
"FROM char_deaths WHERE char_id = ? AND level >= ? "
"UNION "
"SELECT level, null, 'levelup' AS `type`, date "
"FROM char_levelups WHERE char_id = ? AND level >= ? "
"ORDER BY date DESC", (result["id"], config.announce_threshold, result["id"], config.announce_threshold))
while True:
row = c.fetchone()
if row is None:
break
count += 1
row["time"] = get_time_diff(dt.timedelta(seconds=now - row["date"]))
if row["type"] == "death":
row["emoji"] = config.death_emoji
entries.append("{emoji} At level **{level}** by {killer} - *{time} ago*"
.format(**row)
)
else:
row["emoji"] = config.levelup_emoji
entries.append("{emoji} Level **{level}** - *{time} ago*".format(**row))
if count >= 200:
break
finally:
c.close()
if count == 0:
await ctx.send("There are no registered events.")
return
pages = Pages(ctx, entries=entries, per_page=per_page)
pages.embed.title = title
pages.embed.set_author(name=author, icon_url=author_icon)
try:
await pages.paginate()
except CannotPaginate as e:
await ctx.send(e)
    @timeline.command(name="user")
    @checks.is_in_tracking_world()
    async def timeline_user(self, ctx: NabCtx, *, name: str):
        """Shows a user's recent level ups and deaths on their characters."""
        # Embeds are required for the paginator output.
        permissions = ctx.bot_permissions
        if not permissions.embed_links:
            await ctx.send("Sorry, I need `Embed Links` permission for this command.")
            return
        # NOTE(review): `name` is a required command argument, so this check
        # looks unreachable — confirm whether discord.py can pass None here.
        if name is None:
            await ctx.send("You must tell me a user's name to look for his/her story.")
            return
        # In DMs, search across every guild shared with the author and all
        # their tracked worlds; in a guild, only that guild's tracked world.
        if ctx.is_private:
            user_servers = self.bot.get_user_guilds(ctx.author.id)
            user_worlds = self.bot.get_user_worlds(ctx.author.id)
        else:
            user_servers = [ctx.guild]
            user_worlds = [self.bot.tracked_worlds.get(ctx.guild.id)]
            if user_worlds[0] is None:
                await ctx.send("This server is not tracking any tibia worlds.")
                return
        # Resolve the target member within the relevant guilds only.
        user = self.bot.get_member(name, user_servers)
        if user is None:
            await ctx.send("I don't see any users with that name.")
            return
        c = userDatabase.cursor()
        entries = []
        count = 0
        now = time.time()
        # Wider pages in channels where long output is acceptable.
        per_page = 20 if ctx.long else 5
        await ctx.channel.trigger_typing()
        try:
            title = f"{user.display_name} timeline"
            # Merge deaths and level-ups above the announce threshold for all
            # of the user's characters, newest first.
            c.execute("SELECT name, user_id, world, char_deaths.level AS level, killer, 'death' AS `type`, date, vocation "
                      "FROM char_deaths, chars WHERE char_id = id AND char_deaths.level >= ? AND user_id = ? "
                      "UNION "
                      "SELECT name, user_id, world, char_levelups.level as level, null, 'levelup' AS `type`, date, vocation "
                      "FROM char_levelups, chars WHERE char_id = id AND char_levelups.level >= ? AND user_id = ? "
                      "ORDER BY date DESC", (config.announce_threshold, user.id, config.announce_threshold, user.id))
            while True:
                row = c.fetchone()
                if row is None:
                    break
                # Skip events from worlds not visible in this context.
                if row["world"] not in user_worlds:
                    continue
                count += 1
                row["time"] = get_time_diff(dt.timedelta(seconds=now - row["date"]))
                row["voc_emoji"] = get_voc_emoji(row["vocation"])
                if row["type"] == "death":
                    row["emoji"] = config.death_emoji
                    entries.append("{emoji}{voc_emoji} {name} - At level **{level}** by {killer} - *{time} ago*"
                                   .format(**row)
                                   )
                else:
                    row["emoji"] = config.levelup_emoji
                    entries.append("{emoji}{voc_emoji} {name} - Level **{level}** - *{time} ago*".format(**row))
                # Hard cap to keep the paginator bounded.
                if count >= 200:
                    break
        finally:
            # Always release the cursor, even if the query fails.
            c.close()
        if count == 0:
            await ctx.send("There are no registered events.")
            return
        author_icon = user.avatar_url
        pages = Pages(ctx, entries=entries, per_page=per_page)
        pages.embed.set_author(name=title, icon_url=author_icon)
        try:
            await pages.paginate()
        except CannotPaginate as e:
            # Surface the paginator's own explanation to the user.
            await ctx.send(e)
@commands.command(aliases=['serversave'])
async def time(self, ctx: NabCtx):
"""Displays Tibia server's time and time until server save."""
offset = get_tibia_time_zone() - get_local_timezone()
tibia_time = dt.datetime.now()+dt.timedelta(hours=offset)
server_save = tibia_time
if tibia_time.hour >= 10:
server_save += dt.timedelta(days=1)
server_save = server_save.replace(hour=10, minute=0, second=0, microsecond=0)
time_until_ss = server_save - tibia_time
hours, remainder = divmod(int(time_until_ss.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
timestrtibia = tibia_time.strftime("%H:%M")
server_save_str = '{h} hours and {m} minutes'.format(h=hours, m=minutes)
reply = "It's currently **{0}** in Tibia's servers.".format(timestrtibia)
if config.display_brasilia_time:
offsetbrasilia = get_brasilia_time_zone() - get_local_timezone()
brasilia_time = dt.datetime.now()+dt.timedelta(hours=offsetbrasilia)
timestrbrasilia = brasilia_time.strftime("%H:%M")
reply += "\n**{0}** in Brazil (Brasilia).".format(timestrbrasilia)
if config.display_sonora_time:
offsetsonora = -7 - get_local_timezone()
sonora_time = dt.datetime.now()+dt.timedelta(hours=offsetsonora)
timestrsonora = sonora_time.strftime("%H:%M")
reply += "\n**{0}** in Mexico (Sonora).".format(timestrsonora)
reply += "\nServer save is in {0}.\nRashid is in **{1}** today."\
.format(server_save_str, get_rashid_info()["city"])
await ctx.send(reply)
@commands.command(aliases=['check', 'char', 'character'])
async def whois(self, ctx: NabCtx, *, name):
"""Shows a character's or a discord user's information.
If the parameter matches a discord user, it displays a list of the characters linked to that user.
If the parameter matches a character, it will display the character's info
If the character found is registered to a discord user, it will show the owner of the character.
Users can be looked through their username, user#discriminator or their user id.
Additionally, if the character is in the highscores, their ranking will be shown.
"""
if not ctx.bot_permissions.embed_links:
await ctx.send("Sorry, I need `Embed Links` permission for this command.")
return
if ctx.is_lite:
try:
char = await get_character(name)
if char is None:
await ctx.send("I couldn't find a character with that name")
return
except NetworkError:
await ctx.send("Sorry, I couldn't fetch the character's info, maybe you should try again...")
return
embed = discord.Embed(description=self.get_char_string(char))
embed.set_author(name=char.name, url=char.url, icon_url=tibia_logo)
await ctx.send(embed=embed)
return
if name.lower() == ctx.me.display_name.lower():
await ctx.invoke(self.bot.all_commands.get('about'))
return
try:
char = await get_character(name, bot=self.bot)
except NetworkError:
await ctx.send("Sorry, I couldn't fetch the character's info, maybe you should try again...")
return
char_string = self.get_char_string(char)
user = self.bot.get_member(name, ctx.guild)
# If the user is a bot, then don't, just don't
if user is not None and user.bot:
user = None
embed = self.get_user_embed(ctx, user)
# No user or char with that name
if char is None and user is None:
await ctx.send("I don't see any user or character with that name.")
return
# We found a user
if embed is not None:
# Check if we found a char too
if char is not None:
# If it's owned by the user, we append it to the same embed.
if char.owner == int(user.id):
embed.add_field(name="Character", value=char_string, inline=False)
if char.last_login is not None:
embed.set_footer(text="Last login")
embed.timestamp = char.last_login
await ctx.send(embed=embed)
return
# Not owned by same user, we display a separate embed
else:
char_embed = discord.Embed(description=char_string)
char_embed.set_author(name=char.name, url=char.url, icon_url=tibia_logo)
if char.last_login is not None:
char_embed.set_footer(text="Last login")
char_embed.timestamp = char.last_login
await ctx.send(embed=embed)
await ctx.send(embed=char_embed)
return
else:
# Tries to display user's highest level character since there is no character match
if ctx.is_private:
display_name = '@'+user.name
user_guilds = self.bot.get_user_guilds(ctx.author.id)
user_tibia_worlds = [world for server, world in self.bot.tracked_worlds.items() if
server in [s.id for s in user_guilds]]
else:
if self.bot.tracked_worlds.get(ctx.guild.id) is None:
user_tibia_worlds = []
else:
user_tibia_worlds = [self.bot.tracked_worlds[ctx.guild.id]]
if len(user_tibia_worlds) != 0:
placeholders = ", | |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 2.83407e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202691,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.77103e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.523777,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.906992,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.520186,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.95095,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.51773,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.90517,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.34585e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0189873,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.137304,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.140423,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.137307,
'Execution Unit/Register Files/Runtime Dynamic': 0.15941,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.331783,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.871483,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.58992,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00523994,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00523994,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00455938,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00176249,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00201719,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0170565,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0504048,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.134992,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.53452,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.458494,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.19547,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0693774,
'L2/Runtime Dynamic': 0.013658,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 5.52041,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.06399,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.138575,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.138575,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 6.17746,
'Load Store Unit/Runtime Dynamic': 2.88597,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.341702,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.683403,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.121271,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.12187,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0889396,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.768148,
'Memory Management Unit/Runtime Dynamic': 0.210809,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.4506,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.23713e-05,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0267832,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.273002,
'Renaming Unit/Int Front End RAT/Subthreshold | |
<gh_stars>0
from google.appengine.api import users
from google.appengine.api import users as app_engine_users
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.ext.webapp.util import run_wsgi_app
from webapp2_extras import security
from webapp2_extras import routes
from webapp2_extras.routes import RedirectRoute
import google.appengine.api.app_identity as app_identity
import cgi
import datetime
import jinja2
import json
import logging
import os
import random
# Debugging. To use, start sdk via shell and add `pdb.set_trace()` to code.
import pdb
import re
import traceback
import urllib
import webapp2
import webapp2_extras.appengine.auth.models
from api import Api, PermissionDenied
from base_handler import BaseHandler
from model import User, Practice, Theme, Topic, Lesson, ResetPasswordToken
import config
import util
import view_counter
import mandrill
import locales
# Make sure this is off in production, it exposes exception messages.
# True only on the development SDK (see util.is_development()); read by
# ViewHandler.dispatch() to decide whether tracebacks are written into
# the HTTP response body.
debug = util.is_development()
class MetaView(type):
    """Metaclass that runs code before and after http get methods.

    Any class created with this metaclass has its ``get`` attribute (if
    present) replaced by a wrapper that first processes Google-login
    round-trips, then delegates to the original handler.

    See: http://stackoverflow.com/questions/6780907/python-wrap-class-method
    """
    @staticmethod
    def wrap(method):
        """Return a wrapped instance method."""
        def outer(self, *args, **kwargs):
            ## BEFORE GET ##
            # Is the user returning from a google authentication page? If so,
            # examine the credentials in their cookie and attempt to log them
            # in.
            # Bug fix: the original string had no {} placeholder, so
            # .format(self.request) was a no-op and the request was
            # never actually logged (it also had a stray semicolon).
            logging.info("MetaView Outer: {}".format(self.request))
            if self.request.get('google_login') == 'true':
                if self.get_current_user():
                    # Don't try to log someone in if they already are, just
                    # clear the URL param.
                    refresh_url = util.set_query_parameters(
                        self.request.url,
                        google_login='',
                    )
                    self.redirect(refresh_url)
                else:
                    # This will set up a redirect, so make sure to return
                    # afterwards.
                    self.handle_google_response()
                return
            ## INHERITING GET HANDLER RUNS HERE ##
            return_value = method(self, *args, **kwargs)
            ## AFTER GET ##
            # nothing here... yet...
            return return_value
        return outer

    def __new__(cls, name, bases, attrs):
        """If the class has an http GET method, wrap it."""
        if 'get' in attrs:
            attrs['get'] = cls.wrap(attrs['get'])
        return super(MetaView, cls).__new__(cls, name, bases, attrs)
class ViewHandler(BaseHandler):
    """Superclass for page-generating handlers.

    Renders Jinja2 templates and injects a shared set of template
    variables on every page: the current user, Google sign-in URLs,
    Facebook app configuration, and the course/topic navigation data.
    The MetaView metaclass wraps get() so Google-login round-trips are
    handled before any page logic runs.
    """
    __metaclass__ = MetaView

    def get_jinja_environment(self, template_path='templates'):
        """Build a Jinja2 environment rooted at template_path.

        Autoescaping is enabled globally; templates must opt out
        explicitly where raw HTML is intended.
        """
        return jinja2.Environment(
            autoescape=True,
            extensions=['jinja2.ext.autoescape'],
            loader=jinja2.FileSystemLoader(template_path),
        )

    def write(self, template_filename, template_path='templates', **kwargs):
        """Render template_filename with kwargs plus the default context.

        Falls back to the 404 page if the template does not exist.
        Profiler events bracket each phase so slow steps show up in the
        log line emitted near the end of this method.
        """
        util.profiler.add_event("Begin ViewHandler:write")
        jinja_environment = self.get_jinja_environment(template_path)
        # Jinja environment filters:
        @jinja2.evalcontextfilter
        def jinja_json_filter(eval_context, value):
            """Serialize value as JSON and mark as safe for jinja."""
            return jinja2.Markup(json.dumps(value))
        jinja_environment.filters['to_json'] = jinja_json_filter
        def nl2br(value):
            """Replace new lines with <br> for html view"""
            return value.replace('\n', '<br>\n')
        jinja_environment.filters['nl2br'] = nl2br
        def format_datetime(value):
            # Formats datetime as Ex: "January 9, 2015"
            return '{dt:%B} {dt.day}, {dt.year}'.format(dt=value)
        jinja_environment.filters['datetime'] = format_datetime
        def format_ampescape(value):
            # Percent-encode ampersands (used when embedding values in URLs).
            return value.replace('&', '%26')
        jinja_environment.filters['ampescape'] = format_ampescape
        def format_filetype(value):
            # Map a MIME type string like 'application/pdf' to a short
            # human-readable label for display next to attachments.
            if value.split('/')[0] in ['application']:
                if value.split('/')[1] in ['pdf']:
                    formatted_type = 'pdf file'
                elif value.split('/')[1].find('wordprocessing') > -1:
                    formatted_type = 'word document'
                elif value.split('/')[1].find('presentation') > -1:
                    formatted_type = 'presentation'
                else:
                    formatted_type = 'document'
            elif value.split('/')[0] in ['image']:
                formatted_type = 'image file'
            else:
                # Fall back to the top-level MIME type (e.g. 'video').
                formatted_type = value.split('/')[0]
            return formatted_type
        jinja_environment.filters['filetype'] = format_filetype
        util.profiler.add_event("Begin ViewHandler:add_jinja_filters")
        user = self.get_current_user()
        util.profiler.add_event("Begin ViewHandler:get_current_user()")
        # Only get sign in links if no user is present
        if user is None:
            # Sets up the google sign in link, used in modal on all pages,
            # which must include a special flag to alert this handler that
            # google credentials are present in the cookie. It should also
            # incorporate any redirect already set in the URL.
            redirect = str(self.request.get('redirect')) or self.request.url
            google_redirect = util.set_query_parameters(
                redirect, google_login='true')
            google_login_url = app_engine_users.create_login_url(google_redirect)
        else:
            google_login_url = ''
        util.profiler.add_event("Begin ViewHandler:get_login_redirects")
        # default parameters that all views get
        kwargs['user'] = user
        kwargs['google_login_url'] = google_login_url
        kwargs['hosting_domain'] = os.environ['HOSTING_DOMAIN']
        kwargs['share_url'] = self.request.url
        kwargs['google_client_id'] = config.google_client_id
        # Truncate to whole seconds for display.
        kwargs['server_time'] = datetime.datetime.today().replace(microsecond=0)
        util.profiler.add_event("Begin ViewHandler:set_user_params")
        # Determine which Facebook app depending on environment
        kwargs['localhost'] = False
        if util.is_localhost():
            kwargs['localhost'] = True
            kwargs['facebook_app_id'] = config.facebook_app_id_test
            kwargs['facebook_app_secret'] = config.facebook_app_secret_test
        elif os.environ['HOSTING_DOMAIN'] == 'acceptance-dot-mindsetkit.appspot.com':
            kwargs['facebook_app_id'] = config.facebook_app_id_acceptance
            kwargs['facebook_app_secret'] = config.facebook_app_secret_acceptance
        else:
            kwargs['facebook_app_id'] = config.facebook_app_id
            kwargs['facebook_app_secret'] = config.facebook_app_secret
        util.profiler.add_event("Begin ViewHandler:start_fetching_themes")
        # Fetch all themes and topics for navigation
        courses = self.api.get('Theme')
        if courses:
            # Fetch all topics for courses
            # NOTE(review): 'id' here shadows the builtin of the same name;
            # harmless within this comprehension but worth renaming someday.
            course_topic_ids = [id for course in courses for id in course.topics]
            course_topics = self.api.get_by_id(course_topic_ids)
            # Associate topics with appropriate courses
            for course in courses:
                course.associate_topics(course_topics)
                # Special case for "Teachers" kit
                if course.name == 'Growth Mindset for Teachers':
                    kwargs['teacher_topics'] = course.topics_list
        kwargs['courses'] = courses
        util.profiler.add_event("Begin ViewHandler:finish_fetching_themes")
        logging.info(util.profiler)
        # Try to load the requested template. If it doesn't exist, replace
        # it with a 404.
        try:
            template = jinja_environment.get_template(template_filename)
        except jinja2.exceptions.TemplateNotFound:
            logging.error("TemplateNotFound: {}".format(template_filename))
            return self.http_not_found()
        # Render the template with data and write it to the HTTP response.
        self.response.write(template.render(kwargs))

    def handle_google_response(self):
        """Figure out the results of the user's interaction with google.
        Attempt to login a/o register, then refresh to clear temporary url
        parameters.
        """
        logging.info("Handling a google login response.")
        error_code = None
        response = self.authenticate('google')
        logging.info("Response is: {}".format(response))
        if isinstance(response, User):
            # authenticate() returned a full User entity: they're logged in.
            user = response
            logging.info("User {} found, logging them in.".format(user.email))
        elif (('email_exists' in response) or
                (response == 'credentials_missing')):
            # Provide the error code to the template so the UI can advise
            # the user.
            error_code = response
        elif response == 'credentials_invalid':
            logging.info("There's no record of this google user, registering.")
            response = self.register('google')
            if isinstance(response, User):
                user = response
                logging.info("Registered {}.".format(user.email))
            else:
                # This will get written into the template, and the UI can
                # display an appropriate message.
                error_code = response
                logging.info("Error in auto-registering google user.")
        # Now that google's response has been handled, refresh the
        # request. This will create one of two behaviors:
        # * If the user was correctly logged in a/o registered, they get
        #   the requested page, ready to use, no complications, no params.
        # * If there was an error, an error code is available about why,
        #   and the url fragment/hash will trigger the login modal so a
        #   message can be displayed.
        params = {'google_login': ''}  # means remove this parameter
        new_fragment = ''  # means remove hash/fragment
        if error_code:
            logging.info("Error code: {}.".format(error_code))
            params['google_login_error'] = error_code
            new_fragment = 'login'
        refresh_url = util.set_query_parameters(
            self.request.url, new_fragment=new_fragment, **params)
        self.redirect(refresh_url)

    def dispatch(self):
        """Run the normal handler dispatch inside a catch-all guard.

        Exceptions are logged in full on the server; the client only
        sees the exception class name (or the traceback when `debug`).
        """
        try:
            logging.info("ViewHandler.dispatch()")
            # Call the overridden dispatch(), which has the effect of running
            # the get() or post() etc. of the inheriting class.
            BaseHandler.dispatch(self)
        except Exception as error:
            trace = traceback.format_exc()
            # We don't want to tell the public about our exception messages.
            # Just provide the exception type to the client, but log the full
            # details on the server.
            logging.error("{}\n{}".format(error, trace))
            # NOTE(review): this dict is built but never used in this
            # method — presumably leftover from a JSON error response;
            # confirm before removing.
            response = {
                'success': False,
                'message': error.__class__.__name__,
            }
            if debug:
                self.response.write('<pre>{}</pre>'.format(
                    traceback.format_exc()))
            else:
                self.response.write("We are having technical difficulties.")
            return

    def http_not_found(self, **kwargs):
        """Respond with a 404.

        Rebuilds the same default template context as write() (login
        URLs, Facebook config, course navigation) so the 404 page can
        render the standard chrome.

        Example use:
            class Foo(ViewHandler):
                def get(self):
                    return self.http_not_found()
        """
        # default parameters that all views get
        user = self.get_current_user()
        # Sets up the google sign in link, used in modal on all pages, which
        # must include a special flag to alert this handler that google
        # credentials are present in the cookie. It should also incorporate any
        # redirect already set in the URL.
        redirect = str(self.request.get('redirect')) or self.request.url
        google_redirect = util.set_query_parameters(
            redirect, google_login='true')
        google_login_url = app_engine_users.create_login_url(google_redirect)
        kwargs['user'] = user
        kwargs['google_login_url'] = google_login_url
        kwargs['hosting_domain'] = os.environ['HOSTING_DOMAIN']
        kwargs['share_url'] = self.request.url
        kwargs['google_client_id'] = config.google_client_id
        # Determine which Facebook app depending on environment
        kwargs['localhost'] = False
        if util.is_localhost():
            kwargs['localhost'] = True
            kwargs['facebook_app_id'] = config.facebook_app_id_test
            kwargs['facebook_app_secret'] = config.facebook_app_secret_test
        elif os.environ['HOSTING_DOMAIN'] == 'acceptance-dot-mindsetkit.appspot.com':
            kwargs['facebook_app_id'] = config.facebook_app_id_acceptance
            kwargs['facebook_app_secret'] = config.facebook_app_secret_acceptance
        else:
            kwargs['facebook_app_id'] = config.facebook_app_id
            kwargs['facebook_app_secret'] = config.facebook_app_secret
        # Fetch all themes and topics for navigation
        courses = self.api.get('Theme')
        if courses:
            # fetch topics for each theme
            course_topic_ids = [id for course in courses for id in course.topics]
            course_topics = self.api.get_by_id(course_topic_ids)
            # associate topics with appropriate courses
            for course in courses:
                course.associate_topics(course_topics)
                # Special case for "Teachers" kit
                if course.name == 'Growth Mindset for Teachers':
                    kwargs['teacher_topics'] = course.topics_list
        kwargs['courses'] = courses
        self.error(404)
        jinja_environment = self.get_jinja_environment()
        template = jinja_environment.get_template('404.html')
        self.response.write(template.render(kwargs))

    def head(self, **kwargs):
        """Answer HTTP HEAD with an empty body."""
        # You're not supposed to give a message body to HEAD calls
        # http://stackoverflow.com/questions/1501573/when-should-i-be-responding-to-http-head-requests-on-my-website
        self.response.clear()

    def options(self, **kwargs):
        """Answer HTTP OPTIONS with the allowed methods."""
        # OPTION Response based on ->
        # http://zacstewart.com/2012/04/14/http-options-method.html
        self.response.set_status(200)
        self.response.headers['Allow'] = 'GET,HEAD,OPTIONS'
class Logout(ViewHandler):
"""Clears the user's session, closes connections to google."""
def get(self):
self.log_out()
redirect = self.request.get('redirect') or '/'
if util.is_localhost():
# In the SDK, it makes sense to log the current user out of Google
# entirely (otherwise admins have to click logout twice, b/c
# existing code will attempt to sign them right in again).
self.redirect(app_engine_users.create_logout_url(redirect))
else:
# In production, we don't want to sign users out | |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Noise Mechanisms.
"""
from abc import abstractmethod
from mindspore import Tensor
from mindspore.nn import Cell
from mindspore.ops import operations as P
from mindspore.ops.composite import normal
from mindspore.common.parameter import Parameter
from mindspore.common import dtype as mstype
from mindarmour.utils._check_param import check_param_type
from mindarmour.utils._check_param import check_value_positive
from mindarmour.utils._check_param import check_param_in_range
from mindarmour.utils._check_param import check_value_non_negative
from mindarmour.utils.logger import LogUtil
# Shared MindArmour logger singleton and the tag used to label log
# messages emitted from this module.
LOGGER = LogUtil.get_instance()
TAG = 'NoiseMechanism'
class ClipMechanismsFactory:
    """Factory class of clip mechanisms."""

    def __init__(self):
        pass

    @staticmethod
    def create(mech_name, decay_policy='Linear', learning_rate=0.001,
               target_unclipped_quantile=0.9, fraction_stddev=0.01, seed=0):
        """Construct an adaptive-clipping mechanism by name.

        Args:
            mech_name(str): Clip noise generated strategy, support 'Gaussian' now.
            decay_policy(str): Decay policy of adaptive clipping, one of
                ['Linear', 'Geometric']. Default: Linear.
            learning_rate(float): Learning rate of update norm clip. Default: 0.001.
            target_unclipped_quantile(float): Target quantile of norm clip.
                Default: 0.9.
            fraction_stddev(float): Stddev of the Gaussian normal used in
                empirical_fraction, i.e.
                :math:`empirical fraction + N(0, fraction sstddev)`.
                Default: 0.01.
            seed(int): Original random seed; 0 means a secure random number
                is used, any other value seeds the generator. Default: 0.

        Returns:
            Mechanisms, class of noise generated Mechanism.

        Raises:
            NameError: `mech_name` must be in ['Gaussian'].
        """
        # Guard clause: only the Gaussian strategy is implemented.
        if mech_name != 'Gaussian':
            raise NameError("The {} is not implement, please choose "
                            "['Gaussian']".format(mech_name))
        return AdaClippingWithGaussianRandom(decay_policy, learning_rate,
                                             target_unclipped_quantile,
                                             fraction_stddev, seed)
class NoiseMechanismsFactory:
    """Factory producing gradient-noise mechanisms."""

    def __init__(self):
        pass

    @staticmethod
    def create(mech_name, norm_bound=1.0, initial_noise_multiplier=1.0, seed=0, noise_decay_rate=6e-6,
               decay_policy=None):
        """
        Build a noise mechanism by name.

        Args:
            mech_name(str): Noise generated strategy, 'Gaussian' or 'AdaGaussian'.
                Noise decays over training with 'AdaGaussian' and stays constant
                with 'Gaussian'.
            norm_bound(float): Clipping bound for the l2 norm of the gradients.
                Default: 1.0.
            initial_noise_multiplier(float): Ratio of the standard deviation of
                Gaussian noise divided by the norm_bound, used to calculate
                privacy spent. Default: 1.0.
            seed(int): Original random seed; seed=0 means a secure random source,
                any other value generates values from that fixed seed. Default: 0.
            noise_decay_rate(float): Hyper parameter for controlling the noise decay.
                Default: 6e-6.
            decay_policy(str): Mechanisms parameters update policy; None means no
                parameter updates. Default: None.

        Returns:
            Mechanisms, class of noise generated Mechanism.

        Raises:
            NameError: `mech_name` must be in ['Gaussian', 'AdaGaussian'].

        Examples:
            >>> noise_mech = NoiseMechanismsFactory().create('Gaussian',
            >>>                                              norm_bound=1.0,
            >>>                                              initial_noise_multiplier=1.0)
            >>> clip_mech = ClipMechanismsFactory().create('Gaussian',
            >>>                                            decay_policy='Linear',
            >>>                                            learning_rate=0.001,
            >>>                                            target_unclipped_quantile=0.9,
            >>>                                            fraction_stddev=0.01)
        """
        # Dispatch on the mechanism name; unknown names are an error.
        if mech_name == 'AdaGaussian':
            return NoiseAdaGaussianRandom(norm_bound=norm_bound,
                                          initial_noise_multiplier=initial_noise_multiplier,
                                          seed=seed,
                                          noise_decay_rate=noise_decay_rate,
                                          decay_policy=decay_policy)
        if mech_name == 'Gaussian':
            return NoiseGaussianRandom(norm_bound=norm_bound,
                                       initial_noise_multiplier=initial_noise_multiplier,
                                       seed=seed,
                                       decay_policy=decay_policy)
        raise NameError("The {} is not implement, please choose "
                        "['Gaussian', 'AdaGaussian']".format(mech_name))
class _Mechanisms(Cell):
    """
    Basic class of noise generated mechanism.

    Subclasses must implement ``construct`` to map a gradients tensor to a
    noise tensor of the same shape.
    """

    @abstractmethod
    def construct(self, gradients):
        """
        Construct function.

        Args:
            gradients(Tensor): The gradients to generate noise for.
        """
class NoiseGaussianRandom(_Mechanisms):
    """
    Gaussian noise generated mechanism with a fixed noise multiplier.

    Args:
        norm_bound(float): Clipping bound for the l2 norm of the gradients.
            Default: 1.0.
        initial_noise_multiplier(float): Ratio of the standard deviation of
            Gaussian noise divided by the norm_bound, used to calculate
            privacy spent. Default: 1.0.
        seed(int): Original random seed; seed=0 means a secure random source,
            any other value generates values from that fixed seed. Default: 0.
        decay_policy(str): Must be None for this class. Default: None.

    Returns:
        Tensor, generated noise with shape like given gradients.

    Examples:
        >>> gradients = Tensor([0.2, 0.9], mstype.float32)
        >>> net = NoiseGaussianRandom(0.1, 1.0, 0, None)
        >>> res = net(gradients)
        >>> print(res)
    """

    def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.0, seed=0, decay_policy=None):
        super(NoiseGaussianRandom, self).__init__()
        # Validate the clipping bound, then store it as a float32 tensor.
        bound = check_param_type('norm_bound', norm_bound, float)
        check_value_positive('norm_bound', bound)
        self._norm_bound = Tensor(bound, mstype.float32)
        # Same treatment for the noise multiplier.
        multiplier = check_param_type('initial_noise_multiplier', initial_noise_multiplier, float)
        check_value_positive('initial_noise_multiplier', multiplier)
        self._initial_noise_multiplier = Tensor(multiplier, mstype.float32)
        self._mean = Tensor(0, mstype.float32)
        # The fixed-noise variant never decays; reject any decay policy.
        if decay_policy is not None:
            raise ValueError('decay_policy must be None in GaussianRandom class, but got {}.'.format(decay_policy))
        self._decay_policy = decay_policy
        self._seed = check_value_non_negative('seed', check_param_type('seed', seed, int))

    def construct(self, gradients):
        """
        Generated Gaussian noise.

        Args:
            gradients(Tensor): The gradients.

        Returns:
            Tensor, generated noise with shape like given gradients.
        """
        grad_shape = P.Shape()(gradients)
        # stddev = clip bound * multiplier, per the Gaussian mechanism.
        sigma = P.Mul()(self._norm_bound, self._initial_noise_multiplier)
        return normal(grad_shape, self._mean, sigma, self._seed)
class NoiseAdaGaussianRandom(NoiseGaussianRandom):
    """
    Adaptive Gaussian noise generated mechanism. Noise decays during training;
    decay mode may be 'Time', 'Step' or 'Exp'. `self._noise_multiplier` is
    updated during model.train by _MechanismsParamsUpdater.

    Args:
        norm_bound(float): Clipping bound for the l2 norm of the gradients.
            Default: 1.0.
        initial_noise_multiplier(float): Ratio of the standard deviation of
            Gaussian noise divided by the norm_bound, used to calculate
            privacy spent. Default: 1.0.
        seed(int): Original random seed; seed=0 means a secure random source,
            any other value generates values from that fixed seed. Default: 0.
        noise_decay_rate(float): Hyper parameter for controlling the noise decay.
            Default: 6e-6.
        decay_policy(str): Noise decay strategy, one of 'Step', 'Time', 'Exp'.
            Default: 'Exp'.

    Returns:
        Tensor, generated noise with shape like given gradients.

    Examples:
        >>> gradients = Tensor([0.2, 0.9], mstype.float32)
        >>> net = NoiseAdaGaussianRandom(1.0, 1.0, 0, 6e-6, "Exp")
        >>> res = net(gradients)
        >>> print(res)
    """

    def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.0, seed=0, noise_decay_rate=6e-6, decay_policy='Exp'):
        super(NoiseAdaGaussianRandom, self).__init__(norm_bound=norm_bound,
                                                     initial_noise_multiplier=initial_noise_multiplier,
                                                     seed=seed)
        # Trainable copy of the multiplier; refreshed by _MechanismsParamsUpdater.
        self._noise_multiplier = Parameter(self._initial_noise_multiplier,
                                           name='noise_multiplier')
        decay_rate = check_param_type('noise_decay_rate', noise_decay_rate, float)
        check_param_in_range('noise_decay_rate', decay_rate, 0.0, 1.0)
        self._noise_decay_rate = Tensor(decay_rate, mstype.float32)
        if decay_policy not in ('Time', 'Step', 'Exp'):
            raise NameError("The decay_policy must be in ['Time', 'Step', 'Exp'], but "
                            "get {}".format(decay_policy))
        self._decay_policy = decay_policy

    def construct(self, gradients):
        """
        Generated Adaptive Gaussian noise.

        Args:
            gradients(Tensor): The gradients.

        Returns:
            Tensor, generated noise with shape like given gradients.
        """
        grad_shape = P.Shape()(gradients)
        # Uses the *current* (decayed) multiplier, unlike the base class.
        sigma = P.Mul()(self._norm_bound, self._noise_multiplier)
        return normal(grad_shape, self._mean, sigma, self._seed)
class _MechanismsParamsUpdater(Cell):
    """
    Refresh a mechanism's noise multiplier once per training step.

    Args:
        decay_policy(str): Update policy, supplied by the mechanism ('Time',
            'Step' or anything else, which selects the exponential rule).
        decay_rate(Tensor): Hyper parameter controlling the decay size.
        cur_noise_multiplier(Parameter): Current multiplier, updated in place.
        init_noise_multiplier(Parameter): Initial multiplier the decay is
            anchored to.

    Returns:
        Tuple, next params value.
    """

    def __init__(self, decay_policy, decay_rate, cur_noise_multiplier, init_noise_multiplier):
        super(_MechanismsParamsUpdater, self).__init__()
        self._decay_policy = decay_policy
        self._decay_rate = decay_rate
        self._cur_noise_multiplier = cur_noise_multiplier
        self._init_noise_multiplier = init_noise_multiplier
        # Primitive ops are instantiated once here so construct() stays pure.
        self._add = P.Add()
        self._sub = P.Sub()
        self._mul = P.Mul()
        self._div = P.Div()
        self._exp = P.Exp()
        self._assign = P.Assign()
        self._one = Tensor(1, mstype.float32)

    def construct(self):
        """
        Compute the next multiplier per the decay policy and assign it to
        `self._cur_noise_multiplier`.

        Returns:
            Tuple, next step parameters value.
        """
        if self._decay_policy == 'Time':
            # init / (init/cur + rate): hyperbolic decay anchored at init.
            denom = self._add(self._div(self._init_noise_multiplier, self._cur_noise_multiplier),
                              self._decay_rate)
            updated = self._div(self._init_noise_multiplier, denom)
        elif self._decay_policy == 'Step':
            # cur * (1 - rate): geometric decay.
            updated = self._mul(self._sub(self._one, self._decay_rate),
                                self._cur_noise_multiplier)
        else:
            # cur / e^rate: exponential decay (the 'Exp' policy).
            updated = self._div(self._cur_noise_multiplier, self._exp(self._decay_rate))
        return self._assign(self._cur_noise_multiplier, updated)
class AdaClippingWithGaussianRandom(Cell):
"""
Adaptive clipping. If `decay_policy` is 'Linear', the update formula :math:`norm bound = norm bound -
learning rate*(beta - target unclipped quantile)`.
If `decay_policy` is 'Geometric', the update formula is :math:`norm bound =
norm bound*exp(-learning rate*(empirical fraction - target unclipped quantile))`.
where beta is the empirical fraction of samples | |
#
# This file defines the layer that talks to lldb
#
import os
import re
import sys
import lldb
import vim
from vim_ui import UI
# =================================================
# Convert some enum value to its string counterpart
# =================================================
# Shamelessly copy/pasted from lldbutil.py in the test suite
def state_type_to_str(enum):
    """Returns the stateType string given an enum."""
    # Lookup table built at call time so the module can be imported even if
    # lldb's enum values are probed lazily.
    names = {
        lldb.eStateInvalid: "invalid",
        lldb.eStateUnloaded: "unloaded",
        lldb.eStateConnected: "connected",
        lldb.eStateAttaching: "attaching",
        lldb.eStateLaunching: "launching",
        lldb.eStateStopped: "stopped",
        lldb.eStateRunning: "running",
        lldb.eStateStepping: "stepping",
        lldb.eStateCrashed: "crashed",
        lldb.eStateDetached: "detached",
        lldb.eStateExited: "exited",
        lldb.eStateSuspended: "suspended",
    }
    if enum not in names:
        raise Exception("Unknown StateType enum")
    return names[enum]
class StepType:
    # Stepping granularities accepted by LLDBController.doStep().
    INSTRUCTION = 1        # single machine instruction, stepping into calls
    INSTRUCTION_OVER = 2   # single machine instruction, stepping over calls
    INTO = 3               # source-level step into
    OVER = 4               # source-level step over
    OUT = 5                # finish the current frame
class LLDBController(object):
""" Handles Vim and LLDB events such as commands and lldb events. """
    # Timeouts (sec) for waiting on new events. Because vim is not multi-threaded, we are restricted to
    # servicing LLDB events from the main UI thread. Usually, we only process events that are already
    # sitting on the queue. But in some situations (when we are expecting an event as a result of some
    # user interaction) we want to wait for it. The constants below set the wait periods during which the
    # Vim UI is "blocked". Lower numbers will make Vim more responsive, but LLDB will be delayed, and higher
    # numbers will mean that LLDB events are processed faster, but the Vim UI may appear less responsive at
    # times.
eventDelayStep = 2
eventDelayLaunch = 1
eventDelayContinue = 1
    def __init__(self):
        """ Creates the LLDB SBDebugger object and initializes the UI class. """
        # No target/process until the user creates or attaches to one.
        self.target = None
        self.process = None
        self.load_dependent_modules = True
        # Create the debugger first; the command interpreter hangs off it.
        self.dbg = lldb.SBDebugger.Create()
        self.commandInterpreter = self.dbg.GetCommandInterpreter()
        self.ui = UI()
def completeCommand(self, a, l, p):
""" Returns a list of viable completions for command a with length l and cursor at p """
assert l[0] == 'L'
# Remove first 'L' character that all commands start with
l = l[1:]
# Adjust length as string has 1 less character
p = int(p) - 1
result = lldb.SBStringList()
num = self.commandInterpreter.HandleCompletion(l, p, 1, -1, result)
if num == -1:
# FIXME: insert completion character... what's a completion
# character?
pass
elif num == -2:
# FIXME: replace line with result.GetStringAtIndex(0)
pass
if result.GetSize() > 0:
results = filter(None, [result.GetStringAtIndex(x)
for x in range(result.GetSize())])
return results
else:
return []
def doStep(self, stepType):
""" Perform a step command and block the UI for eventDelayStep seconds in order to process
events on lldb's event queue.
FIXME: if the step does not complete in eventDelayStep seconds, we relinquish control to
the main thread to avoid the appearance of a "hang". If this happens, the UI will
update whenever; usually when the user moves the cursor. This is somewhat annoying.
"""
if not self.process:
sys.stderr.write("No process to step")
return
t = self.process.GetSelectedThread()
if stepType == StepType.INSTRUCTION:
t.StepInstruction(False)
if stepType == StepType.INSTRUCTION_OVER:
t.StepInstruction(True)
elif stepType == StepType.INTO:
t.StepInto()
elif stepType == StepType.OVER:
t.StepOver()
elif stepType == StepType.OUT:
t.StepOut()
self.processPendingEvents(self.eventDelayStep, True)
def doSelect(self, command, args):
""" Like doCommand, but suppress output when "select" is the first argument."""
a = args.split(' ')
return self.doCommand(command, args, "select" != a[0], True)
def doProcess(self, args):
""" Handle 'process' command. If 'launch' is requested, use doLaunch() instead
of the command interpreter to start the inferior process.
"""
a = args.split(' ')
if len(args) == 0 or (len(a) > 0 and a[0] != 'launch'):
self.doCommand("process", args)
#self.ui.update(self.target, "", self)
else:
self.doLaunch('-s' not in args, "")
    def doAttach(self, process_name):
        """ Handle process attach. """
        error = lldb.SBError()

        # A listener and an empty target are needed before attaching by name.
        self.processListener = lldb.SBListener("process_event_listener")
        self.target = self.dbg.CreateTarget('')
        self.process = self.target.AttachToProcessWithName(
            self.processListener, process_name, False, error)
        if not error.Success():
            sys.stderr.write("Error during attach: " + str(error))
            return

        # Attach succeeded: bring up the UI and remember the pid.
        self.ui.activate()
        self.pid = self.process.GetProcessID()

        print "Attached to %s (pid=%d)" % (process_name, self.pid)
def doDetach(self):
if self.process is not None and self.process.IsValid():
pid = self.process.GetProcessID()
state = state_type_to_str(self.process.GetState())
self.process.Detach()
self.processPendingEvents(self.eventDelayLaunch)
def doLaunch(self, stop_at_entry, args):
""" Handle process launch. """
error = lldb.SBError()
fs = self.target.GetExecutable()
exe = os.path.join(fs.GetDirectory(), fs.GetFilename())
if self.process is not None and self.process.IsValid():
pid = self.process.GetProcessID()
state = state_type_to_str(self.process.GetState())
self.process.Destroy()
launchInfo = lldb.SBLaunchInfo(args.split(' '))
self.process = self.target.Launch(launchInfo, error)
if not error.Success():
sys.stderr.write("Error during launch: " + str(error))
return
# launch succeeded, store pid and add some event listeners
self.pid = self.process.GetProcessID()
self.processListener = lldb.SBListener("process_event_listener")
self.process.GetBroadcaster().AddListener(
self.processListener, lldb.SBProcess.eBroadcastBitStateChanged)
print "Launched %s %s (pid=%d)" % (exe, args, self.pid)
if not stop_at_entry:
self.doContinue()
else:
self.processPendingEvents(self.eventDelayLaunch)
def doTarget(self, args):
""" Pass target command to interpreter, except if argument is not one of the valid options, or
is create, in which case try to create a target with the argument as the executable. For example:
target list ==> handled by interpreter
target create blah ==> custom creation of target 'blah'
target blah ==> also creates target blah
"""
target_args = [ # "create",
"delete",
"list",
"modules",
"select",
"stop-hook",
"symbols",
"variable"]
a = args.split(' ')
if len(args) == 0 or (len(a) > 0 and a[0] in target_args):
self.doCommand("target", args)
return
elif len(a) > 1 and a[0] == "create":
exe = a[1]
elif len(a) == 1 and a[0] not in target_args:
exe = a[0]
err = lldb.SBError()
self.target = self.dbg.CreateTarget(
exe, None, None, self.load_dependent_modules, err)
if not self.target:
sys.stderr.write(
"Error creating target %s. %s" %
(str(exe), str(err)))
return
self.ui.activate()
self.ui.update(self.target, "created target %s" % str(exe), self)
def doContinue(self):
""" Handle 'contiue' command.
FIXME: switch to doCommand("continue", ...) to handle -i ignore-count param.
"""
if not self.process or not self.process.IsValid():
sys.stderr.write("No process to continue")
return
self.process.Continue()
self.processPendingEvents(self.eventDelayContinue)
def doBreakpoint(self, args):
""" Handle breakpoint command with command interpreter, except if the user calls
"breakpoint" with no other args, in which case add a breakpoint at the line
under the cursor.
"""
a = args.split(' ')
if len(args) == 0:
show_output = False
# User called us with no args, so toggle the bp under cursor
cw = vim.current.window
cb = vim.current.buffer
name = cb.name
line = cw.cursor[0]
# Since the UI is responsbile for placing signs at bp locations, we have to
# ask it if there already is one or more breakpoints at (file,
# line)...
if self.ui.haveBreakpoint(name, line):
bps = self.ui.getBreakpoints(name, line)
args = "delete %s" % " ".join([str(b.GetID()) for b in bps])
self.ui.deleteBreakpoints(name, line)
else:
args = "set -f %s -l %d" % (name, line)
else:
show_output = True
self.doCommand("breakpoint", args, show_output)
return
def doRefresh(self):
""" process pending events and update UI on request """
status = self.processPendingEvents()
def doShow(self, name):
""" handle :Lshow <name> """
if not name:
self.ui.activate()
return
if self.ui.showWindow(name):
self.ui.update(self.target, "", self)
def doHide(self, name):
""" handle :Lhide <name> """
if self.ui.hideWindow(name):
self.ui.update(self.target, "", self)
    def doExit(self):
        # Shut down lldb and drop our reference so nothing touches the
        # debugger after termination.
        self.dbg.Terminate()
        self.dbg = None
def getCommandResult(self, command, command_args):
""" Run cmd in the command interpreter and returns (success, output) """
result = lldb.SBCommandReturnObject()
cmd = "%s %s" % (command, command_args)
self.commandInterpreter.HandleCommand(cmd, result)
return (result.Succeeded(), result.GetOutput()
if result.Succeeded() else result.GetError())
def doCommand(
self,
command,
command_args,
print_on_success=True,
goto_file=False):
""" Run cmd in interpreter and print result (success or failure) on the vim status line. """
(success, output) = self.getCommandResult(command, command_args)
if success:
self.ui.update(self.target, "", self, goto_file)
if len(output) > 0 and print_on_success:
print output
else:
sys.stderr.write(output)
def getCommandOutput(self, command, command_args=""):
""" runs cmd in the command interpreter andreturns (status, result) """
result = lldb.SBCommandReturnObject()
cmd = "%s %s" % (command, command_args)
self.commandInterpreter.HandleCommand(cmd, result)
return (result.Succeeded(), result.GetOutput()
if result.Succeeded() else result.GetError())
def processPendingEvents(self, wait_seconds=0, goto_file=True):
""" Handle any events that are queued from the inferior.
Blocks for at most wait_seconds, or if wait_seconds == 0,
process only events that are already queued.
"""
status = None
num_events_handled = 0
if self.process is not None:
event = lldb.SBEvent()
old_state = self.process.GetState()
new_state = None
done = False
| |
<reponame>w3c/tracecontext-spec
#!/usr/bin/env python
import os
import sys
import unittest
from client import TestClient
from server import TestServer
from tracecontext import Traceparent, Tracestate
client = None
server = None
def environ(name, default = None):
    """Return the value of environment variable `name`.

    If the variable is unset and `default` is provided (not None), the default
    is stored into os.environ and returned; otherwise EnvironmentError is
    raised. The previous revision tested `if default:`, which wrongly raised
    for falsy-but-valid defaults such as '' — compare against None instead.
    """
    if name not in os.environ:
        if default is None:
            raise EnvironmentError('environment variable {} is not defined'.format(name))
        os.environ[name] = default
    return os.environ[name]
STRICT_LEVEL = int(environ('STRICT_LEVEL', '2'))
print('STRICT_LEVEL: {}'.format(STRICT_LEVEL))
def setUpModule():
    """Create the harness client/server pair and smoke-test the loopback."""
    global client
    global server
    # Fail fast if the service under test was never configured.
    environ('SERVICE_ENDPOINT')
    client = client or TestClient(host = '127.0.0.1', port = 7777, timeout = 5)
    server = server or TestServer(host = '127.0.0.1', port = 7777, timeout = 3)
    server.start()
    # One round-trip through our own callback endpoint before any test runs.
    with client.scope() as scope:
        scope.send_request()
def tearDownModule():
    # Stop the callback server started in setUpModule().
    server.stop()
class TestBase(unittest.TestCase):
    """Shared plumbing for trace-context harness test cases."""
    # Class-level import keeps `re` in scope while the patterns below are built.
    import re
    # Header names are matched case-insensitively, per HTTP.
    traceparent_name_re = re.compile(r'^traceparent$', re.IGNORECASE)
    # traceparent value: version - trace-id - parent-id - trace-flags, lowercase hex.
    traceparent_format = r'^([0-9a-f]{2})-([0-9a-f]{32})-([0-9a-f]{16})-([0-9a-f]{2})$'
    traceparent_format_re = re.compile(traceparent_format)
    tracestate_name_re = re.compile(r'^tracestate$', re.IGNORECASE)

    def make_request(self, headers, count = 1):
        """Send `headers` to the service under test and return the `count`
        callback payloads it made back to the harness; fails the test (with a
        verbose transcript) if any callback is missing."""
        import pprint
        with client.scope() as scope:
            arguments = {
                'url': environ('SERVICE_ENDPOINT'),
                'headers': headers,
                'arguments': [],
            }
            # One harness callback URL per expected downstream request.
            for idx in range(count):
                arguments['arguments'].append({'url': scope.url(str(idx)), 'arguments': []})
            response = scope.send_request(arguments = arguments)
            # Build a human-readable transcript for failure messages and
            # HARNESS_DEBUG output.
            verbose = ['', '']
            verbose.append('Harness trying to send the following request to your service {0}'.format(arguments['url']))
            verbose.append('')
            verbose.append('POST {} HTTP/1.1'.format(arguments['url']))
            for key, value in arguments['headers']:
                verbose.append('{}: {}'.format(key, value))
            verbose.append('')
            verbose.append(pprint.pformat(arguments['arguments']))
            verbose.append('')
            results = response['results'][0]
            if 'exception' in results:
                verbose.append('Harness got an exception {}'.format(results['exception']))
                verbose.append('')
                verbose.append(results['msg'])
            else:
                verbose.append('Your service {} responded with HTTP status {}'.format(arguments['url'], results['status']))
                verbose.append('')
                for key, value in results['headers']:
                    verbose.append('{}: {}'.format(key, value))
                verbose.append('')
                if isinstance(results['body'], str):
                    verbose.append(results['body'])
                else:
                    verbose.append(pprint.pformat(results['body']))
            for idx in range(count):
                if str(idx) in response:
                    verbose.append('Your service {} made the following callback to harness'.format(arguments['url']))
                    verbose.append('')
                    for key, value in response[str(idx)]['headers']:
                        verbose.append('{}: {}'.format(key, value))
                    verbose.append('')
                    verbose.append('')
            verbose = os.linesep.join(verbose)
            if 'HARNESS_DEBUG' in os.environ:
                print(verbose)
            result = []
            # Every expected callback must be present; fail loudly otherwise.
            for idx in range(count):
                self.assertTrue(str(idx) in response, 'your test service failed to make a callback to the test harness {}'.format(verbose))
                result.append(response[str(idx)])
            return result

    def get_traceparent(self, headers):
        """Extract exactly one traceparent header from `headers` and parse it."""
        retval = []
        for key, value in headers:
            if self.traceparent_name_re.match(key):
                retval.append((key, value))
        self.assertEqual(len(retval), 1, 'expect one traceparent header, got {} {!r}'.format('more' if retval else 'zero', retval))
        return Traceparent.from_string(retval[0][1])

    def get_tracestate(self, headers):
        """Merge all tracestate headers (zero or more) into one Tracestate."""
        tracestate = Tracestate()
        for key, value in headers:
            if self.tracestate_name_re.match(key):
                tracestate.from_string(value)
        return tracestate

    def make_single_request_and_get_tracecontext(self, headers):
        # Single-callback convenience wrapper used by most test cases.
        headers = self.make_request(headers)[0]['headers']
        return (self.get_traceparent(headers), self.get_tracestate(headers))
class TraceContextTest(TestBase):
    def test_both_traceparent_and_tracestate_missing(self):
        '''
        harness sends a request without traceparent or tracestate
        expects a valid traceparent from the output header
        '''
        # get_traceparent (inside the helper) asserts a parseable header exists.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([])
    def test_traceparent_included_tracestate_missing(self):
        '''
        harness sends a request with traceparent but without tracestate
        expects a valid traceparent from the output header, with the same trace_id but different parent_id
        '''
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-12345678901234567890123456789012-1234567890123456-01'],
        ])
        # Trace continues (same trace-id) but each hop mints a new parent-id.
        self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
        self.assertNotEqual(traceparent.parent_id.hex(), '1234567890123456')
    def test_traceparent_duplicated(self):
        '''
        harness sends a request with two traceparent headers
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # Duplicate traceparent headers make the inbound context ambiguous, so
        # the service must discard it and start a fresh trace.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-12345678901234567890123456789011-1234567890123456-01'],
            ['traceparent', '00-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789011')
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_header_name(self):
        '''
        harness sends an invalid traceparent using wrong names
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # Misspelled header names must be ignored entirely (no trace continuation).
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['trace-parent', '00-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['trace.parent', '00-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_header_name_valid_casing(self):
        '''
        harness sends a valid traceparent using different combination of casing
        expects a valid traceparent from the output header
        '''
        # HTTP header names are case-insensitive; all casings must be accepted.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['TraceParent', '00-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['TrAcEpArEnT', '00-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['TRACEPARENT', '00-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_version_0x00(self):
        '''
        harness sends an invalid traceparent with extra trailing characters
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # Version 00 defines an exact format: any trailing characters make the
        # header invalid, so a new trace must be started.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-12345678901234567890123456789012-1234567890123456-01.'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-12345678901234567890123456789012-1234567890123456-01-what-the-future-will-be-like'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_version_0xcc(self):
        '''
        harness sends an valid traceparent with future version 204 (0xcc)
        expects a valid traceparent from the output header with the same trace_id
        '''
        # Unknown future versions must still be parsed per the version-00 prefix
        # rules; a dash may introduce future fields, but other separators invalidate.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', 'cc-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', 'cc-12345678901234567890123456789012-1234567890123456-01-what-the-future-will-be-like'],
        ])
        self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', 'cc-12345678901234567890123456789012-1234567890123456-01.what-the-future-will-be-like'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_version_0xff(self):
        '''
        harness sends an invalid traceparent with version 255 (0xff)
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # Version 0xff is explicitly forbidden, so the context must be discarded.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', 'ff-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_version_illegal_characters(self):
        '''
        harness sends an invalid traceparent with illegal characters in version
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # The version field must be exactly 2 lowercase hex digits.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '.0-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '0.-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_version_too_long(self):
        '''
        harness sends an invalid traceparent with version more than 2 HEXDIG
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # Oversized version fields are invalid; a fresh trace must be started.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '000-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '0000-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_version_too_short(self):
        '''
        harness sends an invalid traceparent with version less than 2 HEXDIG
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # A 1-digit version field is invalid; a fresh trace must be started.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '0-12345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
    def test_traceparent_trace_id_all_zero(self):
        '''
        harness sends an invalid traceparent with trace_id = 00000000000000000000000000000000
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # The all-zero trace-id is reserved as invalid.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-00000000000000000000000000000000-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '00000000000000000000000000000000')
    def test_traceparent_trace_id_illegal_characters(self):
        '''
        harness sends an invalid traceparent with illegal characters in trace_id
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # trace-id must be 32 lowercase hex digits; '.' anywhere invalidates it.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-.2345678901234567890123456789012-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '.2345678901234567890123456789012')

        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-1234567890123456789012345678901.-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '1234567890123456789012345678901.')
    def test_traceparent_trace_id_too_long(self):
        '''
        harness sends an invalid traceparent with trace_id more than 32 HEXDIG
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-123456789012345678901234567890123-1234567890123456-01'],
        ])
        # Neither the oversized id nor either 32-digit substring may be reused.
        self.assertNotEqual(traceparent.trace_id.hex(), '123456789012345678901234567890123')
        self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
        self.assertNotEqual(traceparent.trace_id.hex(), '23456789012345678901234567890123')
    def test_traceparent_trace_id_too_short(self):
        '''
        harness sends an invalid traceparent with trace_id less than 32 HEXDIG
        expects a valid traceparent from the output header, with a newly generated trace_id
        '''
        # A 31-digit trace-id is invalid; a fresh trace must be started.
        traceparent, tracestate = self.make_single_request_and_get_tracecontext([
            ['traceparent', '00-1234567890123456789012345678901-1234567890123456-01'],
        ])
        self.assertNotEqual(traceparent.trace_id.hex(), '1234567890123456789012345678901')
def test_traceparent_parent_id_all_zero(self):
'''
harness sends an invalid traceparent with parent_id = 0000000000000000
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-0000000000000000-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_parent_id_illegal_characters(self):
'''
harness sends an invalid traceparent with illegal characters in parent_id
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-.234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-123456789012345.-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_parent_id_too_long(self):
'''
harness sends an invalid traceparent with parent_id more than 16 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-12345678901234567-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_parent_id_too_short(self):
'''
harness sends an invalid traceparent with parent_id less than 16 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-123456789012345-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_trace_flags_illegal_characters(self):
'''
harness sends an invalid traceparent with illegal characters in trace_flags
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-.0'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-0.'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_trace_flags_too_long(self):
'''
harness sends an invalid traceparent with trace_flags more than 2 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-001'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_trace_flags_too_short(self):
'''
harness sends an invalid traceparent with trace_flags less than 2 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-1'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_ows_handling(self):
'''
harness sends an valid traceparent with heading and trailing OWS
expects a valid traceparent from the output header
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', ' 00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '\t00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01 '],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01\t'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '\t 00-12345678901234567890123456789012-1234567890123456-01 \t'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_tracestate_included_traceparent_missing(self):
'''
harness sends a request with tracestate but without traceparent
expects a valid traceparent from the output header
expects the tracestate to be discarded
'''
traceparent, tracestate1 = self.make_single_request_and_get_tracecontext([
['tracestate', 'foo=1'],
])
traceparent, tracestate2 = self.make_single_request_and_get_tracecontext([
['tracestate', 'foo=1,bar=2'],
])
self.assertEqual(len(tracestate1), len(tracestate2))
def test_tracestate_included_traceparent_included(self):
'''
harness sends a request with both tracestate and traceparent
expects a valid traceparent from the output header with the same trace_id
expects the tracestate to be inherited
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1,bar=2'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn("foo", tracestate)
self.assertIn("bar", tracestate)
self.assertEqual(tracestate['foo'], '1')
self.assertEqual(tracestate['bar'], '2')
def test_tracestate_header_name(self):
'''
harness sends an invalid tracestate using wrong names
expects the tracestate to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['trace-state', 'foo=1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['trace.state', 'foo=1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
def test_tracestate_header_name_valid_casing(self):
'''
harness sends a valid tracestate using different combination of casing
expects the tracestate to be inherited
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['TraceState', 'foo=1'],
])
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['TrAcEsTaTe', 'foo=1'],
])
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['TRACESTATE', 'foo=1'],
])
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
def test_tracestate_empty_header(self):
'''
harness sends a request with empty tracestate header
expects the empty tracestate to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', ''],
])
self.assertTrue(not tracestate or tracestate != '')
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', ''],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', ''],
['tracestate', 'foo=1'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
def test_tracestate_multiple_headers_different_keys(self):
'''
harness sends a request with multiple tracestate headers, each contains different set of keys
expects a combined tracestate
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1,bar=2'],
['tracestate', 'rojo=1,congo=2'],
['tracestate', 'baz=3'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate))
self.assertTrue('bar=2' in str(tracestate))
self.assertTrue('rojo=1' in str(tracestate))
self.assertTrue('congo=2' in str(tracestate))
self.assertTrue('baz=3' in str(tracestate))
self.assertTrue(str(tracestate).index('foo=1') < str(tracestate).index('bar=2'))
self.assertTrue(str(tracestate).index('bar=2') < str(tracestate).index('rojo=1'))
self.assertTrue(str(tracestate).index('rojo=1') < str(tracestate).index('congo=2'))
self.assertTrue(str(tracestate).index('congo=2') < str(tracestate).index('baz=3'))
@unittest.skipIf(STRICT_LEVEL < 2, "strict")
def test_tracestate_duplicated_keys(self):
'''
harness sends a request with an invalid tracestate header with duplicated keys
expects the tracestate to be inherited, and the duplicated keys to be either kept as-is or one of them
to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1,foo=1'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate))
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1,foo=2'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate) or 'foo=2' in str(tracestate))
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 'foo=1'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate))
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 'foo=2'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate) or 'foo=2' in str(tracestate))
def test_tracestate_all_allowed_characters(self):
'''
harness sends a request with a valid tracestate header with all legal characters
expects the tracestate to be inherited
'''
key_without_vendor = ''.join([
''.join(map(chr, range(0x61, 0x7A + 1))), # lcalpha
'0123456789', # DIGIT
'_',
'-',
'*',
'/',
])
key_with_vendor = key_without_vendor + '@a-z0-9_-*/'
value = ''.join([
''.join(map(chr, range(0x20, 0x2B + 1))),
''.join(map(chr, range(0x2D, 0x3C + 1))),
''.join(map(chr, range(0x3E, 0x7E + 1))),
])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', key_without_vendor + '=' + value],
])
self.assertIn(key_without_vendor, tracestate)
self.assertEqual(tracestate[key_without_vendor], value)
traceparent, tracestate = | |
<gh_stars>1-10
import asyncio
import copy
import sentinels
import typing
import uuid
from .. import aio
from ..context import Context
from ..message import (
Envelope,
Message,
)
from .messages import (
ShutdownMessage,
TimerConfiguration,
TimerDoneMessage,
ActletDoneMessage,
)
from .router import Router
UNHANDLED = sentinels.Sentinel('unhandled')
class EnvelopeTracker:
    """Awaitable handle that tracks the processing of a posted envelope.

    >>> envelope_tracker = proxy.tell(Message)
    >>> await envelope_tracker
    >>> # If we got here the actor has processed our message.
    """

    def __init__(self, envelope):
        self._envelope = envelope
        # Resolved by the receiving actor once the envelope is processed.
        self._fut = asyncio.get_event_loop().create_future()

    def __repr__(self):
        return '{}(envelope={})'.format(type(self).__name__, self._envelope)

    def __await__(self):
        return self._fut.__await__()

    @property
    def is_handled(self):
        """Whether a handler actually processed the message.

        A message that was consumed from the queue but matched no handler
        counts as NOT handled.

        Returns:
            bool: True when the message was dispatched to a handler.
        """
        return self._fut.result() is not UNHANDLED
class ActorMetaclass(type):
    """Metaclass that wires @register_handler-decorated methods into a Router."""

    def __new__(cls, name, bases, dct):
        # Build the actor class first, then attach its routing table.
        new_cls = super().__new__(cls, name, bases, dct)
        # Every actor class owns a fresh router instance.
        new_cls._router = Router()
        # Collect every method stamped by @register_handler (hidden '_handle'
        # attribute holds the message class it handles).
        for attr in dct.values():
            if hasattr(attr, '_handle'):
                new_cls._router.add(attr._handle, attr)
        # Inherit handlers from base classes for any message class this
        # class does not handle itself.
        for base in bases:
            if not hasattr(base, '_router'):
                continue
            for message_cls, handler in base._router._handlers.items():
                if message_cls not in new_cls._router._handlers:
                    new_cls._router.add(message_cls, handler)
        return new_cls
def register_handler(message_cls: typing.Type[Message]):
    """Mark a method as the handler for ``message_cls``.

    The decorator only stamps the function with a hidden ``_handle``
    attribute; ActorMetaclass later collects it into the class router.

    Args:
        message_cls (typing.Type[Message]): Message class the decorated
            function will handle.
    """
    def decorator(func):
        setattr(func, '_handle', message_cls)
        return func
    return decorator
class Actor(aio.service.Service, metaclass=ActorMetaclass):
"""The Actor.
Worker that handles messages one at a time.
"""
    def __init_subclass__(cls, **kwargs):
        """Register every Actor subclass in the global actors repository.

        Invoked automatically by Python for each subclass definition.
        """
        # Imported lazily to avoid a circular import with ..core.
        from ..core import ACTOR_REPOSITORY
        super().__init_subclass__(**kwargs)
        ACTOR_REPOSITORY.add(cls)
    def __init__(self, context: Context, actor_id: str):
        """Build the actor.

        Args:
            context (Context): Context object the actor operates in.
            actor_id (str): Identifier of this actor.
        """
        super().__init__()
        self._actor_id = actor_id
        self._context = context
        # Queue of (envelope, tracker) pairs consumed by serve().
        self._queue = asyncio.Queue()
        # Set once initialize() finished (successfully or not).
        self._initialize_event = asyncio.Event()
        # Holds the exception raised during initialize(), if any.
        self._initialize_exception = None
        # Set when serve() has finished its cleanup.
        self._shutdown_event = asyncio.Event()
        # Mapping of fully-qualified actlet name -> actlet object.
        self._actlets = {}
def __repr__(self):
return f'{self.__class__.__name__}(context={self.context.__class__.__name__}, actor_id={self.actor_id})'
    @property
    def actor_id(self):
        """str: Identifier of this actor, as given at construction."""
        return self._actor_id
    @property
    def context(self):
        """Context: The context object this actor operates in."""
        return self._context
    async def initialize(self, context):
        """Async __init__ hook; should be overridden by derived classes.

        Called once by serve() before the message loop starts. An exception
        raised here is stored and re-raised by wait_until_initialized().

        Args:
            context (Context): Context object.
        """
        pass
def post(self, envelope: Envelope) -> EnvelopeTracker:
"""Posts an envelope to this actor message queue.
Args:
envelope (libactors.Envelope): Envelope to post.
Returns:
EnvelopeTracker: Envelope tracker to track message progress and obtain the result.
"""
# NOTE: envelope is deep copied since it might include references to internal state structures of actors.
# After copying the message, the receiver actor may either access or modify any property safely.
envelope = copy.deepcopy(envelope)
tracker = EnvelopeTracker(envelope)
self._queue.put_nowait((envelope, tracker))
return tracker
def tell_me(self, message: Message) -> EnvelopeTracker:
"""Posts message to self.
Args:
message (libactors.Message): Message to post.
Returns:
EnvelopeTracker: Envelope tracker to track message progress and obtain the result.
"""
envelope = Envelope(
id=str(uuid.uuid4()),
sender=self.context.identity,
receiver=self.context.identity,
message=message,
)
return self.post(envelope)
    async def stop(self, context):
        """Stop the underlying service, which ends the serve() loop.

        Args:
            context (Context): Context object, used for logging.
        """
        context.info(f'stopping {self}')
        # NOTE(review): super().stop() is not awaited — if
        # aio.service.Service.stop is a coroutine function this silently
        # drops the coroutine; confirm against the Service base class.
        super().stop()
@register_handler(ShutdownMessage)
async def handle_shutdown(self, context, message):
"""Shutdown actor by stopping the service and canceling all active actlets.
"""
await asyncio.gather(
*[actlet.cancel() for actlet in self._actlets.values()],
return_exceptions=True,
)
await self.stop(context)
@register_handler(ActletDoneMessage)
async def handle_actlet_done(self, context, message: ActletDoneMessage):
context.debug(f'Actlet {context.sender} is done')
if isinstance(message.result, Message):
self.tell_me(message.result)
del self._actlets[context.sender]
    @register_handler(TimerDoneMessage)
    async def handle_timer_done(self, context, message):
        """TimerDoneMessage handler; should be overridden by derived classes if relevant.

        Args:
            context (Context): Context object.
            message (TimerDoneMessage): Completion notification from a timer actlet.
        """
        context.debug(f'Timer {context.sender} is done')
    async def wait_until_initialized(self) -> None:
        """Await until the actor has finished (or failed) initialization.

        Raises:
            Exception: Re-raises the exception captured during initialize(),
                if initialization failed.
        """
        await self._initialize_event.wait()
        if self._initialize_exception is not None:
            raise self._initialize_exception
    async def wait_until_shutdown(self) -> None:
        """Await until the actor has shut down (serve() finished its cleanup)."""
        await self._shutdown_event.wait()
    def is_shutdown(self):
        """Return True when the actor has completed its shutdown cleanup."""
        return self._shutdown_event.is_set()
    async def serve(self):
        """Actor's serving loop.

        * Runs initialize() once, then signals `_initialize_event` (storing
          any exception for wait_until_initialized()).
        * Waits on both the service stop event and the message queue;
          dispatches each envelope to its registered handler and resolves
          the envelope's tracker with the result / exception / UNHANDLED.
        * On exit (stop or error) signals `_shutdown_event` and removes
          itself from the core's actor table.
        """
        assert self.context is not None
        try:
            try:
                # Initialize actor.
                await self.initialize(self.context)
                # Initialization finished — wake all waiters.
                self._initialize_event.set()
            except Exception as e:
                self.context.exception(f'caught exception while initialized {self.actor_id}')
                # Still set the event so waiters are released ...
                self._initialize_event.set()
                # ... and store the exception so they re-raise it.
                self._initialize_exception = e
                raise
            waiter: aio.multiwaiter.MultiWaiter
            async with aio.multiwaiter.in_multi_waiter() as waiter:
                # Wake on service stop.
                waiter.add(self, lambda obj: obj.wait_stop())
                # Wake on an incoming envelope.
                waiter.add(self._queue, lambda obj: obj.get())
                while True:
                    self.context.debug('waiting for work')
                    await waiter.wait_first()
                    # Service was stopped — leave the loop.
                    if waiter.done(self):
                        self.context.info('service was stopped')
                        break
                    # Otherwise an envelope is ready in the queue.
                    envelope, tracker = waiter.result(self._queue)
                    self.context.debug(f'received envelope: {type(envelope.message)}')
                    # Look up the handler registered for this message class.
                    handler = self._router.match(envelope.message)
                    if handler is None:
                        # No handler: resolve the tracker with the UNHANDLED sentinel.
                        self.context.warning(f'no handler for message: {type(envelope.message)}')
                        tracker._fut.set_result(UNHANDLED)
                        self._queue.task_done()
                        continue
                    try:
                        # Dispatch to the handler; its return value resolves the tracker.
                        self.context.debug(
                            f'handling envelope using handler {handler.__qualname__}'
                        )
                        result = await handler(
                            self, self.context(envelope=envelope), envelope.message
                        )
                        tracker._fut.set_result(result)
                    except Exception as e:
                        # A failing handler must not kill the loop; the
                        # exception is propagated through the tracker instead.
                        self.context.exception(
                            f'got an exception while handling envelope: {envelope}. exc: {e}'
                        )
                        tracker._fut.set_exception(e)
                    self._queue.task_done()
        finally:
            # Cleanup: signal shutdown and deregister from the core.
            self._shutdown_event.set()
            await self.context.core.remove_actor(self._actor_id)
# NOTE: This async for future-proofing.
async def create_actlet(
self, context: Context, name: str, function: typing.Callable, configuration: Message
):
"""creates sub-actlet which runs the function in parallel to this actor's work.
when done, the result is posted as a message to the actor
Args:
context (Context): Context object.
name (str): Actlet's name.
function (typing.Callable): Actlet's entrypoint function.
configuration (Message): Actlet's configuration.
Raises:
RuntimeError: If there is an actlet named `name` already.
Returns:
libactors.actor.actlet: The created actlet object.
"""
# Avoiding circular logs by lazy import.
from .actlet import Actlet
if self.is_actlet_exists(name):
raise RuntimeError(f'actlet: {name} already exists')
if hasattr(function, '__self__'):
raise RuntimeError(f'actlet function: {function} must not be bound')
name = self._generate_actlet_name(name)
actlet = Actlet(
name=name,
context=context(identity=name),
entry_point=function,
# Actlet is created with proxy to the created actor.
proxy=context.get_proxy(self.actor_id),
# NOTE: configuration is deep copied since it might include references to internal state structures of actors.
# After copying the configuration, the receiver actor may either access or modify any property safely.
configuration=copy.deepcopy(configuration),
)
# Add the actlet to the mapping so the actor can keep track of all its actlets.
self._actlets[name] = actlet
return actlet
async def cancel_actlet(self, context: Context, name: str):
"""Cancels the actlet named `name`.
Args:
context (Context): Context object.
name (str): Actlet's name.
"""
actlet_name = self._generate_actlet_name(name)
await self._actlets[actlet_name].cancel()
del self._actlets[actlet_name]
    @staticmethod
    async def _generic_timer(
        context: Context, proxy, configuration: TimerConfiguration
    ) -> TimerDoneMessage:
        """Generic timer entrypoint: sends `configuration.message` every `configuration.interval` seconds.

        Args:
            context (libactors.Context): Context object.
            proxy (libactors.ActorProxy): Proxy to which `configuration.message` is sent.
            configuration (TimerConfiguration): Timer's configuration.

        Returns:
            TimerDoneMessage: Upon exception or after completing all requested repetitions.
        """
        try:
            # Optional one-shot delay before the timer starts ticking.
            if configuration.delay:
                await asyncio.sleep(configuration.delay)
            # A falsy repetitions value means "repeat forever".
            repetitions = configuration.repetitions if configuration.repetitions else float('inf')
            # If configured with `now`, send once immediately, before the
            # first waiting cycle.
            if configuration.now:
                await proxy.tell(
                    context=context,
                    message=configuration.message,
                )
                # Although it looks odd, `repetitions` counts messages sent,
                # not waiting cycles — the immediate send consumes one.
                repetitions -= 1
            while repetitions > 0:
                context.debug(f'sleeping {configuration.interval} seconds')
                # NOTE(gil): it is okay to use asyncio.sleep instead of asyncio.call_later or such (asyncio.sleep calls asyncio.call_later)
                await asyncio.sleep(configuration.interval)
                await proxy.tell(
                    context=context,
                    message=configuration.message,
                )
                repetitions -= 1
        except asyncio.CancelledError:
            # Cancellation must propagate so the surrounding actlet can clean up.
            raise
        except Exception as e:
            context.exception(f'exception in timer: {e}')
        return TimerDoneMessage()
def _generate_actlet_name(self, name: str):
return f'{self.context.identity}/actlet/{name}'
def _generate_timer_name(self, name: str):
return f'timer/{name}'
async def create_timer(
self,
context: Context,
name: str,
message: Message,
interval: float,
delay: float = 0,
now: bool = False,
repetitions: int = 0
):
"""Creates a timer with name `name` (look for `libactors.actor.messages.TimerConfiguration` for full argument description).
Args:
context (Context): Context object.
| |
<reponame>mao-example/End-to-End-Incremental-Learning
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# import matplotlib.pyplot as plt
import time
import torchvision.transforms as transforms
import torchvision.models.resnet as resnet
import resnet_v2
import PIL.Image as Image
import utils
import math
import copy
import os
# Dataset selection; utils.load_data returns per-class train/val/test splits.
dataset = 'cifar100'
images_train, labels_train, images_val, labels_val, images_test, labels_test = utils.load_data(dataset)
# --- Training hyperparameters ---
iteration = 400  # training iterations per incremental period
iteration_finetune = 300  # iterations for the fine-tuning stage
lr = 0.5  # initial learning rate
schedules = range(50, iteration, 50)  # iterations at which lr is decayed
gamma = 0.5  # lr decay factor applied at each schedule step
momentum = 0.9  # SGD momentum
decay = 0.0001  # weight decay (L2)
# decay = 0.0005
batchsize = 400
num_class = 100  # total number of classes in the dataset
num_class_novel = 10  # new classes introduced per incremental period
memory_K = 2000  # total exemplar-memory budget (number of images)
T = 2  # distillation temperature (logits divided by T before softmax)
dist_ratio = 0.5  # weight of the distillation loss term
gredient_noise_ratio = 0  # scale of random noise added to weights after each step (0 disables); note: name is a typo of "gradient"
network_depth = 32
flag_augmentation_e2e = True  # use the end-to-end augmentation pipeline (utils.data_augmentation_e2e)
stop_acc = 0.998  # training-accuracy threshold
flag_dist_all = False  # if True, distill on all samples instead of old-class samples only
period_train = num_class//num_class_novel  # number of incremental periods
# NOTE(review): memory_size divides the budget by num_class_novel, not by the
# total number of classes seen — confirm this per-slot sizing is intended.
memory_size = memory_K//num_class_novel
net = resnet_v2.resnet(depth=network_depth, num_classes=100)
# Log the experiment configuration for reproducibility.
print('parameters:')
print('dataset: ', dataset)
print('lr: ', lr)
# print('schedules: follow the paper, divide 10 per 10 steps')
print('schedules: ', schedules)
print('gamma: ', gamma)
print('momentum: ', momentum)
print('decay: ', decay)
print('batchsize: ', batchsize)
print('iteration: ', iteration)
print('iteration_finetune: ', iteration_finetune)
print('num_class: ', num_class)
print('num_class_novel: ', num_class_novel)
print('memory_K: ', memory_K)
print('T: ', T)
print('dist_ratio: ', dist_ratio)
print('gredient_noise_ratio: ', gredient_noise_ratio)
print('network_depth: ', network_depth)
print('flag_augmentation_e2e: ', flag_augmentation_e2e)
print('stop_acc: ', stop_acc)
print('flag_dist_all: ', flag_dist_all)
# --- Device setup: wrap the net in DataParallel across all visible GPUs ---
num_gpu = torch.cuda.device_count()
if num_gpu > 0:
    print('GPU number = %d' % (num_gpu))
    device_ids = np.arange(num_gpu).tolist()
    print('device_ids:')
    print(device_ids)
    net = nn.DataParallel(net, device_ids=device_ids).cuda()
else:
    # NOTE(review): later code calls .cuda() on tensors unconditionally
    # (e.g. the feature-dim probe below), so a CPU-only run will fail here.
    print('only cpu is available')
# Fix the class presentation order for reproducibility.
np.random.seed(100)
class_order = np.random.permutation(num_class)
print('class order:')
print(class_order)
# Classes already learned in previous periods (grows each period).
class_old = np.array([], dtype=int)
# Exemplar memory: one row of `memory_size` images (and labels) per stored class.
memory_images = np.zeros(shape=(0, memory_size, 3, 32, 32), dtype=np.uint8)
memory_labels = np.zeros(shape=(0, memory_size), dtype=int)
# Per-period accuracy on novel+old test data, before / after fine-tuning.
acc_nvld_basic = np.zeros((period_train))
acc_nvld_finetune = np.zeros((period_train))
crossentropy = nn.CrossEntropyLoss()
# Probe the network once with a dummy batch to discover the output dimensionality.
# NOTE(review): this forward pass runs without torch.no_grad(), so it builds an
# (unused) autograd graph — presumably harmless; confirm.
feat = net.forward(torch.from_numpy(np.zeros(shape=(1, 3, 32, 32))).float().cuda())
dim = np.shape(feat.cpu().data.numpy())[-1]
print('feature dim = %d'%(dim))
# Resume support: reuse a saved first-period model when one exists on disk.
first_model_path = 'model/first_model_e2e_aug_%d_%d_%s%s' % (network_depth, 0, ''.join(str(e) for e in class_order[:num_class_novel]), '.pkl')
flag_model = os.path.exists(first_model_path)
for period in range(period_train):
print('------------------')
print('------------------')
print('period = %d'%(period))
class_novel = class_order[period*num_class_novel:(period+1)*num_class_novel]
print('class_novel:')
print(class_novel)
images_novel_train = images_train[class_novel]
images_novel_train = np.reshape(images_novel_train, newshape=(-1, 3, 32, 32))
labels_novel_train = labels_train[class_novel]
labels_novel_train = np.reshape(labels_novel_train, newshape=(-1))
images_novel_test = images_test[class_novel]
images_novel_test = np.reshape(images_novel_test, newshape=(-1, 3, 32, 32))
labels_novel_test = labels_test[class_novel]
labels_novel_test = np.reshape(labels_novel_test, newshape=(-1))
num_class_old = class_old.shape[0]
if period == 0:
images_combined_train = images_novel_train
labels_combined_train = labels_novel_train
else:
images_combined_train = np.concatenate((images_novel_train, np.reshape(memory_images, newshape=(-1, 3, 32, 32))), axis=0)
labels_combined_train = np.concatenate((labels_novel_train, np.reshape(memory_labels, newshape=(-1))), axis=0)
images_nvld_test = images_test[np.concatenate((class_old, class_novel), axis=0)]
images_nvld_test = np.reshape(images_nvld_test, newshape=(-1, 3, 32, 32))
labels_nvld_test = labels_test[np.concatenate((class_old, class_novel), axis=0)]
labels_nvld_test = np.reshape(labels_nvld_test, newshape=(-1))
# data augmentation
if flag_augmentation_e2e == True:
# augmentation
images_combined_train, labels_combined_train = utils.data_augmentation_e2e(images_combined_train,
labels_combined_train)
# normalization
images_combined_train = images_combined_train/255.0
images_nvld_test = images_nvld_test/255.0
v_mean_0 = np.mean(images_train[:,:,0,:,:]/255.0)
v_mean_1 = np.mean(images_train[:,:,1,:,:]/255.0)
v_mean_2 = np.mean(images_train[:,:,2,:,:]/255.0)
images_combined_train[:,0] -= v_mean_0
images_combined_train[:,1] -= v_mean_1
images_combined_train[:,2] -= v_mean_2
images_nvld_test[:, 0] -= v_mean_0
images_nvld_test[:, 1] -= v_mean_1
images_nvld_test[:, 2] -= v_mean_2
print('training size = %d'%(labels_combined_train.shape[0]))
# training
lrc = lr
print('current lr = %f' % (lrc))
acc_training = []
softmax = nn.Softmax(dim=-1).cuda()
##################################
net_old = copy.deepcopy(net)
##################################
for iter in range(iteration):
# learning rate
if iter in schedules:
lrc *= gamma
print('current lr = %f'%(lrc))
# criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=lrc, momentum=momentum,
weight_decay=decay, nesterov=True)
# train
idx_train = np.random.permutation(labels_combined_train.shape[0])
loss_avg = 0
loss_cls_avg = 0
loss_dist_avg = 0
acc_avg = 0
num_exp = 0
tstart = time.clock()
batchnum_train = math.ceil(labels_combined_train.shape[0] / batchsize)
# load model
if period == 0 and flag_model:
print('load model: %s' % first_model_path)
net.load_state_dict(torch.load(first_model_path))
# break
for bi in range(batchnum_train):
if period == 0 and flag_model: # loaded model, do not need training
num_exp = 1
break
if bi == batchnum_train - 1:
idx = idx_train[bi * batchsize:]
else:
idx = idx_train[bi * batchsize:(bi + 1) * batchsize]
img = images_combined_train[idx]
lab = labels_combined_train[idx]
lab_onehot = utils.one_hot(lab, num_class)
# transform
if flag_augmentation_e2e == False: # old transform
img = utils.img_transform(img, 'train')
img = torch.from_numpy(img).float()
img = img.cuda()
lab_onehot = torch.from_numpy(lab_onehot)
lab_onehot = lab_onehot.float()
lab_onehot = lab_onehot.cuda()
# print("Outside: input size", img.size(), "output_size", lab.size())
output = net.forward(img)
# classification loss
indices = torch.LongTensor(np.concatenate((class_old, class_novel), axis=0))
indices = indices.cuda()
prob_cls = torch.index_select(output, 1, indices)
prob_cls = softmax(prob_cls)
lab_onehot = torch.index_select(lab_onehot, 1, indices)
loss_cls = F.binary_cross_entropy(prob_cls, lab_onehot)
# distillation loss for only old class data !!!
if period>0:
indices = torch.LongTensor(class_old)
indices = indices.cuda()
dist = torch.index_select(output, 1, indices)
dist = softmax(dist/T)
output_old = net_old.forward(img)
output_old = torch.index_select(output_old, 1, indices)
lab_dist = Variable(output_old, requires_grad = False)
lab_dist = softmax(lab_dist/T)
if not flag_dist_all:
# only for old class data
indices = [id for id, la in enumerate(lab) if la in class_old]
indices = torch.LongTensor(indices)
indices = indices.cuda()
dist = torch.index_select(dist, 0, indices)
lab_dist = torch.index_select(lab_dist, 0, indices)
loss_dist = F.binary_cross_entropy(dist, lab_dist)
else:
loss_dist = 0
loss = loss_cls + dist_ratio*loss_dist
loss_avg += loss.item()
loss_cls_avg += loss_cls.item()
if period == 0:
loss_dist_avg += 0
else:
loss_dist_avg += loss_dist.item()
acc = np.sum(np.equal(np.argmax(prob_cls.cpu().data.numpy(), axis=-1), np.argmax(lab_onehot.cpu().data.numpy(), axis=-1)))
acc_avg += acc
num_exp += np.shape(lab)[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
# add random noise to gradients / weights
if gredient_noise_ratio > 0:
for p in net.parameters():
p.data.sub_(gredient_noise_ratio * lrc * torch.from_numpy(
(np.random.random(np.shape(p.data.cpu().data.numpy())) - 0.5)*2).float().cuda())
loss_avg /= num_exp
loss_cls_avg /= num_exp
loss_dist_avg /= num_exp
acc_avg /= num_exp
acc_training.append(acc_avg)
tend = time.clock()
tcost = tend - tstart
print('Training Period: %d \t Iter: %d \t time = %.1f \t loss = %.6f \t acc = %.4f' % (period, iter, tcost, loss_avg, acc_avg))
# print('Training Period: %d \t Iter: %d \t time = %.1f \t loss_cls = %.6f \t loss_dist = %.6f \t loss = %.6f \t acc = %.4f'%(period, iter, tcost, loss_cls_avg, loss_dist_avg, loss_avg, acc_avg))
# test all (novel + old) classes based on logists
if period > -1:
# images_nvld_test = images_test[np.concatenate((class_old, class_novel), axis=0)]
# images_nvld_test = np.reshape(images_nvld_test, newshape=(-1, 3, 32, 32))
# labels_nvld_test = labels_test[np.concatenate((class_old, class_novel), axis=0)]
# labels_nvld_test = np.reshape(labels_nvld_test, newshape=(-1))
idx_test = np.random.permutation(labels_nvld_test.shape[0])
loss_avg = 0
acc_avg = 0
num_exp = 0
tstart = time.clock()
batchnum_test = math.ceil(labels_nvld_test.shape[0] / batchsize)
for bi in range(batchnum_test):
if bi == batchnum_test - 1:
idx = idx_test[bi * batchsize:]
else:
idx = idx_test[bi * batchsize:(bi + 1) * batchsize]
img = images_nvld_test[idx]
lab = labels_nvld_test[idx]
lab_onehot = utils.one_hot(lab, num_class)
# normalization
if flag_augmentation_e2e == False: # old transform
img = utils.img_transform(img, 'test')
img = torch.from_numpy(img).float()
img = img.cuda()
output = net.forward(img)
indices = torch.LongTensor(np.concatenate((class_old, class_novel), axis=0))
indices = indices.cuda()
output = torch.index_select(output, 1, indices)
output = softmax(output)
output = output.cpu().data.numpy()
lab_onehot = lab_onehot[:, np.concatenate((class_old, class_novel), axis=0)]
acc = np.sum(np.equal(np.argmax(output, axis=-1), np.argmax(lab_onehot, axis=-1)))
acc_avg += acc
num_exp += np.shape(lab)[0]
acc_avg /= num_exp
tend = time.clock()
tcost = tend - tstart
print('Testing novel+old Period: %d \t Iter: %d \t time = %.1f \t\t\t\t\t\t acc = %.4f' % (period, iter, tcost, acc_avg))
acc_nvld_basic[period] = acc_avg
if period == 0 and flag_model: # loaded model, do not need extra test
break
if len(acc_training)>20 and acc_training[-1]>stop_acc and acc_training[-5]>stop_acc:
print('training loss converged')
break
# save model
if period == 0 and (not flag_model):
print('save model: %s' % first_model_path)
torch.save(net.state_dict(), first_model_path)
# balanced finetune
net_old = copy.deepcopy(net)
# finetune
if period>0:
# lrc = lr
lrc = lr*0.1 # small learning rate for finetune
print('current lr = %f' % (lrc))
softmax = nn.Softmax(dim=-1).cuda()
acc_finetune_train = []
for iter in range(iteration_finetune):
# learning rate
if iter in schedules:
lrc *= gamma
print('current lr = %f'%(lrc))
# criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=lrc, momentum=momentum,
weight_decay=decay, nesterov=True)
# finetune train
idx_finetune_novel = np.random.permutation(labels_novel_train.shape[0])
idx_finetune_novel = idx_finetune_novel[:memory_size*num_class_novel]
idx_finetune_old = np.arange(start=labels_novel_train.shape[0], stop=labels_combined_train.shape[0])
idx_finetune = np.concatenate((idx_finetune_novel, idx_finetune_old), axis=0)
np.random.shuffle(idx_finetune)
loss_avg = 0
acc_avg = 0
num_exp = 0
tstart = time.clock()
batchnum_train = math.ceil(idx_finetune.shape[0] // batchsize)
for bi in range(batchnum_train):
if bi == batchnum_train - 1:
idx = idx_finetune[bi * batchsize:]
else:
idx = idx_finetune[bi * batchsize:(bi + 1) * batchsize]
img = images_combined_train[idx]
lab = labels_combined_train[idx]
lab_onehot = utils.one_hot(lab, num_class)
# transform
if flag_augmentation_e2e == False:
img = utils.img_transform(img, 'train')
img = torch.from_numpy(img).float()
img = img.cuda()
lab_onehot = torch.from_numpy(lab_onehot)
lab_onehot = lab_onehot.float()
lab_onehot = lab_onehot.cuda()
# print("Outside: input size", img.size(), "output_size", lab.size())
output = net.forward(img)
# classification loss
indices = torch.LongTensor(np.concatenate((class_old, class_novel), axis=0))
indices = indices.cuda()
prob_cls = torch.index_select(output, 1, indices)
prob_cls = softmax(prob_cls)
lab_onehot = torch.index_select(lab_onehot, 1, indices)
loss_cls = F.binary_cross_entropy(prob_cls, lab_onehot)
# distillation loss for all classes (maybe the author only distillates for novel classes)
if period>0:
indices = torch.LongTensor(np.concatenate((class_old, class_novel), axis=0))
indices = indices.cuda()
dist = torch.index_select(output, 1, indices)
dist = softmax(dist/T)
output_old = net_old.forward(img)
output_old = torch.index_select(output_old, 1, indices)
lab_dist = | |
# repo: WesCoomber/582Wcoomber3
#!/usr/bin/env python2.7
import json, random, time, string, inspect
import os, sys, signal, io, operator
import geocoder as gc
from randomdict import RandomDict
from PIL import Image
from sets import Set
from glob import glob
from yelp.client import Client
from yelp.oauth1_authenticator import Oauth1Authenticator
import urllib
import multiprocessing as mp
from multiprocessing import Manager, Queue, Process
import threading
from threading import Thread, Lock, Condition
from slide import App
import config
from config import *
# Yelp API OAuth1 credentials are read from the local config module
# (never hard-code secrets in source).
auth = Oauth1Authenticator(
    consumer_key = config.consumer_key,
    consumer_secret = config.consumer_secret,
    token = config.token,
    token_secret = config.token_secret
)
client = Client(auth)  # shared Yelp API client used by fetch_pics()
DEBUG = config.debug        # verbose menu output toggle (see show_list)
PREFETCH = config.prefetch  # enable speculative image prefetch (see get_steroids)
''' custom dirs '''
JSON_DIR = 'json'   # directory holding the Yelp academic dataset JSON dumps
PIC_DIR = 'photo'   # directory holding photos
# Dataset dump file names (one JSON object per line in each file).
JSON_business = 'yelp_academic_dataset_business.json'
JSON_review = 'yelp_academic_dataset_review.json'
JSON_user = 'yelp_academic_dataset_user.json'
JSON_picture = 'photo_id_to_business_id.json'
class User():
    """A Yelp reviewer identified by *uid*, accumulating the business
    ids (bids) this user has written reviews for."""

    def __init__(self, uid):
        self.uid = uid
        self.bids = []

    def get_uid(self):
        return self.uid

    def add_bid(self, bid):
        self.bids.append(bid)

    def has_bid(self, bid):
        return bid in self.bids

    def get_bids(self):
        return self.bids

    def has_common(self, b):
        """Return (True, shared_bids) when this user and *b* reviewed at
        least one business in common, otherwise (False, None)."""
        shared = set(self.bids).intersection(b.get_bids())
        if shared:
            return (True, shared)
        return (False, None)

    def get_num_reviews(self):
        return len(self.bids)
################# local file related ops ######################
def getfile(name, dtype='json'):
    """Return the absolute path of *name*: under PIC_DIR when dtype is
    'picture', otherwise under the JSON dataset directory."""
    subdir = PIC_DIR if dtype == 'picture' else JSON_DIR
    return '{}/{}/{}'.format(os.getcwd(), subdir, name)
def mkdir(bid):
    """Create (if needed) and return the per-business scratch directory
    ./.tmp/<bid> used to hold downloaded images."""
    tmp_root = os.getcwd() + '/.tmp'
    if not os.path.isdir(tmp_root):
        os.mkdir(tmp_root)
    target = tmp_root + '/' + bid
    if not os.path.isdir(target):
        os.mkdir(target)
    return target
################# remote yelp related ops ######################
def form_params(name, lat, lon):
    """Build the Yelp search parameter dict for *name* near (lat, lon)."""
    return {
        'term': name,
        'lang': 'en',
        'cll': '{},{}'.format(lat, lon),
    }
def _collect_image_urls(resp):
    # Helper: gather full-size image URLs from a Yelp search response;
    # the API's thumbnail URLs end in 'ms.jpg', the originals in 'o.jpg'.
    urls = []
    for business in resp.businesses:
        image_url = business.image_url
        if image_url is not None:
            urls.append(image_url.replace('ms.jpg', 'o.jpg'))
    return urls

def _download_images(urls, dir_name, start_count):
    # Helper: download each URL to <dir_name>/<index>.jpg, numbering from
    # start_count; returns the next unused index.
    count = start_count
    for url in urls:
        urllib.urlretrieve(url, "{}/{}.jpg".format(dir_name, count))
        count += 1
    return count

def fetch_pics(obj):
    """Download images for one business object into ./.tmp/<business_id>.

    Searches Yelp by business name first; if that yields fewer than 10
    images, retries with the city as the search term and appends the
    extra results. Returns the business_id so callers can verify the
    result matches the request.

    Fix: the original duplicated the URL-collection and download loops
    verbatim for the fallback search; both are now shared helpers.
    """
    bid = obj['business_id']
    name = obj['name']
    city = obj['city']
    lat = obj['latitude']
    lon = obj['longitude']
    params = form_params(name, lat, lon)
    dir_name = mkdir(bid)
    DM("fetching {} images in {}".format(name.encode('utf8'), dir_name.encode('utf8')))
    resp = client.search(city, **params)
    list_urls = _collect_image_urls(resp)
    count = _download_images(list_urls, dir_name, 0)
    if count < 10:
        # Not enough hits for the exact name: broaden the search.
        params = form_params(city, lat, lon)
        resp = client.search(city, **params)
        list_urls = _collect_image_urls(resp)
        count = _download_images(list_urls, dir_name, count)
    DM("done fetching {} images in {}".format(len(list_urls), dir_name.encode('utf8')))
    return bid
################# yelp json related ops ######################
def get_obj_business():
    """Load business records from the dataset, keeping only restaurants
    whose state code is two letters. Returns a list of dicts."""
    restaurants = []
    with open(getfile(JSON_business)) as fin:
        for line in fin:
            record = json.loads(line)
            if len(record['state']) == 2 and 'Restaurants' in record['categories']:
                restaurants.append(record)
    return restaurants
def fetch_reviews_opt(user_map, lines):
    # Worker body: fold one slice of review JSON lines into the shared
    # user_map (uid -> User), each User accumulating the business ids
    # that user reviewed.
    DM('Process: {} {} {}'.format(mp.current_process().name, 'Starting', len(lines)))
    count = 0
    for line in lines:
        # if count > 30000:
        #     return
        obj = json.loads(line)
        bid = obj['business_id']
        uid = obj['user_id']
        try:
            # user_obj = users[uid]
            user_obj = user_map[uid]
        except Exception as e:
            # if new user? create object and add to hash table
            user_obj = User(uid)
        # NOTE(review): user_map is a multiprocessing Manager dict, so the
        # fetched User is a copy; the reassignment below is what publishes
        # the updated bids list back to the shared map — confirm.
        user_obj.add_bid(bid)
        user_map[uid] = user_obj
        count += 1
    DM('Process: {} {} {}'.format(mp.current_process().name, 'Exiting', len(lines)))
    return
def fetch_reviews(user_map):
    """Split the review file into roughly equal slices and process them
    in parallel, one worker process per CPU plus one spill-over slice
    that absorbs the remainder lines."""
    count = 0
    DM('fetching reviews... takes about {}'.format("4 mins"))
    # First pass only counts lines so the file can be partitioned evenly.
    num_lines = sum(1 for line in open(getfile(JSON_review)))
    cpus = mp.cpu_count()
    for_each = int(num_lines / cpus)
    cpus += 1  # extra bucket for the remainder lines
    DM("{} {} {}".format(for_each, num_lines, cpus))
    line_count = 0
    lines = [[] for i in range(cpus)]
    tmp_lines = []
    count = 0  # index for list
    with open(getfile(JSON_review)) as fin:
        for line in fin:
            line_count += 1
            lines[count].append(line)
            # Advance to the next bucket every for_each lines.
            if (line_count % for_each) == 0:
                # print count, line_count, num_lines
                count += 1
    start = time.time()
    jobs = []
    for i in range(cpus):
        # t = Thread(name=i, target=fetch_reviews_opt, args=(user_map, lines[i], ))
        t = mp.Process(name=i, target=fetch_reviews_opt, args=(user_map, lines[i], ))
        jobs.append(t)
        t.start()
    for job in jobs:
        job.join()
    end = time.time()
    # print "Took {} secs".format(end-start)
    DM('done fetching reviews... {}'.format(count))
def fetch_business(bus_map):
    """Load every business record from the dataset file into the shared
    bus_map (business_id -> raw JSON object)."""
    count = 0
    DM("fetching businesses...")
    with open(getfile(JSON_business)) as fin:
        for raw in fin:
            record = json.loads(raw)
            count += 1
            bus_map[record['business_id']] = record
    DM("done fetching business... {}".format(len(bus_map)))
    return
################# slide wrapper (execute only in main process) ######################
def slide_worker(bid):
    # Launch the slideshow UI (slide.App) for one business; per the
    # section note above this file's helpers, it must execute only in the
    # main process.
    App(bid).run()
    return
################# demo purpose CUI ops ######################
''' takes bus_list, return bus object'''
def show_list(bus_map):
count = 0
new_list = [] # contains bids
for k,v in bus_map.items():
# k == bid, v = obj
count += 1
bid = k
obj = v
new_list.append(bid)
if DEBUG:
# if True:
val = (v['stars']*100.0) + v['review_count']
print " %3d. [%5d] %s" % (count, val, obj['name'])
else:
print " %3d. %s" % (count, obj['name'])
# choice == bid for now...
choice = 0
while 1:
choice = input("==> Select: ")
if choice <= 0 or choice > len(bus_map):
print "Woooops! Retry mannnnn!"
else:
# print choice
break
choice -= 1
bid = new_list[choice]
bus_obj = bus_map[bid]
DM("{} {} {}".format(choice, bid, bus_obj))
return bus_obj
################# prefetching stuff ######################
'''TODO:
- heuristics:
0. stars
1. rating
2. review_counts
3. common reviews'''
def get_steroids(bus_map, commons):
    """Rank businesses by a heuristic score and return the top few
    objects whose images are worth prefetching.

    Score = stars * 100 + review_count, plus the common-review count from
    *commons* when it has entries. Returns [] when prefetching is off.
    """
    if PREFETCH == False:  # if disabled...
        return []
    scores = {}
    for bid, obj in bus_map.items():
        val = (obj['stars'] * 100.0) + obj['review_count']
        # FIX: the original guard `inspect.isclass(commons) and
        # len(commons.key())` could never fire (commons is a dict-like
        # instance, not a class, and .key() does not exist), so the
        # common-review bonus was dead code. Default to 0 so businesses
        # absent from commons don't raise KeyError.
        if hasattr(commons, 'keys') and len(commons.keys()) > 1:
            val += commons.get(bid, 0)
        scores[bid] = val
    ranked = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    thrs = 5  # prefetch at most this many businesses
    steroids = [bus_map[bid] for bid, val in ranked[:thrs]]
    DM("Number of steroids: {}".format(len(steroids)))
    return steroids
def exec_steroids(steroids):
    """Start one background image-download thread per prefetch candidate
    and return the started Thread objects (caller joins them)."""
    jobs = []
    for obj in steroids:
        worker = Thread(name=obj['name'].encode('utf8'), target=fetch_pics, args=(obj, ))
        jobs.append(worker)
        worker.start()
    return jobs
def get_bus_map(bus, r_bus, commons):
    """Build the next menu of up to NUM_BUS businesses: first those that
    share reviewers with the previous pick (from *commons*), then random
    businesses to fill the remainder.

    Fixes: the original guard `(inspect.isclass(commons) and
    len(commons.key())) > 1` compared a boolean against 1 because of the
    misplaced parenthesis and called the non-existent dict method .key();
    it also iterated a mapping directly where (bid, count) pairs were
    expected.
    """
    tmp_bus = {}
    NUM_BUS = 15
    count = 0
    if isinstance(commons, list):
        common_pairs = commons
    elif hasattr(commons, 'items'):
        common_pairs = list(commons.items())
    else:
        common_pairs = []
    if len(common_pairs) > 1:
        for bid, score in common_pairs:
            count += 1
            tmp_bus[bid] = bus[bid]
            if count > NUM_BUS:
                return tmp_bus
    # count < 15? fetch more from random...
    for i in range(count, NUM_BUS):
        obj = r_bus.random_value()
        tmp_bus[obj['business_id']] = obj
    return tmp_bus
def get_commons(target, users, commons):
''' given busieness id (bid), find *users* who have written down the
review'''
common_users = []
for k, obj in users.items():
if obj.has_bid(target):
common_users.append(obj)
common_bids = {}
count = 0
for obj in common_users:
count += 1
for i in range(count, len(common_users)):
obj2 = common_users[i]
answer, commons = obj.has_common(obj2)
if answer:
for common in commons:
if common != target:
from_map = None
try: # dict = (bid, counts)
common_bids[common] += 1
except Exception as e:
# DNE
common_bids[common] = 1
ret = sorted(common_bids.items(), key=lambda x: x[1], reverse=True)
print "DONE~!"
# print ret
for k, v in ret:
commons[k] = v
return ret
################# slide wrapper (execute only in main process) ######################
''' notes:
originally, we wanted to construct a hierarchy where a user selects
state -> city -> restaurants type
but b/c dataset contains only limited types of restaurants, it's meaningless...
OK let's do this... show 20 businesses w/ top review counts,
upon select, fetch some picture...
move to next selection (try to find common set of reviewers...)
'''
def main():
    # Entry point: load the dataset in parallel, then loop a simple CUI
    # that shows a business menu, prefetches images for likely picks, and
    # downloads images for the selected business.
    DM('initializing....')
    count = 0
    bus = {}
    r_bus = RandomDict()
    users = {}
    jobs = []
    # Shared (cross-process) maps filled by the two loader processes.
    manager = Manager()
    user_map = manager.dict()
    bus_map = manager.dict()
    jobs.append(mp.Process(name='review', target=fetch_reviews, args=(user_map,)))
    jobs.append(mp.Process(name='business', target=fetch_business, args=(bus_map,)))
    for p in jobs:
        p.start()
    for p in jobs:
        p.join()
    # fetch_reviews()
    # fetch_business()
    users = user_map
    bus = bus_map
    # Mirror businesses into a RandomDict for random sampling in
    # get_bus_map.
    for k, v in bus.items():
        r_bus[k] = v
    commons = manager.dict()
    ################ starting ..>!
    while 1:
        tmp_bus = get_bus_map(bus, r_bus, commons)
        steroids = get_steroids(tmp_bus, commons)
        jobs = exec_steroids(steroids)  # background prefetch threads
        print '==============================================================='
        target_bus = show_list(tmp_bus)
        target_bid = target_bus['business_id']
        DM("target selected is {}".format(target_bid))
        for t in jobs:
            t.join()
        if target_bus not in steroids or PREFETCH is False:
            # Prefetch missed (or disabled): download synchronously and
            # report how long it took.
            print ("Steroid-miss: %s " % target_bus['name'])
            eval_start = time.time()
            check_bid = fetch_pics(target_bus)
            print ('Time taken: {}'.format(time.time() - eval_start))
            if check_bid != target_bid:
                print "ERROR: bid has been changed...!"
                exit(-1)
''' TODO:
- multiprocessing:
1. fetching review & business concurrently
2. manager | |
import torch
import torch.nn.functional as F
import queue
from itertools import count
import numpy as np
from ..models import RecurrentDESMILES
class AstarTree:
    """Best-first (A*-style) search over token sequences decoded from a
    molecular fingerprint by a RecurrentDESMILES model.

    Partial sequences live in non_leaf_queue and finished sequences in
    leaf_queue, both priority queues ordered by cumulative negative
    log-probability; __next__ expands best-first until a leaf node
    outscores every open node.
    """
    def __init__(self, fp, desmiles, max_length=30, max_branches=5000):
        assert type(fp) is torch.Tensor
        assert type(desmiles) is RecurrentDESMILES
        assert next(desmiles.parameters()).device == fp.device
        if fp.dim() == 1:
            fp = fp.unsqueeze(0)  # ensure a leading batch dimension
        self.fp = fp
        self.desmiles = desmiles
        self.max_length = max_length
        self.max_branches = max_branches
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.unique_identifier = count()  # counter breaks ties between equivalent probabilities
        self.non_leaf_queue = queue.PriorityQueue()
        self.leaf_queue = queue.PriorityQueue()
        self.desmiles.embed_fingerprints(fp)  # initialize rnn_desmiles by embedding the fingerprint
        root = self.initialize_root_node()
        # The root seeds both queues; the +inf leaf entry guarantees
        # leaf_queue is never empty when its head is peeked in __next__.
        self.non_leaf_queue.put((torch.tensor(0.0, device=self.device), next(self.unique_identifier), root))
        self.leaf_queue.put((torch.tensor(float("inf"), device=self.device), next(self.unique_identifier), root))
        self.num_expand = 1
        self.num_branches = 0

    @classmethod
    def create_from_astar_tree(cls, other):
        """Alternate constructor: clone *other*'s setup and adopt its queues."""
        astar_tree = cls(other.fp, other.desmiles, max_length=other.max_length, max_branches=other.max_branches)
        astar_tree.non_leaf_queue = other.non_leaf_queue
        astar_tree.leaf_queue = other.leaf_queue
        return astar_tree

    def initialize_root_node(self):
        # Right-padded sequence of length max_length; token 3 is the start
        # symbol.
        root = torch.zeros(self.max_length, dtype=torch.long, device=self.device)
        root[0] = 3
        return root

    def return_leaf_node(self):
        score, _, seq = self.leaf_queue.get()  # pop the top, decode and yield.
        self.last_leaf_node = seq
        return score, seq

    def __next__(self):
        """Return the next-best (score, sequence) finished leaf."""
        # If no more room for expansion
        while True:
            if self.num_branches > self.max_branches:
                if self.leaf_queue.qsize() > 0:
                    return self.return_leaf_node()
                else:
                    # Search exhausted: signal with an infinite score.
                    return np.inf, self.last_leaf_node
            # Return a leaf node if it has the best score
            if self.leaf_queue.queue[0][0] < self.non_leaf_queue.queue[0][0]:
                return self.return_leaf_node()
            # Otherwise, purge queue and branch
            self.purge_queue()  # purge non-leaf queue if necessary
            self.branch_and_bound()

    def branch_and_bound(self):
        # get nodes to expand
        seqs, scores = self.branch()
        #seqs, scores = self.post_branch_callback(seqs, scores)
        self.bound(seqs, scores)
        self.num_branches += 1

    def branch(self):
        # Create scores and sequences for the num_expand nodes we will expand
        # it doesn't actually do the branching.
        scores = []
        seqs = []
        num_nodes = self.non_leaf_queue.qsize()
        while(len(scores) < min(self.num_expand, num_nodes)):
            score, _, seq = self.non_leaf_queue.get()
            scores.append(score)
            seqs.append(seq)
        scores = torch.tensor(scores, device=self.device)
        seqs = torch.stack(seqs)
        return seqs, scores

    def bound(self, seqs, scores):
        # Perform the branch operation and then bound the results:
        with torch.no_grad():
            # Clone hidden states which are the embedded fingerprints only.
            # This only needs as argument the len(seqs) [or seq.shape[0]]
            hiddens = self.clone_hidden_states(seqs)
            # Get the probabilities for all the children
            # The call to get_log_probabilities will overwrite the hidden states based on the sequences.
            log_probs = self.get_log_probabilities(seqs)
            # reset the hidden states to those of the embedded fingerprints.
            self.desmiles.hiddens = hiddens
            seqs, scores = self.get_children(seqs, log_probs, scores)
            self.add_children_to_queue(seqs, scores)

    def add_children_to_queue(self, seqs, scores):
        # sort scores and grab the first max_branches to add to the two separate queues.
        sort_idx = self.sort_scores(scores, self.max_branches)
        scores = scores[sort_idx]
        seqs = seqs[sort_idx]  # this is a 2D tensor with dimensions: (8000 x num_expanded_children) x 30 (padded sequences)
        is_leaf_node = self.are_children_leaf_nodes(seqs)
        for i, (score, child) in enumerate(zip(scores[is_leaf_node].tolist(), seqs[is_leaf_node])):
            self.leaf_queue.put((score, next(self.unique_identifier), child))
        for i, (score, child) in enumerate(zip(scores[~is_leaf_node].tolist(), seqs[~is_leaf_node])):
            self.non_leaf_queue.put((score, next(self.unique_identifier), child))

    def are_children_leaf_nodes(self, children):
        """Return a boolean mask over *children*: True where finished."""
        # the -1 is a hack for using the index in the last_chars line. Could revert back to actual legths.
        lengths = (children > 0).sum(dim=1) - 1
        last_chars = torch.tensor([child[length] for child, length in zip(children, lengths)], device=self.device)
        # last_chars == 0 means a pad character was chosen. for now I call this a leaf node so that it is not expanded further
        # The special characters are hard coded here:
        last_char_is_stop = (last_chars == 1) | (last_chars == 2) | (last_chars == 0)
        # this leaf node check only runs up to 30 elements, hardcoded (differs from earlier leaf_node check).
        is_leaf_node = (((lengths + 1) > 3) & last_char_is_stop) | ((lengths + 1) == 30)
        return is_leaf_node

    @staticmethod
    def sort_scores(scores, max_branches):
        # Indices of the max_branches best (lowest) scores.
        return torch.sort(scores)[1][:max_branches]

    def get_children(self, seqs, log_probs, parent_nlps):
        """Expand each sequence by every candidate next token; returns the
        flattened child sequences and their cumulative negative
        log-probabilities (parent NLP minus child log-prob)."""
        lengths = (seqs > 0).sum(dim=1)
        children = torch.arange(log_probs.size(1), device=self.device, dtype=torch.long)[None,:].expand(seqs.shape[0], log_probs.size(1))
        new_seqs = []
        for seq, child, length in zip(seqs, children, lengths):
            seq = seq.expand(child.size(0), seq.size(0))
            # Splice each candidate token in at position `length`,
            # dropping the trailing pad element to keep the width fixed.
            seq=torch.cat((seq[:,:length], child[:,None], seq[:,length:-1]), dim=1)
            new_seqs.append(seq)
        new_seqs = torch.stack(new_seqs)
        new_seqs = new_seqs.reshape(-1, new_seqs.shape[-1])
        scores = (parent_nlps[:,None] - log_probs).reshape(-1)
        return new_seqs, scores

    def clone_hidden_states(self, seqs):
        """Deep-copy the model's hidden states, resized to match the
        number of sequences about to be evaluated."""
        num_sequences = seqs.shape[0]
        if num_sequences != self.desmiles.bs:
            self.desmiles.select_hidden(torch.zeros(num_sequences, dtype=torch.long))
            self.desmiles.bs = num_sequences
        hiddens = [(h[0].clone(),h[1].clone()) for h in self.desmiles.hiddens]
        return hiddens

    def get_log_probabilities(self, seqs):
        """Log-softmax over next-token logits for each sequence."""
        logits = self.desmiles(seqs.transpose(0,1))
        lengths = (seqs > 0).sum(dim=1) - 1
        # get the energy corresponding to the next token (specified by lengths)
        logits = torch.stack([logits[i,l] for i,l in zip(np.arange(lengths.shape[0]), lengths.tolist())])
        assert(logits.dim() == 2)
        return F.log_softmax(logits, dim=1)

    def purge_queue(self, downto=10000, maxsize=1000000):  ## will need to update to also keep mol_queue mols.
        # Cap memory use: when the open queue exceeds maxsize, keep only
        # the best `downto` entries.
        if self.non_leaf_queue.qsize() > maxsize:
            print("PURGING")
            q2 = queue.PriorityQueue()
            ## Get top elements into new queue
            for i in range(downto):
                q2.put(self.non_leaf_queue.get())
            self.non_leaf_queue = q2
class AstarTreeParallel(AstarTree):
    """AstarTree variant that expands up to num_expand best nodes per
    iteration, returning branch candidates sorted longest-first."""
    def __init__(self, fp, desmiles, max_length=30, max_branches=5000, num_expand=1):
        # FIX: the original forwarded the hard-coded literals
        # max_length=30 and max_branches=5000 to the parent, silently
        # ignoring the caller's arguments.
        super().__init__(fp, desmiles, max_length=max_length, max_branches=max_branches)
        self.num_expand = num_expand

    def branch(self):
        """Pop up to num_expand nodes and return them sorted by
        descending sequence length."""
        seqs, scores = super().branch()
        return self.sort_by_length(seqs, scores)

    @staticmethod
    def sort_by_length(seqs, scores):
        # Reorder (seqs, scores) by descending non-pad length.
        lengths = (seqs > 0).sum(dim=1)
        length_idx = AstarTreeParallel.sort_lengths(lengths)
        seqs = seqs[length_idx]
        scores = scores[length_idx]
        return seqs, scores

    @staticmethod
    def sort_lengths(lengths):
        # Sorting the negated lengths yields descending-order indices.
        return torch.sort(-lengths)[1]
class AstarTreeParallelNotSafe:
def __init__(self, fp, desmiles, max_length=30, max_branches=5000, num_expand=1):
assert type(fp) is torch.Tensor
assert type(desmiles) is RecurrentDESMILES
assert next(desmiles.parameters()).device == fp.device
if fp.dim() == 1:
fp = fp.unsqueeze(0)
self.fp = fp
self.desmiles = desmiles
self.max_length = max_length
self.max_branches = max_branches
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.unique_identifier = count() # counter breaks ties between equivalent probabilities
self.non_leaf_queue = queue.PriorityQueue()
self.leaf_queue = queue.PriorityQueue()
self.desmiles.embed_fingerprints(fp) # initialize rnn_desmiles by embedding the fingerprint
root = self.initialize_root_node()
self.non_leaf_queue.put((torch.tensor(0.0, device=self.device), next(self.unique_identifier), root))
self.leaf_queue.put((torch.tensor(float("inf"), device=self.device), next(self.unique_identifier), root))
self.num_expand = 1
self.num_expand = num_expand
self.node_to_hiddens = {}
score, ident, root = self.non_leaf_queue.get()
self.node_to_hiddens[root] = (self.clone_hiddens(), 0)
self.non_leaf_queue.put((score, ident, root))
self.num_expand = num_expand
self.num_branches = 0
def initialize_root_node(self):
# The sequences are kept in reverse order from right to left starting from the last element;
# This is only happening for the AstarTreeParallelNotSafe version.
# In this way we don't need to look at the length vectors since some sequences will have differing lengths.
# In the memory safe way, we can keep the sequences right padded.
root = torch.zeros(self.max_length, dtype=torch.long, device=self.device)
root[-1] = 3
return root
def clone_hiddens(self):
return [(h[0].clone(),h[1].clone()) for h in self.desmiles.hiddens]
def __next__(self):
# If no more room for expansion
while True:
# In this version of Astar we don't keep expanding if we've reached the max branches.
# This differs from the logic of the early astar algorithm used in the paper.
if self.num_branches > self.max_branches:
if self.leaf_queue.qsize() > 0:
return self.return_leaf_node()
else:
return np.inf, self.last_leaf_node
# Return a leaf node if it has the best score
if self.leaf_queue.queue[0][0] < self.non_leaf_queue.queue[0][0]:
return self.return_leaf_node()
# Otherwise, purge queue and branch
self.purge_queue() # purge non-leaf queue if necessary
self.branch_and_bound()
def branch_and_bound(self):
# get nodes to expand
seqs, scores = self.branch()
#seqs, scores = self.post_branch_callback(seqs, scores)
self.bound(seqs, scores)
self.num_branches += 1
def branch(self):
# Prepare for the branching operation
# This branch is more complex than the plain Astar because it handles hidden states.
# Collect the scores, seqs, hiddens, etc for up to num_expand nodes
scores = []
seqs = []
hiddens = []
# When you branch a node every child has its own hidden state branching from the same parent hidden state
# this index maps back to the hidden state of the parent.
hidden_idxs = []
num_nodes = self.non_leaf_queue.qsize()
# This is a dictionary from the node tensor to a hidden state and a hidden_idx.
node_to_hiddens = self.node_to_hiddens
while(len(scores) < min(self.num_expand, num_nodes)):
score, _, seq = self.non_leaf_queue.get()
scores.append(score)
seqs.append(seq)
hidden, idx = node_to_hiddens[seq]
hiddens.append(hidden)
hidden_idxs.append(idx)
scores = torch.tensor(scores, device=self.device)
seqs = torch.stack(seqs)
# select all the parent hidden states
hiddens = AstarTreeParallelNotSafe.select_all_hiddens(hiddens, hidden_idxs)
# concatenate all the states so we can batch evaluate them.
hiddens = AstarTreeParallelNotSafe.concat_hiddens(hiddens)
# set the hidden states
self.desmiles.hiddens = hiddens
# set the batch size (seqs legnth is same as length of hiddens[0][0]; first layer; cell state of hidden)
self.desmiles.bs = seqs.shape[0]
return seqs, scores
def bound(self, seqs, scores):
with torch.no_grad():
# hidden states are ready, so this only passes the last elements to the desmiles model
log_probs = self.get_log_probabilities(seqs[:,-1].unsqueeze(0))
# Get an index for what parent that child was from.
# Important for getting the | |
import gevent
from gevent import monkey, GreenletExit
import six
from six.moves import xrange
monkey.patch_all(thread=False)
from time import time
import sys
import random
import traceback
import logging
from .clients import HttpSession
from . import events
from .exception import LocustError, InterruptTaskSet, RescheduleTask, RescheduleTaskImmediately, StopLocust
logger = logging.getLogger(__name__)
def task(weight=1, order=2147483647):
    """
    Used as a convenience decorator to be able to declare tasks for a TaskSet
    inline in the class. Example::
        class ForumPage(TaskSet):
            @task(100)
            def read_thread(self):
                pass
            @task(7)
            def create_thread(self):
                pass
    May also be used bare (``@task`` without parentheses), in which case
    the decorated function gets the default weight of 1.
    """
    def decorator_func(func):
        # Stamp the metadata that TaskSetMeta later reads.
        func.locust_task_info = {"weight": weight, "order": order}
        return func
    # Bare usage: the "weight" argument is actually the decorated function.
    if callable(weight):
        func, weight = weight, 1
        return decorator_func(func)
    return decorator_func
class NoClientWarningRaiser(object):
    """Placeholder client: any attribute access raises a helpful
    LocustError, guiding old test scripts that inherit from Locust but
    expect HttpLocust's client attribute."""
    def __getattr__(self, _):
        raise LocustError("No client instantiated. Did you intend to inherit from HttpLocust?")
class Locust(object):
    """
    Represents a "user" which is to be hatched and attack the system that is to be load tested.
    The behaviour of this user is defined by the task_set attribute, which should point to a
    :py:class:`TaskSet <locust.core.TaskSet>` class.
    This class should usually be subclassed by a class that defines some kind of client. For
    example when load testing an HTTP system, you probably want to use the
    :py:class:`HttpLocust <locust.core.HttpLocust>` class.
    """
    host = None
    """Base hostname to swarm. i.e: http://127.0.0.1:1234"""
    min_wait = 1000
    """Minimum waiting time between the execution of locust tasks"""
    max_wait = 1000
    """Maximum waiting time between the execution of locust tasks"""
    random_execute = True
    """Random execute flag, default is True"""
    task_set = None
    """TaskSet class that defines the execution behaviour of this locust"""
    stop_timeout = None
    """Number of seconds after which the Locust will die. If None it won't timeout."""
    weight = 10
    """Probability of locust being chosen. The higher the weight, the greater is the chance of it being chosen."""
    client = NoClientWarningRaiser()
    _catch_exceptions = True

    def __init__(self):
        super(Locust, self).__init__()
        self._update_coroutine = None  # greenlet driving on_update(), if any
        self.task_set_instance = self.task_set(self)

    @property
    def self_co(self):
        # The greenlet this locust runs in, looked up from the global
        # runner via the index stamped on the instance; None when the
        # runner or the index is unavailable.
        from runners import locust_runner
        if locust_runner is None:
            return None
        co_idx = getattr(self, '_locust_co_index')
        if co_idx is None:
            return None
        return locust_runner.get_locust_co(co_idx)

    def run(self):
        # If the subclass defines on_update(), poll it in a side greenlet
        # at the runner-configured interval while the task set runs.
        if hasattr(self, 'on_update'):
            from runners import locust_runner
            update_ivl = locust_runner.options.per_locust_update_interval
            def _do_onupdate():
                while True:
                    try:
                        self.on_update()
                    except GreenletExit:
                        return
                    except StopLocust:
                        # on_update asked to stop: kill this locust's own
                        # greenlet, then exit the updater.
                        if self.self_co:
                            self.self_co.kill(block=True, timeout=3)
                        return
                    except Exception, e:
                        logger.error('Locust <on_update> exception:{}'.format(e), exc_info=True)
                        gevent.sleep(update_ivl)
                    else:
                        gevent.sleep(update_ivl)
            self._update_coroutine = gevent.spawn(_do_onupdate)
        try:
            self.task_set_instance.run()
        except StopLocust:
            pass
        except (RescheduleTask, RescheduleTaskImmediately) as e:
            six.reraise(LocustError, LocustError(
                "A task inside a Locust class' main TaskSet (`%s.task_set` of type `%s`) seems to have called interrupt() or raised an InterruptTaskSet exception. The interrupt() function is used to hand over execution to a parent TaskSet, and should never be called in the main TaskSet which a Locust class' task_set attribute points to." % (
                    type(self).__name__, self.task_set.__name__)), sys.exc_info()[2])
        finally:
            # Always tear down the updater greenlet so it cannot outlive
            # this locust.
            if self._update_coroutine:
                self._update_coroutine.kill(block=True, timeout=3)
                self._update_coroutine = None
class HttpLocust(Locust):
    """
    An HTTP "user" to be hatched and attack the system under load test.

    Behaviour is defined by the task_set attribute, which should point to
    a :py:class:`TaskSet <locust.core.TaskSet>` class. On instantiation a
    *client* attribute is created: an HTTP client that keeps a user
    session (cookies) between requests.
    """
    client = None
    """
    Instance of HttpSession created upon instantiation of Locust; it
    supports cookies and therefore keeps the session between HTTP
    requests.
    """
    def __init__(self):
        super(HttpLocust, self).__init__()
        # A base host is mandatory: without it relative request URLs
        # cannot be resolved.
        if self.host is None:
            raise LocustError(
                "You must specify the base host. Either in the host attribute in the Locust class, or on the command line using the --host option.")
        self.client = HttpSession(base_url=self.host)
class TaskSetMeta(type):
    """
    Meta class for the main Locust class. It's used to allow Locust classes to specify task execution
    ratio using an {task:int} dict, or a [(task0,int), ..., (taskN,int)] list.
    """
    def __new__(mcs, classname, bases, classDict):
        new_tasks = []
        # Inherit tasks declared on base TaskSets.
        for base in bases:
            if hasattr(base, "tasks") and base.tasks:
                new_tasks += base.tasks
        if "tasks" in classDict and classDict["tasks"] is not None:
            tasks = classDict["tasks"]
            # A plain list/tuple means "run in declared order": convert to
            # task -> {'order': position} entries.
            if isinstance(tasks, (list, tuple)):
                tasks = dict([(tasks[idx], {'order': idx + 1}) for idx in xrange(len(tasks))])
            if isinstance(tasks, dict):
                tasks = six.iteritems(tasks)
            for task in tasks:
                if isinstance(task, tuple):
                    task, count = task
                    if isinstance(count, int):
                        # (task, weight) pair: duplicate the task `weight`
                        # times so random selection is weighted.
                        setattr(task, 'locust_task_info', {'weight': count, 'order': 2147483647})
                        for i in xrange(0, count):
                            new_tasks.append(task)
                    else:
                        # (task, {...}) pair: fill in default weight/order.
                        if 'weight' not in count:
                            count['weight'] = 1
                        if 'order' not in count:
                            count['order'] = 2147483647
                        setattr(task, 'locust_task_info', count)
                        for i in xrange(0, count['weight']):
                            new_tasks.append(task)
                else:
                    new_tasks.append(task)
        # Methods decorated with @task carry locust_task_info already.
        for item in six.itervalues(classDict):
            if hasattr(item, "locust_task_info"):
                for i in xrange(0, item.locust_task_info["weight"]):
                    new_tasks.append(item)
        # Stable sort by declared order (Python 2-only cmp-based sort).
        new_tasks = sorted(
            new_tasks, cmp=lambda left, right: cmp(left.locust_task_info["order"], right.locust_task_info["order"]))
        classDict["tasks"] = new_tasks
        return type.__new__(mcs, classname, bases, classDict)
class TaskInstance(object):
    """A single runnable task resolved from a task template.

    Classifies the template as a nested :class:`TaskSet` (which is
    instantiated immediately with *owner* as its parent), a method bound to
    *owner*, or a plain function, and stores the resulting callable/instance
    in ``task_inst``.
    """
    # Task type tags stored in ``task_type``.
    Function = 1
    BoundMethod = 2
    NestedTaskSet = 3

    def __init__(self, task_templ, owner, task_index):
        self.owner = owner
        self.task_templ = task_templ
        self.task_index = task_index
        if hasattr(task_templ, "tasks") and issubclass(task_templ, TaskSet):
            # Nested TaskSet class: instantiate it with the owner as parent.
            self.task_type = self.NestedTaskSet
            self.task_inst = task_templ(owner)
        # BUGFIX: the original tested the undefined name ``task`` here,
        # raising NameError for every non-TaskSet template.
        elif hasattr(task_templ, "__self__") and task_templ.__self__ == owner:
            # A method already bound to the owner instance.
            self.task_type = self.BoundMethod
            self.task_inst = task_templ
        else:
            # A free function (called with the owner passed explicitly later).
            self.task_type = self.Function
            self.task_inst = task_templ
@six.add_metaclass(TaskSetMeta)
class TaskSet(object):
"""
Class defining a set of tasks that a Locust user will execute.
When a TaskSet starts running, it will pick a task from the *tasks* attribute,
execute it, call it's wait function which will sleep a random number between
*min_wait* and *max_wait* milliseconds. It will then schedule another task for
execution and so on.
TaskSets can be nested, which means that a TaskSet's *tasks* attribute can contain
another TaskSet. If the nested TaskSet it scheduled to be executed, it will be
instantiated and called from the current executing TaskSet. Execution in the
currently running TaskSet will then be handed over to the nested TaskSet which will
continue to run until it throws an InterruptTaskSet exception, which is done when
:py:meth:`TaskSet.interrupt() <locust.core.TaskSet.interrupt>` is called. (execution
will then continue in the first TaskSet).
"""
tasks = []
"""
List with python callables that represents a locust user task.
If tasks is a list, the task to be performed will be picked randomly.
If tasks is a *(callable,int)* list of two-tuples, or a {callable:int} dict,
the task to be performed will be picked randomly, but each task will be weighted
according to it's corresponding int value. So in the following case *ThreadPage* will
be fifteen times more likely to be picked than *write_post*::
class ForumPage(TaskSet):
tasks = {ThreadPage:15, write_post:1}
"""
min_wait = None
"""
Minimum waiting time between the execution of locust tasks. Can be used to override
the min_wait defined in the root Locust class, which will be used if not set on the
TaskSet.
"""
max_wait = None
"""
Maximum waiting time between the execution of locust tasks. Can be used to override
the max_wait defined in the root Locust class, which will be used if not set on the
TaskSet.
"""
random_execute = None
"""random execute falg"""
locust = None
"""Will refer to the root Locust class instance when the TaskSet has been instantiated"""
parent = None
"""
Will refer to the parent TaskSet, or Locust, class instance when the TaskSet has been
instantiated. Useful for nested TaskSet classes.
"""
def __init__(self, parent):
    """Bind this TaskSet to *parent*, a Locust or an enclosing TaskSet.

    Raises
    ------
    LocustError
        If *parent* is neither a Locust nor a TaskSet instance.
    """
    self._task_queue = []
    self._time_start = time()
    # Resolve the root Locust user this TaskSet ultimately belongs to.
    if isinstance(parent, TaskSet):
        root = parent.locust
    elif isinstance(parent, Locust):
        root = parent
    else:
        raise LocustError("TaskSet should be called with Locust instance or TaskSet instance as first argument")
    self.locust = root
    self.parent = parent
    # Fall back to the root Locust's wait times when none are set here.
    if not self.min_wait:
        self.min_wait = self.locust.min_wait
    if not self.max_wait:
        self.max_wait = self.locust.max_wait
# if | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division
import numpy as np
import shapely.geometry.point as point
import pycity_base.classes.Weather as Weather
import pycity_base.classes.demand.SpaceHeating as spaceheat
import pycity_base.classes.demand.ElectricalDemand as elecdemand
import pycity_base.classes.supply.BES as BES
import pycity_base.classes.supply.PV as PV
import pycity_base.classes.demand.Apartment as apart
import pycity_calc.cities.city as cit
import pycity_calc.buildings.building as build
import pycity_calc.environments.co2emissions as co2em
import pycity_calc.environments.environment as env
import pycity_calc.environments.timer as time
import pycity_calc.environments.germanmarket as germanmarket
import pycity_calc.simulation.energy_balance.city_eb_calc as cityeb
import pycity_calc.energysystems.chp as chpsys
import pycity_calc.energysystems.battery as bat
import pycity_calc.energysystems.boiler as boil
import pycity_calc.energysystems.electricalHeater as ehsys
import pycity_calc.energysystems.heatPumpSimple as hpsys
import pycity_calc.energysystems.thermalEnergyStorage as sto
import pycity_calc.economic.annuity_calculation as annu
import pycity_calc.economic.city_economic_calc as citecon
class TestCityCO2():
    def test_city_co2_with_pv_only(self):
        """
        Compares CO2 values for city with single building, PV and electrical
        demand, only.
        """
        # Create extended environment of pycity_calc
        year = 2017
        timestep = 900  # Timestep in seconds
        location = (51.529086, 6.944689)  # (latitude, longitute) of Bottrop
        altitude = 55  # Altitude of Bottrop
        # Generate environment
        # Generate timer object
        timer = time.TimerExtended(timestep=timestep, year=year)
        # NOTE(review): nb_timesteps is assigned but never used in this test.
        nb_timesteps = timer.timestepsTotal
        # Generate weather object
        weather = Weather.Weather(timer, useTRY=True, location=location,
                                  altitude=altitude)
        # Generate market object
        gmarket = germanmarket.GermanMarket()
        # Generate co2 emissions object
        co2emissions = co2em.Emissions(year=year)
        # Generate environment
        environment = env.EnvironmentExtended(timer, weather,
                                              prices=gmarket,
                                              location=location,
                                              co2em=co2emissions)
        # City
        city = cit.City(environment=environment)
        # One building
        building = build.BuildingExtended(environment=environment,
                                          build_type=0)
        # One apartment
        apartment = apart.Apartment(environment=environment)
        # Constant electrical load of 500 W over the whole year.
        p_nom = 500  # in W
        array_el = np.ones(environment.timer.timestepsTotal) * p_nom
        el_demand = elecdemand.ElectricalDemand(
            environment=environment,
            method=0,
            loadcurve=array_el)
        # Add energy demands to apartment
        apartment.addEntity(el_demand)
        # Add apartment to extended building
        building.addEntity(entity=apartment)
        # Add building to city
        pos = point.Point(0, 0)
        city.add_extended_building(extended_building=building, position=pos)
        # BES
        bes = BES.BES(environment=environment)
        # PV
        pv_simple = PV.PV(environment=environment, area=10, eta=0.15)
        boiler = boil.BoilerExtended(environment=environment,
                                     q_nominal=1,  # Dummy value
                                     eta=1)
        # Add devices to BES
        bes.addMultipleDevices([pv_simple, boiler])
        # Add BES to building
        building.addEntity(bes)
        # Generate energy balance object
        energy_balance = cityeb.CityEBCalculator(city=city)
        # Generate annuity object instance
        annuity_obj = annu.EconomicCalculation(interest=0.000000001,
                                               # Zero interest undefined,
                                               # thus, using small value
                                               price_ch_cap=1,
                                               price_ch_dem_gas=1,
                                               price_ch_dem_el=1,
                                               price_ch_dem_cool=1,
                                               price_ch_op=1,
                                               price_ch_proc_chp=1.0,
                                               price_ch_proc_pv=1.0,
                                               price_ch_eeg_chp=1.0,
                                               price_ch_eeg_pv=1,
                                               price_ch_eex=1,
                                               price_ch_grid_use=1,
                                               price_ch_chp_sub=1,
                                               price_ch_chp_self=1,
                                               price_ch_chp_tax_return=1,
                                               price_ch_pv_sub=1,
                                               price_ch_dem_el_hp=1)
        # Generate city economic calculator
        city_eco_calc = citecon.CityAnnuityCalc(annuity_obj=annuity_obj,
                                                energy_balance=energy_balance)
        # ##################################################################
        # Run energy balance
        # ##################################################################
        # Calc. city energy balance
        city_eco_calc.energy_balance.calc_city_energy_balance()
        # Perform final energy anaylsis
        city_eco_calc.energy_balance.calc_final_energy_balance_city()
        # Perform emissions calculation
        co2 = city_eco_calc.energy_balance.calc_co2_emissions(
            el_mix_for_chp=True)
        # Total el. demand
        el_energy = building.get_annual_el_demand()
        # Get reference co2 emission factor for electricity
        co2_factor_el_mix = city.environment.co2emissions.co2_factor_el_mix
        # Get el. power array of PV
        pv_power_array = pv_simple.getPower()
        # El. energy PV
        pv_el_energy = sum(pv_power_array) * timestep / (1000 * 3600)
        # Reference: grid import (demand minus PV yield) times el. mix factor.
        co2_ref = (el_energy - pv_el_energy) * co2_factor_el_mix
        # Allow 0.1 % relative deviation.
        assert abs(co2 - co2_ref) <= 0.001 * co2
    def test_city_co2_with_pv_only_no_el_demand(self):
        """
        Compares CO2 values for city with single building, PV (no electr.
        demand)
        """
        # Create extended environment of pycity_calc
        year = 2017
        timestep = 900  # Timestep in seconds
        location = (51.529086, 6.944689)  # (latitude, longitute) of Bottrop
        altitude = 55  # Altitude of Bottrop
        # Generate environment
        # Generate timer object
        timer = time.TimerExtended(timestep=timestep, year=year)
        # Generate weather object
        weather = Weather.Weather(timer, useTRY=True, location=location,
                                  altitude=altitude)
        # Generate market object
        gmarket = germanmarket.GermanMarket()
        # Generate co2 emissions object
        co2emissions = co2em.Emissions(year=year)
        # Generate environment
        environment = env.EnvironmentExtended(timer, weather,
                                              prices=gmarket,
                                              location=location,
                                              co2em=co2emissions)
        # City
        city = cit.City(environment=environment)
        # One building
        building = build.BuildingExtended(environment=environment,
                                          build_type=0)
        # One apartment
        apartment = apart.Apartment(environment=environment)
        # Zero electrical load: every kWh of PV yield is exported.
        p_nom = 0  # in W
        array_el = np.ones(environment.timer.timestepsTotal) * p_nom
        el_demand = elecdemand.ElectricalDemand(
            environment=environment,
            method=0,
            loadcurve=array_el)
        # Add energy demands to apartment
        apartment.addEntity(el_demand)
        # Add apartment to extended building
        building.addEntity(entity=apartment)
        # Add building to city
        pos = point.Point(0, 0)
        city.add_extended_building(extended_building=building, position=pos)
        # BES
        bes = BES.BES(environment=environment)
        # PV
        pv_simple = PV.PV(environment=environment, area=10, eta=0.15)
        boiler = boil.BoilerExtended(environment=environment,
                                     q_nominal=1,  # Dummy value
                                     eta=1)
        # Add devices to BES
        bes.addMultipleDevices([pv_simple, boiler])
        # Add BES to building
        building.addEntity(bes)
        # Generate energy balance object
        energy_balance = cityeb.CityEBCalculator(city=city)
        # Generate annuity object instance
        annuity_obj = annu.EconomicCalculation(interest=0.000000001,
                                               # Zero interest undefined,
                                               # thus, using small value
                                               price_ch_cap=1,
                                               price_ch_dem_gas=1,
                                               price_ch_dem_el=1,
                                               price_ch_dem_cool=1,
                                               price_ch_op=1,
                                               price_ch_proc_chp=1.0,
                                               price_ch_proc_pv=1.0,
                                               price_ch_eeg_chp=1.0,
                                               price_ch_eeg_pv=1,
                                               price_ch_eex=1,
                                               price_ch_grid_use=1,
                                               price_ch_chp_sub=1,
                                               price_ch_chp_self=1,
                                               price_ch_chp_tax_return=1,
                                               price_ch_pv_sub=1,
                                               price_ch_dem_el_hp=1)
        # Generate city economic calculator
        city_eco_calc = citecon.CityAnnuityCalc(annuity_obj=annuity_obj,
                                                energy_balance=energy_balance)
        # ##################################################################
        # Run energy balance
        # ##################################################################
        # Calc. city energy balance
        city_eco_calc.energy_balance.calc_city_energy_balance()
        # Perform final energy anaylsis
        city_eco_calc.energy_balance.calc_final_energy_balance_city()
        # Perform emissions calculation
        co2 = city_eco_calc.energy_balance.calc_co2_emissions(
            el_mix_for_chp=True)
        # With no demand, exported PV electricity yields a net CO2 credit.
        assert co2 < 0
        # Get reference co2 emission factor for electricity
        co2_factor_el_mix = city.environment.co2emissions.co2_factor_el_mix
        # Get el. power array of PV
        pv_power_array = pv_simple.getPower()
        # El. energy PV
        pv_el_energy = sum(pv_power_array) * timestep / (1000 * 3600)
        co2_ref = (- pv_el_energy) * co2_factor_el_mix
        assert abs(co2 - co2_ref) <= 0.001 * abs(co2)
def test_city_co2_chp_calc1(self):
"""
"""
# Create extended environment of pycity_calc
year = 2017
timestep = 900 # Timestep in seconds
location = (51.529086, 6.944689) # (latitude, longitute) of Bottrop
altitude = 55 # Altitude of Bottrop
# Generate environment
# Generate timer object
timer = time.TimerExtended(timestep=timestep, year=year)
nb_timesteps = timer.timestepsTotal
# Generate weather object
weather = Weather.Weather(timer, useTRY=True, location=location,
altitude=altitude)
# Generate market object
gmarket = germanmarket.GermanMarket()
# Generate co2 emissions object
co2emissions = co2em.Emissions(year=year)
# Generate environment
environment = env.EnvironmentExtended(timer, weather,
prices=gmarket,
location=location,
co2em=co2emissions)
# City
city = cit.City(environment=environment)
# One building
building = build.BuildingExtended(environment=environment,
build_type=0)
# One apartment
apartment = apart.Apartment(environment=environment)
# Initialize constant space heating and electrical load
q_nom = 1000 # in W
array_sh = np.ones(environment.timer.timestepsTotal) * q_nom
heat_demand = spaceheat.SpaceHeating(environment=environment,
method=0, loadcurve=array_sh)
p_nom = 300 # in W
array_el = np.ones(environment.timer.timestepsTotal) * p_nom
el_demand = elecdemand.ElectricalDemand(
environment=environment,
method=0,
loadcurve=array_el)
# Add energy demands to apartment
apartment.addMultipleEntities([heat_demand, el_demand])
# Add apartment to extended building
building.addEntity(entity=apartment)
# Add building to city
pos = point.Point(0, 0)
city.add_extended_building(extended_building=building, position=pos)
# BES
bes = BES.BES(environment=environment)
# CHP
chp = chpsys.ChpExtended(environment=environment,
q_nominal=q_nom,
p_nominal=0.001, # Dummmy value
eta_total=1)
# ASUE calc --> el. power --> Get el. power
chp_el_pow = chp.pNominal
print('CHP el. power in Watt: ', chp_el_pow)
# Add CHP to BES
bes.addDevice(chp)
# Create thermal storage
# Create Heating Device
t_init = 55 # °C
capacity = 100 # kg
t_max = 60 # °C
t_min = 20 # °C
cp = 4186 # J/kgK
t_surroundings = 20 # °C
k_losses = 0 # W/(Km²) # Losses set to zero
rho = 1000 # kg / m³
tes = sto.thermalEnergyStorageExtended(environment=environment,
t_init=t_init,
c_p=cp,
capacity=capacity,
t_max=t_max,
t_min=t_min,
t_surroundings=t_surroundings,
k_loss=k_losses,
rho=rho)
# Add TES to BES
bes.addDevice(tes)
# Add BES to building
building.addEntity(bes)
# Generate energy balance object
energy_balance = cityeb.CityEBCalculator(city=city)
# Generate annuity object instance
annuity_obj = annu.EconomicCalculation(interest=0.000000001,
# Zero interest undefined,
# thus, using small value
price_ch_cap=1,
price_ch_dem_gas=1,
price_ch_dem_el=1,
price_ch_dem_cool=1,
price_ch_op=1,
price_ch_proc_chp=1.0,
price_ch_proc_pv=1.0,
price_ch_eeg_chp=1.0,
price_ch_eeg_pv=1,
price_ch_eex=1,
price_ch_grid_use=1,
price_ch_chp_sub=1,
price_ch_chp_self=1,
price_ch_chp_tax_return=1,
price_ch_pv_sub=1,
price_ch_dem_el_hp=1)
# Generate city economic calculator
city_eco_calc = citecon.CityAnnuityCalc(annuity_obj=annuity_obj,
energy_balance=energy_balance)
# ##################################################################
# Run energy balance
# ##################################################################
# Calc. city energy balance
city_eco_calc.energy_balance.calc_city_energy_balance()
# Perform final energy anaylsis
city_eco_calc.energy_balance.calc_final_energy_balance_city()
# Perform emissions calculation
co2 = city_eco_calc.energy_balance.calc_co2_emissions(
el_mix_for_chp=True)
# Perform simplified reference annuity calculation
# ################################################################
# Total space heating energy
sh_energy = building.get_annual_space_heat_demand()
# Total el. demand
el_energy = building.get_annual_el_demand()
print('Total space heating demand in kWh: ', sh_energy)
print('Total electrical demand in kWh: ', el_energy)
# Get amount of CHP-generated el. energy
chp_energy_el = sum(chp.totalPOutput) * timestep / (1000 * 3600)
# Get CHP gas demand
chp_energy_gas = sum(chp.array_fuel_power) * timestep / (1000 * 3600)
print('chp_energy_el: ', chp_energy_el)
print('chp_energy_gas:', chp_energy_gas)
# Estimate amount of self-used electric energy
el_energy_self = chp_energy_el
assert el_energy_self >= 0
# Estimate amount of bought electricity from the grid
el_energy_import = el_energy - el_energy_self
assert el_energy_import >= 0
print('el_energy_self: ', el_energy_self)
print('el_energy_import:', el_energy_import)
co2_gas = city.environment.co2emissions.co2_factor_gas
co2_el = city.environment.co2emissions.co2_factor_el_mix
co2_ref = co2_gas * chp_energy_gas / 1.11 + co2_el * el_energy_import
assert abs(co2_ref - co2) <= 0.001 * co2
gas_ref = sh_energy * (chp_el_pow + q_nom) / q_nom
assert abs(chp_energy_gas - gas_ref) <= 0.001 * gas_ref
chp_el_gen_ref = gas_ref * chp_el_pow / (chp_el_pow + q_nom)
assert abs(chp_el_gen_ref - chp_energy_el) <= 0.001 * chp_energy_el
el_energy_import_b = el_energy - chp_el_gen_ref
co2_ref_b = co2_gas * gas_ref / 1.11 + co2_el * el_energy_import_b
assert abs(co2_ref_b - co2) <= 0.001 | |
<gh_stars>10-100
"""
=========================
User Interaction Handlers
=========================
User interaction handlers for a :class:`~.SchemeEditWidget`.
User interactions encapsulate the logic of user interactions with the
scheme document.
All interactions are subclasses of :class:`UserInteraction`.
"""
import logging
from AnyQt.QtWidgets import QApplication, QGraphicsRectItem, QUndoCommand
from AnyQt.QtGui import QPen, QBrush, QColor, QFontMetrics
from AnyQt.QtCore import (
Qt,
QObject,
QCoreApplication,
QSizeF,
QPointF,
QRect,
QRectF,
QLineF,
)
from AnyQt.QtCore import pyqtSignal as Signal
from ..registry.description import WidgetDescription
from ..registry.qt import QtWidgetRegistry
from .. import scheme
from ..canvas import items
from ..canvas.items import controlpoints
from ..gui.quickhelp import QuickHelpTipEvent
from . import commands
from .editlinksdialog import EditLinksDialog
from functools import reduce
log = logging.getLogger(__name__)
class UserInteraction(QObject):
    """
    Base class for user interaction handlers.
    Parameters
    ----------
    document : :class:`~.SchemeEditWidget`
        An scheme editor instance with which the user is interacting.
    parent : :class:`QObject`, optional
        A parent QObject
    deleteOnEnd : bool, optional
        Should the UserInteraction be deleted when it finishes (``True``
        by default).
    """
    # Cancel reason flags
    # NOTE(review): the value 2 is skipped below — presumably reserved by an
    # earlier revision; confirm before reusing it for a new reason code.
    #: No specified reason
    NoReason = 0
    #: User canceled the operation (e.g. pressing ESC)
    UserCancelReason = 1
    #: Another interaction was set
    InteractionOverrideReason = 3
    #: An internal error occurred
    ErrorReason = 4
    #: Other (unspecified) reason
    OtherReason = 5
    #: Emitted when the interaction is set on the scene.
    started = Signal()
    #: Emitted when the interaction finishes successfully.
    finished = Signal()
    #: Emitted when the interaction ends (canceled or finished)
    ended = Signal()
    #: Emitted when the interaction is canceled.
    canceled = Signal([], [int])

    def __init__(self, document, parent=None, deleteOnEnd=True):
        QObject.__init__(self, parent)
        self.document = document
        self.scene = document.scene()
        self.scheme = document.scheme()
        self.deleteOnEnd = deleteOnEnd
        # Subclasses set this to True to have keyPressEvent cancel on ESC.
        self.cancelOnEsc = False
        # Private lifecycle state; exposed via isFinished()/isCanceled().
        self.__finished = False
        self.__canceled = False
        self.__cancelReason = self.NoReason

    def start(self):
        """
        Start the interaction. This is called by the :class:`CanvasScene` when
        the interaction is installed.
        .. note:: Must be called from subclass implementations.
        """
        self.started.emit()

    def end(self):
        """
        Finish the interaction. Restore any leftover state in this method.
        .. note:: This gets called from the default :func:`cancel`
        implementation.
        """
        self.__finished = True
        # Uninstall ourselves from the scene if we are still the active
        # handler.
        if self.scene.user_interaction_handler is self:
            self.scene.set_user_interaction_handler(None)
        # Emission order matters for listeners: canceled/finished first,
        # then ended.
        if self.__canceled:
            self.canceled.emit()
            self.canceled[int].emit(self.__cancelReason)
        else:
            self.finished.emit()
        self.ended.emit()
        if self.deleteOnEnd:
            # Defer destruction to the Qt event loop (safe during signal
            # delivery).
            self.deleteLater()

    def cancel(self, reason=OtherReason):
        """
        Cancel the interaction with `reason`.
        """
        self.__canceled = True
        self.__cancelReason = reason
        self.end()

    def isFinished(self):
        """
        Is the interaction finished.
        """
        return self.__finished

    def isCanceled(self):
        """
        Was the interaction canceled.
        """
        return self.__canceled

    def cancelReason(self):
        """
        Return the reason the interaction was canceled.
        """
        return self.__cancelReason

    def mousePressEvent(self, event):
        """
        Handle a `QGraphicsScene.mousePressEvent`.
        """
        return False

    def mouseMoveEvent(self, event):
        """
        Handle a `GraphicsScene.mouseMoveEvent`.
        """
        return False

    def mouseReleaseEvent(self, event):
        """
        Handle a `QGraphicsScene.mouseReleaseEvent`.
        """
        return False

    def mouseDoubleClickEvent(self, event):
        """
        Handle a `QGraphicsScene.mouseDoubleClickEvent`.
        """
        return False

    def keyPressEvent(self, event):
        """
        Handle a `QGraphicsScene.keyPressEvent`
        """
        if self.cancelOnEsc and event.key() == Qt.Key_Escape:
            self.cancel(self.UserCancelReason)
        # Returning False lets the scene continue normal event processing.
        return False

    def keyReleaseEvent(self, event):
        """
        Handle a `QGraphicsScene.keyPressEvent`
        """
        return False

    def contextMenuEvent(self, event):
        """
        Handle a `QGraphicsScene.contextMenuEvent`
        """
        return False
class NoPossibleLinksError(ValueError):
    """Raised when no compatible channel links exist between two nodes."""
class UserCanceledError(ValueError):
    """Raised when the user aborts an operation in progress."""
def reversed_arguments(func):
    """
    Return a function with reversed argument order.
    """
    def wrapped(*args):
        # Forward the positional arguments to *func* back-to-front.
        return func(*args[::-1])
    return wrapped
class NewLinkAction(UserInteraction):
"""
User drags a new link from an existing `NodeAnchorItem` to create
a connection between two existing nodes or to a new node if the release
is over an empty area, in which case a quick menu for new node selection
is presented to the user.
"""
# direction of the drag
FROM_SOURCE = 1
FROM_SINK = 2
    def __init__(self, document, *args, **kwargs):
        UserInteraction.__init__(self, document, *args, **kwargs)
        # Endpoints of the drag; which of source/sink is known first depends
        # on `direction` (FROM_SOURCE or FROM_SINK).
        self.source_item = None
        self.sink_item = None
        self.from_item = None
        self.direction = None
        # Set on release when Shift is held: force the link-edit dialog.
        self.force_link_dialog = False
        # An `NodeItem` currently under the mouse as a possible
        # link drop target.
        self.current_target_item = None
        # A temporary `LinkItem` used while dragging.
        self.tmp_link_item = None
        # An temporary `AnchorPoint` inserted into `current_target_item`
        self.tmp_anchor_point = None
        # An `AnchorPoint` following the mouse cursor
        self.cursor_anchor_point = None
        # An QUndoCommand
        self.macro = None
def remove_tmp_anchor(self):
"""
Remove a temporary anchor point from the current target item.
"""
if self.direction == self.FROM_SOURCE:
self.current_target_item.removeInputAnchor(self.tmp_anchor_point)
else:
self.current_target_item.removeOutputAnchor(self.tmp_anchor_point)
self.tmp_anchor_point = None
def create_tmp_anchor(self, item):
"""
Create a new tmp anchor at the `item` (:class:`NodeItem`).
"""
assert self.tmp_anchor_point is None
if self.direction == self.FROM_SOURCE:
self.tmp_anchor_point = item.newInputAnchor()
else:
self.tmp_anchor_point = item.newOutputAnchor()
def can_connect(self, target_item):
"""
Is the connection between `self.from_item` (item where the drag
started) and `target_item` possible.
"""
node1 = self.scene.node_for_item(self.from_item)
node2 = self.scene.node_for_item(target_item)
if self.direction == self.FROM_SOURCE:
return bool(self.scheme.propose_links(node1, node2))
else:
return bool(self.scheme.propose_links(node2, node1))
def set_link_target_anchor(self, anchor):
"""
Set the temp line target anchor.
"""
if self.direction == self.FROM_SOURCE:
self.tmp_link_item.setSinkItem(None, anchor)
else:
self.tmp_link_item.setSourceItem(None, anchor)
def target_node_item_at(self, pos):
"""
Return a suitable :class:`NodeItem` at position `pos` on which
a link can be dropped.
"""
# Test for a suitable `NodeAnchorItem` or `NodeItem` at pos.
if self.direction == self.FROM_SOURCE:
anchor_type = items.SinkAnchorItem
else:
anchor_type = items.SourceAnchorItem
item = self.scene.item_at(pos, (anchor_type, items.NodeItem))
if isinstance(item, anchor_type):
item = item.parentNodeItem()
return item
    def mousePressEvent(self, event):
        # Begin a link drag if the press landed on a node anchor; otherwise
        # this interaction was installed by mistake — cancel it.
        anchor_item = self.scene.item_at(
            event.scenePos(), items.NodeAnchorItem, buttons=Qt.LeftButton
        )
        if anchor_item and event.button() == Qt.LeftButton:
            # Start a new link starting at item
            self.from_item = anchor_item.parentNodeItem()
            if isinstance(anchor_item, items.SourceAnchorItem):
                self.direction = NewLinkAction.FROM_SOURCE
                self.source_item = self.from_item
            else:
                self.direction = NewLinkAction.FROM_SINK
                self.sink_item = self.from_item
            event.accept()
            # Show a quick-help tip describing the drag gesture.
            helpevent = QuickHelpTipEvent(
                self.tr("Create a new link"),
                self.tr(
                    "<h3>Create new link</h3>"
                    "<p>Drag a link to an existing node or release on "
                    "an empty spot to create a new node.</p>"
                    "<p>Hold Shift when releasing the mouse button to "
                    "edit connections.</p>"
                    # '<a href="help://orange-canvas/create-new-links">'
                    # 'More ...</a>'
                ),
            )
            QCoreApplication.postEvent(self.document, helpevent)
            return True
        else:
            # Whoever put us in charge did not know what he was doing.
            self.cancel(self.ErrorReason)
            return False
    def mouseMoveEvent(self, event):
        # Track the drag: lazily create the temp link, then update the drop
        # target and the cursor-following anchor.
        if not self.tmp_link_item:
            # On first mouse move event create the temp link item and
            # initialize it to follow the `cursor_anchor_point`.
            self.tmp_link_item = items.LinkItem()
            # An anchor under the cursor for the duration of this action.
            self.cursor_anchor_point = items.AnchorPoint()
            self.cursor_anchor_point.setPos(event.scenePos())
            # Set the `fixed` end of the temp link (where the drag started).
            if self.direction == self.FROM_SOURCE:
                self.tmp_link_item.setSourceItem(self.source_item)
            else:
                self.tmp_link_item.setSinkItem(self.sink_item)
            self.set_link_target_anchor(self.cursor_anchor_point)
            self.scene.addItem(self.tmp_link_item)
        # `NodeItem` at the cursor position
        item = self.target_node_item_at(event.scenePos())
        if self.current_target_item is not None and (
            item is None or item is not self.current_target_item
        ):
            # `current_target_item` is no longer under the mouse cursor
            # (was replaced by another item or the the cursor was moved over
            # an empty scene spot.
            log.info("%r is no longer the target.", self.current_target_item)
            self.remove_tmp_anchor()
            self.current_target_item = None
        if item is not None and item is not self.from_item:
            # The mouse is over an node item (different from the starting node)
            if self.current_target_item is item:
                # Avoid reseting the points
                pass
            elif self.can_connect(item):
                # Grab a new anchor
                log.info("%r is the new target.", item)
                self.create_tmp_anchor(item)
                self.set_link_target_anchor(self.tmp_anchor_point)
                self.current_target_item = item
            else:
                log.info("%r does not have compatible channels", item)
                self.set_link_target_anchor(self.cursor_anchor_point)
                # TODO: How to indicate that the connection is not possible?
                #       The node's anchor could be drawn with a 'disabled'
                #       palette
        else:
            self.set_link_target_anchor(self.cursor_anchor_point)
        self.cursor_anchor_point.setPos(event.scenePos())
        return True
    def mouseReleaseEvent(self, event):
        # Finish the drag: connect to the node under the cursor, or pop the
        # quick menu to create a new node on empty canvas.
        if self.tmp_link_item:
            self.force_link_dialog = bool(event.modifiers() & Qt.ShiftModifier)
            item = self.target_node_item_at(event.scenePos())
            node = None
            stack = self.document.undoStack()
            # All edits in this gesture go into one undo macro.
            self.macro = QUndoCommand(self.tr("Add link"))
            if item:
                # If the release was over a node item then connect them
                node = self.scene.node_for_item(item)
            else:
                # Release on an empty canvas part
                # Show a quick menu popup for a new widget creation.
                try:
                    node = self.create_new(event)
                except Exception:
                    log.error("Failed to create a new node, ending.", exc_info=True)
                    node = None
                if node is not None:
                    commands.AddNodeCommand(self.scheme, node, parent=self.macro)
            if node is not None:
                if self.direction == self.FROM_SOURCE:
                    source_node = self.scene.node_for_item(self.source_item)
                    sink_node = node
                else:
                    source_node = node
                    sink_node = self.scene.node_for_item(self.sink_item)
                self.connect_nodes(source_node, sink_node)
                # NOTE(review): due to operator precedence this parses as
                # `not canceled OR (not finished AND macro is not None)`;
                # verify the intended grouping — it looks like it should be
                # `(not canceled or not finished) and macro is not None`.
                if (
                    not self.isCanceled()
                    or not self.isFinished()
                    and self.macro is not None
                ):
                    # Push (commit) the add link/node action on the stack.
                    stack.push(self.macro)
            self.end()
        else:
            self.end()
        return False
def create_new(self, event):
"""
Create and return a new node with a `QuickMenu`.
"""
pos = event.screenPos()
menu = self.document.quickMenu()
node = self.scene.node_for_item(self.from_item)
from_desc = node.description
def is_compatible(source, sink):
return any(
scheme.compatible_channels(output, input)
for output in source.outputs
for input in sink.inputs
)
if self.direction == self.FROM_SINK:
# Reverse the argument order.
| |
<gh_stars>10-100
import functools
import click
from gradient import clilogger
from gradient.api_sdk import constants, workspace
from gradient.cli import common, validators
from gradient.cli.cli import cli
from gradient.cli.cli_types import json_string, ChoiceType
from gradient.cli.common import api_key_option, ClickGroup, validate_comma_split_option
from gradient.cli.utils.flag_with_value import GradientRegisterReaderOption, GradientRegisterWriterOption, \
GradientRegisterWriterCommand
from gradient.cli_constants import CLI_PS_CLIENT_NAME
from gradient.commands import experiments as experiments_commands
from gradient.commands.experiments import ExperimentAddTagsCommand, ExperimentRemoveTagsCommand, \
GetExperimentMetricsCommand, ListExperimentMetricsCommand, StreamExperimentMetricsCommand
# Dispatch tables mapping a multi-node experiment type constant to the
# command class that handles it: one for plain creation, one for
# create-and-start ("run").
MULTI_NODE_CREATE_EXPERIMENT_COMMANDS = {
    constants.ExperimentType.GRPC_MULTI_NODE: experiments_commands.CreateMultiNodeExperimentCommand,
    constants.ExperimentType.MPI_MULTI_NODE: experiments_commands.CreateMpiMultiNodeExperimentCommand,
}
MULTI_NODE_RUN_EXPERIMENT_COMMANDS = {
    constants.ExperimentType.GRPC_MULTI_NODE: experiments_commands.CreateAndStartMultiNodeExperimentCommand,
    constants.ExperimentType.MPI_MULTI_NODE: experiments_commands.CreateAndStartMpiMultiNodeExperimentCommand,
}
def get_workspace_handler(api_key):
    """Build an S3 workspace handler (with progress bar) for the CLI client."""
    cli_logger = clilogger.CliLogger()
    return workspace.S3WorkspaceHandlerWithProgressbar(
        api_key=api_key,
        logger_=cli_logger,
        client_name=CLI_PS_CLIENT_NAME,
    )
# Top-level "experiments" CLI group and its sub-groups. The callback bodies
# are empty on purpose: Click uses them only as grouping nodes; the real
# work happens in the commands registered on each group.
@cli.group("experiments", help="Manage experiments", cls=ClickGroup)
def experiments_group():
    pass


@experiments_group.group("create", help="Create new experiment", cls=ClickGroup)
def create_experiment():
    pass


@experiments_group.group(name="run", help="Create and start new experiment", cls=ClickGroup)
def create_and_start_experiment():
    pass


@experiments_group.group(name="tags", help="Manage tags for experiment", cls=ClickGroup)
def experiments_tags():
    pass


@experiments_group.group(name="metrics", help="Read experiment metrics", cls=ClickGroup)
def experiments_metrics():
    pass
def common_experiments_create_options(f):
    """Decorator that attaches the CLI options shared by every
    "experiments create"/"run" command to *f*.

    The options are applied in reverse so that they appear in ``--help``
    in the declared order.
    """
    options = [
        click.option(
            "--name",
            metavar="<name>",
            help="Name of new experiment",
            cls=common.GradientOption,
        ),
        click.option(
            "--ports",
            help="Port to use in new experiment",
            cls=common.GradientOption,
        ),
        click.option(
            "--workspace",
            "workspace",
            help="Path to workspace directory, archive, S3 or git repository",
            cls=common.GradientOption,
        ),
        click.option(
            "--workspaceRef",
            "workspace_ref",
            help="Git commit hash, branch name or tag",
            cls=common.GradientOption,
        ),
        click.option(
            "--workspaceUsername",
            "workspace_username",
            metavar="<username>",
            help="Workspace username",
            cls=common.GradientOption,
        ),
        click.option(
            "--workspacePassword",
            "workspace_password",
            help="Workspace password",
            cls=common.GradientOption,
        ),
        click.option(
            "--ignoreFiles",
            "ignore_files",
            help="Ignore certain files from uploading",
            cls=common.GradientOption,
        ),
        click.option(
            "--workingDirectory",
            "working_directory",
            help="Working directory for the experiment",
            cls=common.GradientOption,
        ),
        click.option(
            "--artifactDirectory",
            "artifact_directory",
            help="Artifacts directory",
            cls=common.GradientOption,
        ),
        click.option(
            "--clusterId",
            "cluster_id",
            metavar="<cluster ID>",
            help="Cluster ID",
            cls=common.GradientOption,
        ),
        click.option(
            "--experimentEnv",
            "experiment_env",
            type=json_string,
            help="Environment variables in a JSON",
            cls=common.GradientOption,
        ),
        click.option(
            "--projectId",
            "project_id",
            metavar="<project ID>",
            required=True,
            help="Project ID",
            cls=common.GradientOption,
        ),
        click.option(
            "--modelType",
            "model_type",
            metavar="<model type>",
            help="Model type",
            cls=common.GradientOption,
        ),
        click.option(
            "--modelPath",
            "model_path",
            metavar="<path>",
            help="Model path",
            cls=common.GradientOption,
        ),
        click.option(
            "--isPreemptible",
            "is_preemptible",
            type=bool,
            is_flag=True,
            help="Flag: is preemptible",
            cls=common.GradientOption,
        ),
        click.option(
            "--tag",
            "tags",
            multiple=True,
            help="One or many tags that you want to add to experiment",
            cls=common.GradientOption
        ),
        click.option(
            "--tags",
            "tags_comma",
            help="Separated by comma tags that you want add to experiment",
            cls=common.GradientOption
        )
    ]
    # Apply decorators outermost-last so --help lists them in source order.
    return functools.reduce(lambda x, opt: opt(x), reversed(options), f)
def dataset_options(f):
    """Attach the repeatable --dataset* options (IDs, S3 credentials, volume
    settings) to command *f*.

    All options are ``multiple=True`` so several datasets can be declared on
    one command line; corresponding positions in each list describe one
    dataset.  Fixes the misspelled "<dateset ...>" metavars shown in --help.
    """
    options = [
        click.option(
            "--datasetId",
            "dataset_id_list",
            metavar="<dataset id>",
            multiple=True,
            help="Dataset ID",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetUri",
            "dataset_uri_list",
            metavar="<dataset uri>",
            multiple=True,
            help="Url to S3 bucket with dataset",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetName",
            "dataset_name_list",
            multiple=True,
            metavar="<dataset name>",
            help="Name of dataset",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetAwsAccessKeyId",
            "dataset_access_key_id_list",
            multiple=True,
            metavar="<AWS access key>",
            help="S3 bucket's Access Key ID",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetAwsSecretAccessKey",
            "dataset_secret_access_key_list",
            multiple=True,
            help="S3 bucket's Secret Access Key",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetAwsEndpoint",
            "dataset_endpoint_list",
            multiple=True,
            help="S3 endpoint URL",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetVersionId",
            "dataset_version_id_list",
            metavar="<version ID>",
            multiple=True,
            help="S3 dataset's version ID",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetEtag",
            "dataset_etag_list",
            metavar="<etag>",
            multiple=True,
            help="S3 dataset's ETag",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetVolumeKind",
            "dataset_volume_kind_list",
            multiple=True,
            type=ChoiceType(constants.DATASET_VOLUME_KINDS, case_sensitive=False),
            help="S3 dataset's volume kind. If used, --datasetVolumeSize has to be set as well",
            cls=common.GradientDatasetOption,
        ),
        click.option(
            "--datasetVolumeSize",
            "dataset_volume_size_list",
            multiple=True,
            help="S3 dataset's volume size",
            cls=common.GradientDatasetOption,
        ),
    ]
    # apply bottom-up so options appear in declaration order in --help
    return functools.reduce(lambda x, opt: opt(x), reversed(options), f)
def common_experiment_create_multi_node_options(f):
    """Attach the options shared by all multi-node experiment create commands.

    Covers worker/parameter-server (GRPC) and master (MPI) container, machine
    type, command, count and registry credentials.  ``--masterCount`` now uses
    ``type=int`` for consistency with ``--workerCount`` and
    ``--parameterServerCount`` (it was silently passed through as a string).
    """
    options = [
        click.option(
            "--experimentType",
            "experiment_type_id",
            type=ChoiceType(constants.MULTI_NODE_EXPERIMENT_TYPES_MAP, case_sensitive=False),
            required=True,
            help="Experiment Type",
            cls=common.GradientOption,
        ),
        click.option(
            "--workerContainer",
            "worker_container",
            metavar="<container>",
            required=True,
            help="Worker container",
            cls=common.GradientOption,
        ),
        click.option(
            "--workerMachineType",
            "worker_machine_type",
            metavar="<machine type>",
            required=True,
            help="Worker machine type",
            cls=common.GradientOption,
        ),
        click.option(
            "--workerCommand",
            "worker_command",
            metavar="<command>",
            required=True,
            help="Worker command",
            cls=common.GradientOption,
        ),
        click.option(
            "--workerCount",
            "worker_count",
            type=int,
            required=True,
            help="Worker count",
            cls=common.GradientOption,
        ),
        click.option(
            "--parameterServerContainer",
            "parameter_server_container",
            metavar="<container>",
            help="Parameter server container (GRPC only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--parameterServerMachineType",
            "parameter_server_machine_type",
            metavar="<machine type>",
            help="Parameter server machine type (GRPC only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--parameterServerCommand",
            "parameter_server_command",
            metavar="<command>",
            help="Parameter server command (GRPC only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--parameterServerCount",
            "parameter_server_count",
            type=int,
            help="Parameter server count (GRPC only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--masterContainer",
            "master_container",
            metavar="<container>",
            help="Master container (MPI only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--masterMachineType",
            "master_machine_type",
            metavar="<machine type>",
            help="Master machine type (MPI only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--masterCount",
            "master_count",
            type=int,  # fix: was untyped, unlike the other *Count options
            help="Master count (MPI only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--masterCommand",
            "master_command",
            metavar="<command>",
            help="Master command (MPI only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--workerContainerUser",
            "worker_container_user",
            help="Worker container user",
            cls=common.GradientOption,
        ),
        click.option(
            "--workerRegistryUsername",
            "worker_registry_username",
            help="Worker container registry username",
            cls=common.GradientOption,
        ),
        click.option(
            "--workerRegistryPassword",
            "worker_registry_password",
            metavar="<password>",
            help="Worker registry password",
            cls=common.GradientOption,
        ),
        click.option(
            "--workerRegistryUrl",
            "worker_registry_url",
            metavar="<registry url>",
            help="Worker registry URL",
            cls=common.GradientOption,
        ),
        click.option(
            "--parameterServerContainerUser",
            "parameter_server_container_user",
            help="Parameter server container user",
            cls=common.GradientOption,
        ),
        click.option(
            "--parameterServerRegistryUsername",
            "parameter_server_registry_username",
            help="Parameter server registry username",
            cls=common.GradientOption,
        ),
        click.option(
            "--parameterServerRegistryPassword",
            "parameter_server_registry_password",
            metavar="<password>",
            help="Parameter server registry password",
            cls=common.GradientOption,
        ),
        click.option(
            "--parameterServerRegistryUrl",
            "parameter_server_registry_url",
            metavar="<registry url>",
            help="Parameter server registry URL",
            cls=common.GradientOption,
        ),
        click.option(
            "--masterContainerUser",
            "master_container_user",
            help="Master container user (MPI only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--masterRegistryUsername",
            "master_registry_username",
            metavar="<username>",
            help="Master registry username (MPI only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--masterRegistryPassword",
            "master_registry_password",
            metavar="<password>",
            help="Master registry password (MPI only)",
            cls=common.GradientOption,
        ),
        click.option(
            "--masterRegistryUrl",
            "master_registry_url",
            metavar="<registry url>",
            help="Master registry URL (MPI only)",
            cls=common.GradientOption
        ),
    ]
    # apply bottom-up so options appear in declaration order in --help
    return functools.reduce(lambda x, opt: opt(x), reversed(options), f)
def common_experiments_create_single_node_options(f):
    """Attach the options shared by single-node experiment create commands:
    container, machine type, entrypoint command and registry credentials."""
    decorators = [
        click.option(
            "--container",
            required=True,
            help="Container",
            cls=common.GradientOption,
        ),
        click.option(
            "--machineType",
            "machine_type",
            required=True,
            metavar="<machine type>",
            help="Machine type",
            cls=common.GradientOption,
        ),
        click.option(
            "--command",
            required=True,
            metavar="<command>",
            help="Container entrypoint command",
            cls=common.GradientOption,
        ),
        click.option(
            "--containerUser",
            "container_user",
            help="Container user",
            cls=common.GradientOption,
        ),
        click.option(
            "--registryUsername",
            "registry_username",
            help="Registry username",
            cls=common.GradientOption,
        ),
        click.option(
            "--registryPassword",
            "registry_password",
            metavar="<password>",
            help="Registry password",
            cls=common.GradientOption,
        ),
        click.option(
            "--registryUrl",
            "registry_url",
            metavar="<registry url>",
            help="Registry URL",
            cls=common.GradientOption,
        ),
    ]
    # apply bottom-up so --help lists the options in declaration order
    for decorator in reversed(decorators):
        f = decorator(f)
    return f
def tensorboard_option(f):
    """Attach --tensorboard (create a new one) and --tensorboard_set (attach
    to an existing one) options to command *f*."""
    decorators = [
        click.option(
            "--tensorboard",
            is_flag=True,
            # default=experiments_commands.NoTensorboardId,
            help="Creates new tensorboard for this experiment",
            cls=GradientRegisterReaderOption,
        ),
        click.option(
            "--tensorboard_set",
            help="Add to existing tensorboard",
            cls=GradientRegisterWriterOption,
            metavar='<tensorboard ID>'
        ),
    ]
    # apply bottom-up so --help lists the options in declaration order
    for decorator in reversed(decorators):
        f = decorator(f)
    return f
def parse_tensorboard_options(tensorboard, tensorboard_set):
    """Resolve the --tensorboard / --tensorboard_set pair into one value.

    :param str|bool tensorboard: True means "create a new tensorboard"
    :param str|None tensorboard_set: ID of an existing tensorboard to use
    :rtype: str|bool
    :return: True (create new), a tensorboard ID (attach), or False (none)
    """
    if tensorboard is True:
        return True
    # fall back to the explicit ID if given, otherwise "no tensorboard"
    return tensorboard_set or False
@create_experiment.command(name="multinode", help="Create multi node experiment", cls=GradientRegisterWriterCommand)
@common_experiments_create_options
@common_experiment_create_multi_node_options
@dataset_options
@tensorboard_option
@api_key_option
@common.options_file
def create_multi_node(api_key, tensorboard, tensorboard_set, options_file, **kwargs):
    """Create (but do not start) a multi-node experiment."""
    # merge --tag and comma-separated --tags into one list before validation
    kwargs["tags"] = validate_comma_split_option(kwargs.pop("tags_comma"), kwargs.pop("tags"))
    add_to_tensorboard = parse_tensorboard_options(tensorboard, tensorboard_set)
    validators.validate_multi_node(kwargs)
    common.del_if_value_is_none(kwargs)
    # dispatch to the command class registered for this experiment type
    command_cls = MULTI_NODE_CREATE_EXPERIMENT_COMMANDS.get(kwargs.get('experiment_type_id'))
    command_cls(
        api_key=api_key,
        workspace_handler=get_workspace_handler(api_key),
    ).execute(kwargs, add_to_tensorboard=add_to_tensorboard)
@create_experiment.command(name="singlenode", help="Create single node experiment", cls=GradientRegisterWriterCommand)
@common_experiments_create_options
@common_experiments_create_single_node_options
@dataset_options
@tensorboard_option
@api_key_option
@common.options_file
def create_single_node(api_key, tensorboard, tensorboard_set, options_file, **kwargs):
    """Create (but do not start) a single-node experiment."""
    # merge --tag and comma-separated --tags into one list
    kwargs["tags"] = validate_comma_split_option(kwargs.pop("tags_comma"), kwargs.pop("tags"))
    add_to_tensorboard = parse_tensorboard_options(tensorboard, tensorboard_set)
    common.del_if_value_is_none(kwargs)
    experiments_commands.CreateSingleNodeExperimentCommand(
        api_key=api_key,
        workspace_handler=get_workspace_handler(api_key),
    ).execute(kwargs, add_to_tensorboard=add_to_tensorboard)
@create_and_start_experiment.command(name="multinode", help="Create and start new multi node experiment",
                                     cls=GradientRegisterWriterCommand)
@common_experiments_create_options
@common_experiment_create_multi_node_options
@click.option(
    "--no-logs",
    "show_logs",
    is_flag=True,
    flag_value=False,
    default=True,
    help="Don't show logs. Only create, start and exit",
)
@dataset_options
@tensorboard_option
@api_key_option
@common.options_file
@click.pass_context
def create_and_start_multi_node(ctx, api_key, show_logs, tensorboard, tensorboard_set, options_file, **kwargs):
    """Create a multi-node experiment, start it, and stream its logs
    unless --no-logs was given."""
    # merge --tag and comma-separated --tags into one list before validation
    kwargs["tags"] = validate_comma_split_option(kwargs.pop("tags_comma"), kwargs.pop("tags"))
    add_to_tensorboard = parse_tensorboard_options(tensorboard, tensorboard_set)
    validators.validate_multi_node(kwargs)
    common.del_if_value_is_none(kwargs)
    # dispatch to the run-command class registered for this experiment type
    runner_cls = MULTI_NODE_RUN_EXPERIMENT_COMMANDS.get(kwargs.get('experiment_type_id'))
    runner = runner_cls(
        api_key=api_key,
        workspace_handler=get_workspace_handler(api_key),
    )
    experiment_id = runner.execute(kwargs, add_to_tensorboard=add_to_tensorboard)
    if experiment_id and show_logs:
        ctx.invoke(list_logs, experiment_id=experiment_id, line=1, limit=100, follow=True, api_key=api_key)
@create_and_start_experiment.command(name="singlenode", help="Create and start new single node experiment",
                                     cls=GradientRegisterWriterCommand)
@common_experiments_create_options
@common_experiments_create_single_node_options
@click.option(
    "--no-logs",
    "show_logs",
    is_flag=True,
    flag_value=False,
    default=True,
    help="Don't show logs. Only create, start and exit",
)
@dataset_options
@tensorboard_option
@api_key_option
@common.options_file
@click.pass_context
def create_and_start_single_node(ctx, api_key, show_logs, tensorboard, tensorboard_set, options_file,
                                 **kwargs):
    """Create a single-node experiment, start it, and stream its logs
    unless --no-logs was given."""
    # merge --tag and comma-separated --tags into one list
    kwargs["tags"] = validate_comma_split_option(kwargs.pop("tags_comma"), kwargs.pop("tags"))
    add_to_tensorboard = parse_tensorboard_options(tensorboard, tensorboard_set)
    common.del_if_value_is_none(kwargs)
    runner = experiments_commands.CreateAndStartSingleNodeExperimentCommand(
        api_key=api_key,
        workspace_handler=get_workspace_handler(api_key),
    )
    experiment_id = runner.execute(kwargs, add_to_tensorboard=add_to_tensorboard)
    if experiment_id and show_logs:
        ctx.invoke(list_logs, experiment_id=experiment_id, line=1, limit=100, follow=True, api_key=api_key)
@experiments_group.command("start", help="Start experiment")
@click.option(
    "--id",
    "id",
    required=True,
    cls=common.GradientOption,
    help="ID of the experiment",
)
@click.option(
    "--logs",
    "show_logs",
    is_flag=True,
    help="Show logs",
)
@api_key_option
@common.options_file
@click.pass_context
def start_experiment(ctx, id, show_logs, api_key, options_file):
    """Start an existing experiment; optionally follow its logs with --logs."""
    experiments_commands.StartExperimentCommand(api_key=api_key).execute(id)
    if show_logs:
        ctx.invoke(list_logs, experiment_id=id, line=1, limit=100, follow=True, api_key=api_key)
@experiments_group.command("stop", help="Stop experiment")
@click.option(
    "--id",
    "id",
    required=True,
    cls=common.GradientOption,
    help="ID of the experiment",
)
@api_key_option
@common.options_file
def stop_experiment(id, api_key, options_file):
    """Stop a running experiment by ID."""
    experiments_commands.StopExperimentCommand(api_key=api_key).execute(id)
@experiments_group.command("list", help="List experiments")
@click.option(
    "--projectId",
    "-p",
    "project_ids",
    multiple=True,
    metavar='<project ID>',
    help="Filter by project IDs. Multiple use",
    cls=common.GradientOption,
)
@click.option(
    "--tag",
    "tags",
    multiple=True,
    cls=common.GradientOption,
    help="Filter by tags. Multiple use"
)
@click.option(
    "--limit",
    "-l",
    "exp_limit",
    default=20,
    help="Limit listed experiments per page",
    cls=common.GradientOption,
)
@click.option(
    "--offset",
    "-o",
    "exp_offset",
    default=0,
    cls=common.GradientOption,
)
@api_key_option
@common.options_file
def list_experiments(project_ids, api_key, exp_limit, exp_offset, tags, options_file):
    """List experiments page by page, asking before fetching the next page."""
    lister = experiments_commands.ListExperimentsCommand(api_key=api_key)
    pages = lister.execute(project_id=project_ids, limit=exp_limit, offset=exp_offset, tags=tags)
    for experiments_str, has_more in pages:
        click.echo(experiments_str)
        # abort=True raises click.Abort on "no", ending the pagination loop
        if has_more:
            click.confirm("Do you want to continue?", abort=True)
@experiments_group.command("details", help="Show detail of an experiment")
@click.option(
    "--id",
    "id",
    required=True,
    cls=common.GradientOption,
    help="ID of the experiment",
)
@api_key_option
@common.options_file
def get_experiment_details(id, options_file, api_key):
    """Print the details of a single experiment."""
    experiments_commands.GetExperimentCommand(api_key=api_key).execute(id)
@experiments_group.command("logs", help="List experiment logs")
@click.option(
    "--id",
    "experiment_id",
    required=True,
    cls=common.GradientOption,
)
@click.option(
    "--line",
    "line",
    required=False,
    default=0,
    cls=common.GradientOption,
)
@click.option(
    "--limit",
    "limit",
    required=False,
    default=10000,
    cls=common.GradientOption,
)
@click.option(
    "--follow",
    "follow",
    required=False,
    default=False,
    cls=common.GradientOption,
)
@api_key_option
@common.options_file
def list_logs(experiment_id, line, limit, follow, options_file, api_key=None):
    """Print experiment logs, optionally tailing them with --follow."""
    experiments_commands.ExperimentLogsCommand(api_key=api_key).execute(experiment_id, line, limit, follow)
@experiments_group.command("delete", help="Delete an experiment")
@click.option(
    "--id",
    "id",
    required=True,
    cls=common.GradientOption,
    help="ID of the experiment",
)
@api_key_option
@common.options_file
def delete_experiment(id, options_file, api_key):
    """Delete an experiment by ID."""
    experiments_commands.DeleteExperimentCommand(api_key=api_key).execute(id)
@experiments_tags.command("add", help="Add tags to experiment")
@click.option(
    "--id",
    "id",
    required=True,
    cls=common.GradientOption,
    help="ID of the experiment",
)
@click.option(
    "--tag",
    "tags",
    multiple=True,
    help="One or many tags that you want to add to experiment",
    cls=common.GradientOption
)
@click.option(
    "--tags",
    "tags_comma",
    help="Separated by comma tags that you want add to experiment",
    cls=common.GradientOption
)
@api_key_option
@common.options_file
def experiment_add_tags(id, options_file, api_key, **kwargs):
    """Add tags (given via --tag and/or comma-separated --tags) to an experiment."""
    # at least one tag must be supplied, hence raise_if_no_values=True
    kwargs["tags"] = validate_comma_split_option(kwargs.pop("tags_comma"), kwargs.pop("tags"), raise_if_no_values=True)
    ExperimentAddTagsCommand(api_key=api_key).execute(id, **kwargs)
@experiments_tags.command("remove", help="Remove tags from experiment")
@click.option(
"--id",
"id",
required=True,
cls=common.GradientOption,
help="ID of the experiment",
)
@click.option(
"--tag",
"tags",
multiple=True,
help="One or many tags that you want to remove from experiment",
cls=common.GradientOption
)
@click.option(
"--tags",
"tags_comma",
help="Separated by comma tags that you | |
# src/1-Phenotype-web/ext/db_generator.py
#!/usr/bin/python
from wikitools import wiki, category
import os
import re
import urllib2
import urllib
from bs4 import BeautifulSoup
from bs4 import Comment
import sys
import csv
import sqlite3
from lxml import etree
def load_accepted_rsnumbers(filename):
    """Read one snp name per line from *filename*, lower-case it, and return
    the set of entries that look like rs numbers (via rs_filter).

    Uses a context manager so the file handle is closed deterministically
    (the original leaked the handle).
    """
    with open(filename) as f:
        lines = f.read().split('\n')
    return rs_filter(line.rstrip().lower() for line in lines)
def get_snpedia_snp_names():
    """Return the lower-cased titles of all articles in SNPedia's
    'Is_a_snp' category (main namespace only)."""
    site = wiki.Wiki('https://bots.snpedia.com/index.php/')
    snp_category = category.Category(site, 'Is_a_snp')
    # namespaces=[0] restricts the walk to main-namespace articles
    return set(article.title.lower()
               for article in snp_category.getAllMembersGen(namespaces=[0]))
def download_and_add_snpedia_data(accepted_rsnumbers):
print 'downloading snpedia pages and generating text html ...'
conn = sqlite3.connect('snpedia.db')
conn.text_factory = str
c = conn.cursor()
c.execute('DROP TABLE IF EXISTS main')
c.execute('CREATE TABLE main (rs text PRIMARY KEY, html text, strand_info text)')
# get the names of all snps in snpedia
snpedia = get_snpedia_snp_names()
print("here1")
# filter out snp names that aren't rs numbers
rsnumbers = rs_filter(snpedia)
# filter out rsnumbers we don't want
if accepted_rsnumbers:
rsnumbers = rsnumbers & accepted_rsnumbers
for idx, rs in enumerate(rsnumbers):
print 'processing snp {0} out of {1} ...'.format(idx + 1, len(rsnumbers))
while True:
print("here1")
try:
print("here2")
query = 'https://bots.snpedia.com/index.php/'+rs
print("here3")
response = urllib.request.urlopen(query)
html = response.read()
except urllib2.URLError:
print "error downloading page, trying again ..."
continue
break
c.execute('INSERT INTO main VALUES (?, ?, ?)', (rs, extract_snpedia_text(html), extract_snpedia_strand_info(html)))
conn.commit()
conn.close()
def rs_filter(snp_names):
    """Return the subset of *snp_names* that begin with an rs number.

    Note: re.match anchors only at the start, so 'rs12abc' is kept — this
    prefix semantics is intentional and preserved.
    """
    pattern = re.compile(r'rs\d+')
    return {name for name in snp_names if pattern.match(name)}
def extract_snpedia_strand_info(page):
    """Return 'plus' or 'minus' scraped from a SNPedia page's Orientation
    table cell, or 'undefined' when no such cell is present."""
    # the strand info is contained between these unique tags
    regex = r'title="Orientation">Orientation</a></td><td>(plus|minus)</td></tr>'
    matches = re.findall(regex, page, re.MULTILINE)
    return matches[0] if matches else "undefined"
def extract_snpedia_text(html_page):
    """Strip a SNPedia article page down to its prose and return the body as
    utf-8 encoded, prettified HTML.

    Removes HTML comments, all top-level <table>/<div> elements (navigation,
    infoboxes), and <a class="image"> links whose targets were not downloaded.
    """
    soup = BeautifulSoup(html_page)
    # remove comments
    for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
        comment.extract()
    # remove unwanted tags: only direct children of <body> that are tables or
    # divs (find_all is re-run each iteration against the mutated tree)
    for t in soup.body.find_all(recursive=False):
        if t in soup.body.find_all('table') or \
           t in soup.body.find_all('div'):
            t.extract()
    # remove dead image links
    for t in soup.body.find_all('a'):
        if t.get('class') and 'image' in t.get('class'):
            t.extract()
    return soup.body.prettify(formatter='html').encode('utf-8')
# def generate_snpedia_charts(rsnumbers):
# print 'generating snpedia charts ...'
#
# for idx, rs in enumerate(rsnumbers):
# print 'processing snp {0} out of {1} ...'.format(idx + 1, len(rsnumbers))
#
# page = open('snp_db/'+rs+'/snpedia_page.html').read()
#
#
# m = re.search(url_regex, page)
#
# if m:
# soup = BeautifulSoup(m.group(1))
# urllib.urlretrieve(soup.string, 'snp_db/'+rs+'/snpedia_chart.png')
# def generate_html_for_snpedia_charts(rsnumbers):
# print 'generating html for snpedia charts ...'
#
# for idx, rs in enumerate(rsnumbers):
# print 'processing snp {0} out of {1} ...'.format(idx + 1, len(rsnumbers))
#
# page = open('snp_db/'+rs+'/snpedia_page.html').read()
#
# soup = BeautifulSoup(page)
#
# url_regex = r"'(http://chart\.apis\.google\.com/chart.*?)'"
# m = re.search(url_regex, page)
#
# if m:
# tables = soup.find_all('table')
# table = [table for table in tables if table.img and table.img.get('src') and re.match(r'http://chart\.apis\.google\.com/chart', table.img.get('src'))][0]
# table.img['src'] = 'snpedia_chart.png'
# open('snp_db/'+rs+'/snpedia_chart.html', 'w').write(table.prettify(formatter='html').encode('utf-8'))
def download_hapmap_data():
    """Mirror the HapMap phase III frequency files into ./hapmap_archive.

    Does nothing if the directory already exists (assumed complete).  The
    file list is scraped from the NCBI index page; each .txt.gz is downloaded
    and gunzipped in place.
    """
    if not os.path.exists('hapmap_archive'):
        print 'downloading and unpacking hapmap data ...'
        # get index
        query = 'http://hapmap.ncbi.nlm.nih.gov/downloads/frequencies/latest_phaseIII_ncbi_b36/fwd_strand/non-redundant/'
        response = urllib.urlopen(query)
        html = response.read()
        soup = BeautifulSoup(html)
        # create local archive
        prefix = 'http://hapmap.ncbi.nlm.nih.gov/downloads/frequencies/latest_phaseIII_ncbi_b36/fwd_strand/non-redundant/'
        os.mkdir('hapmap_archive')
        # every link ending in .txt.gz on the index page is a data file
        filenames = set(a['href'] for a in soup.find_all('a') if re.search(r'\.txt\.gz', a['href']))
        for idx, filename in enumerate(filenames):
            print 'processing file {0} out of {1} ...'.format(idx + 1, len(filenames))
            urllib.urlretrieve(prefix + filename, 'hapmap_archive/' + filename)
            # NOTE(review): filename originates from the remote index page and
            # is interpolated into a shell command unquoted -- assumed safe for
            # NCBI's plain file names; confirm if the data source changes.
            os.system('gunzip hapmap_archive/' + filename)
def create_hapmap_database():
    """Load the raw hapmap_archive files into intermediate hapmap_tmp.db.

    Only genotype files are loaded (one row per snp/population); the allele
    branch is currently disabled.  Does nothing if hapmap_tmp.db exists.
    """
    if not os.path.isfile('hapmap_tmp.db'):
        print 'creating intermediate hapmap database ...'
        conn = sqlite3.connect('hapmap_tmp.db')
        c = conn.cursor()
        c.execute('DROP TABLE IF EXISTS genotype')
        c.execute('''CREATE TABLE genotype
            (rs text, pop text, ref_allele_homo text, ref_allele_homo_freq real, ref_allele_hetero text, ref_allele_hetero_freq real, other_allele_homo text, other_allele_homo_freq real)''')
        c.execute('CREATE INDEX idx_genotype_rs ON genotype (rs)')
        # c.execute('DROP TABLE IF EXISTS allele')
        # c.execute('''CREATE TABLE allele
        #     (rs text, pop text, ref_allele text, ref_allele_freq real, other_allele text, other_allele_freq real)''')
        # c.execute('CREATE INDEX idx_allele_rs ON allele (rs)')
        hapmap_files = os.listdir('hapmap_archive')
        for idx, f in enumerate(hapmap_files):
            print 'processing file {0} out of {1} ...'.format(idx + 1, len(hapmap_files))
            # '#' as quotechar effectively disables quoting in these files
            reader = csv.reader(open('hapmap_archive/' + f), delimiter=' ', quotechar='#')
            next(reader, None)  # skip header
            for row in reader:
                # the population code (e.g. CEU) is embedded in the file name
                pop = re.search(r'(ASW)|(CEU)|(CHB)|(CHD)|(GIH)|(JPT)|(LWK)|(MEX)|(MKK)|(TSI)|(YRI)', f).group(0)
                if re.search(r'genotype', f):
                    # columns 10/11, 13/14, 16/17: genotype string + frequency
                    c.execute('INSERT INTO genotype VALUES (?, ?, ?, ?, ?, ?, ?, ?)', (row[0].lower(), pop, row[10], row[11], row[13], row[14], row[16], row[17]))
                else:
                    pass
                    # c.execute('INSERT INTO allele VALUES (?, ?, ?, ?, ?, ?)', (row[0].lower(), pop, row[10], row[11], row[13], row[14]))
        conn.commit()
        conn.close()
def add_final_hapmap_data(accepted_rsnumbers):
    """Reshape hapmap_tmp.db (one row per snp/population) into hapmap.db
    (one wide row per snp: 3 genotypes + 3 frequencies for each of the 11
    populations, frequencies as percentages).

    accepted_rsnumbers: set of rs numbers to keep; falsy keeps all.
    """
    print 'generating hapmap charts ...'

    conn = sqlite3.connect('hapmap.db')
    hapmap_conn = sqlite3.connect('hapmap_tmp.db')
    c = conn.cursor()
    hapmap_c = hapmap_conn.cursor()

    c.execute('DROP TABLE IF EXISTS main')
    # 1 rs + 3 genotypes + 11 populations x (name + 3 freqs) = 48 columns
    c.execute('''CREATE TABLE main (rs text PRIMARY KEY, genotype1 text, genotype2 text, genotype3 text,
        pop1 text, pop1_freq1 real, pop1_freq2 real, pop1_freq3 real,
        pop2 text, pop2_freq1 real, pop2_freq2 real, pop2_freq3 real,
        pop3 text, pop3_freq1 real, pop3_freq2 real, pop3_freq3 real,
        pop4 text, pop4_freq1 real, pop4_freq2 real, pop4_freq3 real,
        pop5 text, pop5_freq1 real, pop5_freq2 real, pop5_freq3 real,
        pop6 text, pop6_freq1 real, pop6_freq2 real, pop6_freq3 real,
        pop7 text, pop7_freq1 real, pop7_freq2 real, pop7_freq3 real,
        pop8 text, pop8_freq1 real, pop8_freq2 real, pop8_freq3 real,
        pop9 text, pop9_freq1 real, pop9_freq2 real, pop9_freq3 real,
        pop10 text, pop10_freq1 real, pop10_freq2 real, pop10_freq3 real,
        pop11 text, pop11_freq1 real, pop11_freq2 real, pop11_freq3 real)''')

    populations = ['ASW','CEU','CHB','CHD','GIH','JPT','LWK','MEX','MKK','TSI','YRI']

    snps = set(row[0] for row in hapmap_c.execute('SELECT DISTINCT rs FROM genotype'))
    # filter out rsnumbers we don't want
    if accepted_rsnumbers:
        snps = snps & accepted_rsnumbers

    for idx, rs in enumerate(snps):
        if (idx + 1) % 100 == 0:
            print '{0} out of {1} snps processed ...'.format(idx + 1, len(snps))
        result = list(hapmap_c.execute('SELECT * FROM genotype WHERE rs = ?', (rs,)))
        # fill in missing data: every population gets a row, empty if absent
        data = []
        for pop in populations:
            if pop in [row[1] for row in result]:
                data.append([row for row in result if row[1] == pop][0])
            else:
                data.append([rs, pop, "", 0, "", 0, "", 0])
        parameters = [rs]
        # get the three genotypes in the right format ('A/A' -> 'AA'),
        # taken from the first available population row
        row = result[0]
        parameters.extend(''.join(row[i].split('/')) for i in [2, 4, 6])
        # get the frequencies for each population as percentages
        for row in data:
            parameters.append(row[1])
            for i in [3, 5, 7]:
                parameters.append(row[i]*100)
        c.execute('INSERT INTO main VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', parameters)

    conn.commit()
    conn.close()
    hapmap_conn.close()
def download_and_add_hapmap_data(accepted_rsnumbers):
    """Run the full HapMap pipeline: mirror the archive, build the
    intermediate database, then produce the final hapmap.db."""
    # 1) fetch raw files (no-op if already mirrored)
    download_hapmap_data()
    # 2) load raw files into hapmap_tmp.db (no-op if it exists)
    create_hapmap_database()
    # 3) reshape per-population rows into one wide row per snp
    add_final_hapmap_data(accepted_rsnumbers)
# def get_omim_text(rsnumbers):
# for rs in rsnumbers:
# query = 'http://www.ncbi.nlm.nih.gov/omim/?term='+rs+'&report=uilist&format=text'
# response = urllib2.urlopen(query)
# html = response.read()
# soup = BeautifulSoup(html)
# entry = soup.pre.text.split('\n')[0]
# if entry:
# url = 'http://api.omim.org/api/entry?apiKey=45CB5D7EF90D6522646B46F5095277D7B225F453&include=text&format=html&mimNumber=' + entry
# print url
# # urllib.urlretrieve(url, 'snp_db/' + rs + '/omim_entry.html')
# urllib.urlretrieve(url, 'test.html')
def download_and_add_dbsnp_data(accepted_rsnumbers):
    """Stream-parse the dbSNP b141 XML dumps and store (rs, gene_id, symbol)
    in dbsnp.db.

    The XML files are far too large to load whole, so lxml's iterparse is
    used with careful incremental clearing: children of an open <Rs> element
    must not be cleared until the element itself has been processed.

    accepted_rsnumbers: set of rs numbers to keep; falsy keeps all.
    """
    print 'downloading, parsing and adding dbsnp data ...'

    conn = sqlite3.connect('dbsnp.db')
    c = conn.cursor()
    c.execute('DROP TABLE IF EXISTS main')
    c.execute('''CREATE TABLE main
        (rs text PRIMARY KEY, gene_id text, symbol text)''')

    # get index
    query = 'http://ftp.ncbi.nlm.nih.gov/snp/organisms/human_9606_b141_GRCh38/XML/'
    response = urllib.urlopen(query)
    html = response.read()
    soup = BeautifulSoup(html)

    # download and parse files
    prefix = 'ftp://ftp.ncbi.nlm.nih.gov/snp/organisms/human_9606_b141_GRCh38/XML/'
    filenames = set(a['href'] for a in soup.find_all('a') if re.search(r'\.xml\.gz', a['href']))
    namespace = '{http://www.ncbi.nlm.nih.gov/SNP/docsum}'
    for idx, filename in enumerate(filenames):
        print 'processing file {0} out of {1} ...'.format(idx + 1, len(filenames))
        urllib.urlretrieve(prefix + filename, 'dbsnp_tmp.xml.gz')
        os.system('gunzip dbsnp_tmp.xml.gz')
        context = etree.iterparse('dbsnp_tmp.xml', events=['start', 'end'])
        context = iter(context)
        _, root = context.next()
        inside_rs_element = False
        children = set()  # elements seen inside the current <Rs>, cleared on </Rs>
        for event, element in context:
            if event == 'start' and element.tag == namespace+'Rs':
                # must remember that we are inside an rs element so prematurely clear child nodes
                inside_rs_element = True
            if event == 'end':
                if element.tag == namespace+'Rs':
                    rs = 'rs' + element.get('rsId')
                    if (not accepted_rsnumbers) or (rs in accepted_rsnumbers):
                        # walk Assembly -> Component -> MapLoc -> FxnSet to
                        # find the gene annotation, if any level is present
                        assembly = element.find(namespace+'Assembly')
                        if assembly is not None:
                            component = assembly.find(namespace+'Component')
                            if component is not None:
                                maploc = component.find(namespace+'MapLoc')
                                if maploc is not None:
                                    fxnset = maploc.find(namespace+'FxnSet')
                                    if fxnset is not None:
                                        gene_id = fxnset.get('geneId')
                                        symbol = fxnset.get('symbol')
                                        try:
                                            c.execute('INSERT INTO main VALUES (?, ?, ?)', (rs, gene_id, symbol))
                                        except sqlite3.IntegrityError:
                                            # duplicate rs across dump files: keep first
                                            pass
                    # leaving the rs element, so the children can now safely be cleared
                    inside_rs_element = False
                    for child in children:
                        child.clear()
                    children.clear()
                if inside_rs_element:
                    # can't clear it yet, but must remember to clear later
                    children.add(element)
                else:
                    element.clear()
                    if element.getparent() is root:
                        root.remove(element)
        conn.commit()
        os.remove('dbsnp_tmp.xml')

    conn.close()
def generate_complete_database(accepted_rsnumbers=set()):
print 'generating databases in ' + os.getcwd() + ' ...'
# download, process and add the data from snpedia
download_and_add_snpedia_data(accepted_rsnumbers)
# download, process and add the data from hapmap
download_and_add_hapmap_data(accepted_rsnumbers)
# download, process and add the data from dbsnp
download_and_add_dbsnp_data(accepted_rsnumbers)
if __name__ == "__main__":
if len(sys.argv) > | |
# loader/hthh_nxo64.py
# hthh_nxo64.py: IDA loader and library for reading nso/nro/kip files
# Copyright 2017 Reswitched Team
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or
# without fee is hereby granted, provided that the above copyright notice and this permission
# notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
# OR PERFORMANCE OF THIS SOFTWARE.
# demangler Copyright (C) 2018 <EMAIL>
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import gzip, math, os, re, struct, sys
from io import BytesIO
from cStringIO import StringIO
import lz4.block
uncompress = lz4.block.decompress
def get_file_size(f):
    """Return the total size in bytes of a file-like object.

    Prefers the object's own size() method (IDA's loader_input provides one);
    otherwise falls back to seek-to-end/tell, restoring the original
    position.  The bare ``except:`` is narrowed to ``Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed, and the position is
    restored in a ``finally`` even if a read position query fails.
    """
    try:
        return f.size()
    except Exception:  # object has no usable size() -- fall back to seeking
        pos = f.tell()
        try:
            f.seek(0, 2)  # seek to end; tell() is then the file size
            return f.tell()
        finally:
            f.seek(pos)
class BinFile(object):
    """Thin wrapper over a file-like object adding struct-format reads,
    positional reads and convenience seeking."""

    def __init__(self, li):
        self._f = li

    def read(self, arg):
        """Read from the current position.

        - str: little-endian struct format; returns the unpacked value,
          as a scalar when the format yields exactly one field.
        - None: read everything remaining in the file.
        - int: read that many raw bytes.
        """
        if isinstance(arg, str):
            fmt = '<' + arg
            raw = self._f.read(struct.calcsize(fmt))
            values = struct.unpack(fmt, raw)
            return values[0] if len(values) == 1 else values
        if arg is None:
            return self.read_to_end()
        return self._f.read(arg)

    def read_to_end(self):
        """Read from the current position to the end of the file."""
        return self.read(self.size() - self.tell())

    def size(self):
        """Total file size in bytes."""
        return get_file_size(self._f)

    def read_from(self, arg, offset):
        """Read *arg* (format/count/None) at *offset*, restoring the
        current position afterwards."""
        saved = self.tell()
        try:
            self.seek(offset)
            result = self.read(arg)
        finally:
            self.seek(saved)
        return result

    def seek(self, off):
        self._f.seek(off)

    def skip(self, dist):
        """Advance the current position by *dist* bytes."""
        self.seek(self.tell() + dist)

    def close(self):
        self._f.close()

    def tell(self):
        return self._f.tell()
# ELF .dynamic section tag numbers (d_tag), in standard numeric order 0..30.
(DT_NULL, DT_NEEDED, DT_PLTRELSZ, DT_PLTGOT, DT_HASH, DT_STRTAB, DT_SYMTAB, DT_RELA, DT_RELASZ,
 DT_RELAENT, DT_STRSZ, DT_SYMENT, DT_INIT, DT_FINI, DT_SONAME, DT_RPATH, DT_SYMBOLIC, DT_REL,
 DT_RELSZ, DT_RELENT, DT_PLTREL, DT_DEBUG, DT_TEXTREL, DT_JMPREL, DT_BIND_NOW, DT_INIT_ARRAY,
 DT_FINI_ARRAY, DT_INIT_ARRAYSZ, DT_FINI_ARRAYSZ, DT_RUNPATH, DT_FLAGS) = xrange(31)
# GNU / versioning extension dynamic tags (OS-specific range)
DT_GNU_HASH = 0x6ffffef5
DT_VERSYM = 0x6ffffff0
DT_RELACOUNT = 0x6ffffff9
DT_RELCOUNT = 0x6ffffffa
DT_FLAGS_1 = 0x6ffffffb
DT_VERDEF = 0x6ffffffc
DT_VERDEFNUM = 0x6ffffffd

# symbol types (low nibble of st_info)
STT_NOTYPE = 0
STT_OBJECT = 1
STT_FUNC = 2
STT_SECTION = 3

# symbol bindings (high nibble of st_info)
STB_LOCAL = 0
STB_GLOBAL = 1
STB_WEAK = 2

# 32-bit ARM relocation types
R_ARM_ABS32 = 2
R_ARM_TLS_DESC = 13
R_ARM_GLOB_DAT = 21
R_ARM_JUMP_SLOT = 22
R_ARM_RELATIVE = 23

# AArch64 relocation types
R_AARCH64_ABS64 = 257
R_AARCH64_GLOB_DAT = 1025
R_AARCH64_JUMP_SLOT = 1026
R_AARCH64_RELATIVE = 1027
R_AARCH64_TLSDESC = 1031

# dynamic tags that may legitimately appear more than once in .dynamic
MULTIPLE_DTS = set([DT_NEEDED])
class Range(object):
    """A byte range [start, start+size); _inclend is the last included byte."""

    def __init__(self, start, size):
        self.start = start
        self.size = size
        self.end = start + size
        self._inclend = self.end - 1

    def overlaps(self, other):
        """True if the two ranges share at least one byte."""
        return not (other.start > self._inclend or self.start > other._inclend)

    def includes(self, other):
        """True if *other* lies entirely within this range."""
        return self.start <= other.start and other._inclend <= self._inclend

    def __repr__(self):
        return 'Range(0x%X -> 0x%X)' % (self.start, self.end)
class Segment(object):
    """A named, kinded region of the binary that owns non-overlapping
    sections."""

    def __init__(self, r, name, kind):
        self.range = r
        self.name = name
        self.kind = kind
        self.sections = []

    def add_section(self, s):
        """Append section *s*, asserting it overlaps no existing section."""
        for existing in self.sections:
            assert not existing.range.overlaps(s.range), '%r overlaps %r' % (s, existing)
        self.sections.append(s)
class Section(object):
    """A named sub-range placed inside a segment."""

    def __init__(self, r, name):
        self.range = r
        self.name = name

    def __repr__(self):
        return 'Section(%r, %r)' % (self.range, self.name)
def suffixed_name(name, suffix):
    """Return *name* untouched for suffix 0, otherwise 'name.N'."""
    return name if suffix == 0 else '%s.%d' % (name, suffix)
class SegmentBuilder(object):
    """Collects segments and sections, then flattens them into an ordered,
    gap-free list of (start, end, name, kind) parts.

    Sections are buffered until flatten(); each must fit entirely inside
    exactly one segment.  Gaps between sections within a segment are emitted
    as synthetic parts named after the segment with a numeric suffix.
    """

    def __init__(self):
        self.segments = []
        self._sections = []  # (Range, name) pairs, assigned to segments later

    def add_segment(self, start, size, name, kind):
        """Register a segment; must not overlap any existing segment."""
        r = Range(start, size)
        for i in self.segments:
            assert not r.overlaps(i.range)
        self.segments.append(Segment(r, name, kind))

    def add_section(self, name, start, end=None, size=None):
        """Register a section by start and either end or size (not both).
        Zero/negative-sized sections are silently dropped."""
        assert end is None or size is None
        if size is None:
            size = end-start
        if size <= 0:
            return
        assert size > 0
        r = Range(start, size)
        self._sections.append((r, name))

    def _add_sections_to_segments(self):
        # place each buffered section into the first segment that fully
        # contains it; a section outside every segment is a hard error
        for r, name in self._sections:
            for i in self.segments:
                if i.range.includes(r):
                    i.add_section(Section(r, name))
                    break
            else:
                assert False, 'no containing segment for %r' % (name,)

    def flatten(self):
        """Return sorted (start, end, name, kind) tuples covering every
        segment completely, filling inter-section gaps with suffixed names."""
        self._add_sections_to_segments()
        self.segments.sort(key=lambda s: s.range.start)
        parts = []
        for segment in self.segments:
            suffix = 0  # counter for synthetic gap-filler names
            segment.sections.sort(key=lambda s: s.range.start)
            pos = segment.range.start
            for section in segment.sections:
                # gap before this section -> synthetic part
                if pos < section.range.start:
                    parts.append((pos, section.range.start, suffixed_name(segment.name, suffix), segment.kind))
                    suffix += 1
                    pos = section.range.start
                parts.append((section.range.start, section.range.end, section.name, segment.kind))
                pos = section.range.end
            # trailing gap after the last section -> synthetic part
            if pos < segment.range.end:
                parts.append((pos, segment.range.end, suffixed_name(segment.name, suffix), segment.kind))
                suffix += 1
                pos = segment.range.end
        return parts
class ElfSym(object):
    """Decoded ELF symbol-table entry.

    Splits the packed st_info/st_other bytes into separate type, bind
    and visibility fields at construction time.
    """
    def __init__(self, name, info, other, shndx, value, size):
        self.name, self.shndx, self.value, self.size = name, shndx, value, size
        self.vis = other & 3     # st_other: low two bits hold visibility
        self.type = info & 0xF   # st_info: low nibble is the symbol type
        self.bind = info >> 4    # st_info: high nibble is the binding
    def __repr__(self):
        return 'Sym(name=%r, shndx=0x%X, value=0x%X, size=0x%X, vis=%r, type=%r, bind=%r)' % (
            self.name, self.shndx, self.value, self.size, self.vis, self.type, self.bind)
class NxoFileBase(object):
def __init__(self, f, segment_data=None):
self.binfile = f
# read MOD
self.modoff = f.read_from('I', 4)
f.seek(self.modoff)
if f.read('4s') != 'MOD0':
raise NxoException('invalid MOD0 magic')
self.dynamicoff = self.modoff + f.read('i')
self.bssoff = self.modoff + f.read('i')
self.bssend = self.modoff + f.read('i')
self.unwindoff = self.modoff + f.read('i')
self.unwindend = self.modoff + f.read('i')
self.moduleoff = self.modoff + f.read('i')
builder = SegmentBuilder()
# read dynamic
self.armv7 = (f.read_from('Q', self.dynamicoff) > 0xFFFFFFFF or f.read_from('Q', self.dynamicoff+0x10) > 0xFFFFFFFF)
self.offsize = 4 if self.armv7 else 8
f.seek(self.dynamicoff)
self.dynamic = dynamic = {}
for i in MULTIPLE_DTS:
dynamic[i] = []
for i in xrange((f.size() - self.dynamicoff) / 0x10):
tag, val = f.read('II' if self.armv7 else 'QQ')
if tag == DT_NULL:
break
if tag in MULTIPLE_DTS:
dynamic[tag].append(val)
else:
dynamic[tag] = val
builder.add_section('.dynamic', self.dynamicoff, end=f.tell())
builder.add_section('.eh_frame_hdr', self.unwindoff, end=self.unwindend)
# read .dynstr
if DT_STRTAB in dynamic and DT_STRSZ in dynamic:
f.seek(dynamic[DT_STRTAB])
self.dynstr = f.read(dynamic[DT_STRSZ])
else:
self.dynstr = '\0'
print 'warning: no dynstr'
for startkey, szkey, name in [
(DT_STRTAB, DT_STRSZ, '.dynstr'),
(DT_INIT_ARRAY, DT_INIT_ARRAYSZ, '.init_array'),
(DT_FINI_ARRAY, DT_FINI_ARRAYSZ, '.fini_array'),
(DT_RELA, DT_RELASZ, '.rela.dyn'),
(DT_REL, DT_RELSZ, '.rel.dyn'),
(DT_JMPREL, DT_PLTRELSZ, ('.rel.plt' if self.armv7 else '.rela.plt')),
]:
if startkey in dynamic and szkey in dynamic:
builder.add_section(name, dynamic[startkey], size=dynamic[szkey])
# TODO
#build_id = content.find('\x04\x00\x00\x00\x14\x00\x00\x00\x03\x00\x00\x00GNU\x00')
#if build_id >= 0:
# builder.add_section('.note.gnu.build-id', build_id, size=0x24)
#else:
# build_id = content.index('\x04\x00\x00\x00\x10\x00\x00\x00\x03\x00\x00\x00GNU\x00')
# if build_id >= 0:
# builder.add_section('.note.gnu.build-id', build_id, size=0x20)
if DT_HASH in dynamic:
hash_start = dynamic[DT_HASH]
f.seek(hash_start)
nbucket, nchain = f.read('II')
f.skip(nbucket * 4)
f.skip(nchain * 4)
hash_end = f.tell()
builder.add_section('.hash', hash_start, end=hash_end)
if DT_GNU_HASH in dynamic:
gnuhash_start = dynamic[DT_GNU_HASH]
f.seek(gnuhash_start)
nbuckets, symoffset, bloom_size, bloom_shift = f.read('IIII')
f.skip(bloom_size * self.offsize)
buckets = [f.read('I') for i in range(nbuckets)]
max_symix = max(buckets) if buckets else 0
if max_symix >= symoffset:
f.skip((max_symix - symoffset) * 4)
while (f.read('I') & 1) == 0:
pass
gnuhash_end = f.tell()
builder.add_section('.gnu.hash', gnuhash_start, end=gnuhash_end)
self.needed = [self.get_dynstr(i) for i in self.dynamic[DT_NEEDED]]
# load .dynsym
self.symbols = symbols = []
f.seek(dynamic[DT_SYMTAB])
while True:
if dynamic[DT_SYMTAB] < dynamic[DT_STRTAB] and f.tell() >= dynamic[DT_STRTAB]:
break
if self.armv7:
st_name, st_value, st_size, st_info, st_other, st_shndx = f.read('IIIBBH')
else:
st_name, st_info, st_other, st_shndx, st_value, st_size = f.read('IBBHQQ')
if st_name > len(self.dynstr):
break
symbols.append(ElfSym(self.get_dynstr(st_name), st_info, st_other, st_shndx, st_value, st_size))
builder.add_section('.dynsym', dynamic[DT_SYMTAB], end=f.tell())
self.plt_entries = []
self.relocations = []
locations = set()
if DT_REL in dynamic:
locations |= self.process_relocations(f, symbols, dynamic[DT_REL], dynamic[DT_RELSZ])
if DT_RELA in dynamic:
locations |= self.process_relocations(f, symbols, dynamic[DT_RELA], dynamic[DT_RELASZ])
if segment_data is None:
# infer segment info
rloc_guess = (dynamic[DT_REL if DT_REL in dynamic else DT_RELA] & ~0xFFF)
dloc_guess = (min(i for i in locations if i != 0) & ~0xFFF)
dloc_guess2 = None
modoff = f.read_from('I', 4)
if self.modoff != 8:
search_start = (self.modoff + 0xFFF) & ~0xFFF
for i in range(search_start, f.size(), 0x1000):
count = 0
for j in range(4, 0x1000, 4):
if f.read_from('I', i - j) != 0:
break
count += 1
if count > 6:
dloc_guess2 = i
break
if dloc_guess2 is not None and dloc_guess2 < dloc_guess:
dloc_guess = dloc_guess2
if segment_data:
tloc, tsize, rloc, rsize, dloc, dsize = segment_data
assert rloc_guess == rloc
assert dloc_guess == dloc
self.textoff = 0
| |
ends_visible(self, size, focus=False):
"""Return a list that may contain 'top' and/or 'bottom'.
convenience function for checking whether the top and bottom
of the list are visible
"""
(maxcol, maxrow) = size
l = []
middle,top,bottom = self.calculate_visible( (maxcol,maxrow),
focus=focus )
if middle is None: # empty listbox
return ['top','bottom']
trim_top, above = top
trim_bottom, below = bottom
if trim_bottom == 0:
row_offset, w, pos, rows, c = middle
row_offset += rows
for w, pos, rows in below:
row_offset += rows
if row_offset < maxrow:
l.append( 'bottom' )
elif self.body.get_next(pos) == (None,None):
l.append( 'bottom' )
if trim_top == 0:
row_offset, w, pos, rows, c = middle
for w, pos, rows in above:
row_offset -= rows
if self.body.get_prev(pos) == (None,None):
l.append( 'top' )
return l
class HListBoxError(Exception):
    """Error raised for invalid HListBox operations (e.g. bad focus offsets)."""
    pass
class HListBox(FlowWidget):
def __init__(self, body):
    """
    body -- a ListWalker-like object that contains
        widgets to be displayed inside the list box
    """
    if hasattr(body,'get_focus'):
        # already a ListWalker-style object
        self.body = body
    else:
        # wrap a plain sequence so it exposes the walker interface
        self.body = PollingListWalker(body)
    try:
        connect_signal(self.body, "modified", self._invalidate)
    except NameError:
        # our list walker has no modified signal so we must not
        # cache our canvases because we don't know when our
        # content has changed
        self.render = nocache_widget_render_instance(self)
    # offset_cols is the number of cols between the left side of
    # the view and the left side of the focused item
    self.offset_cols = 0
    # inset_fraction is used when the focused widget is off the
    # left side of the view. it is the fraction of the widget
    # cut off at the left side. (numerator, denominator)
    self.inset_fraction = (0,1)
    # pref_col is the preferred column for the cursor when moving
    # between widgets that use the cursor (edit boxes etc.)
    self.pref_col = 'left'
    # variable for delayed focus change used by set_focus
    self.set_focus_pending = 'first selectable'
    # variable for delayed align change used by set_focus_align
    self.set_focus_align_pending = None
def calculate_visible(self, size, focus=False):
    """ Return (middle,left,right) or None,None,None.

    middle -- (column offset(when +ve) or inset(when -ve),
        focus widget, focus position, focus cols,
        cursor coords or None)
    left -- (# columns to trim off left side,
        list of (widget, position, cols) tuples left
        of the focus in order from right to left)
    right -- (# columns to trim off right side,
        list of (widget, position, cols) tuples right
        of the focus in order from left to right)
    """
    maxcol = size[0]
    # 0. set the focus if a change is pending
    if self.set_focus_pending or self.set_focus_align_pending:
        self._set_focus_complete(size, focus)
    # 1. start with the focus widget
    focus_widget, focus_pos = self.body.get_focus()
    if focus_widget is None:  # list box is empty?
        return None, None, None
    left_pos = right_pos = focus_pos
    focus_size = focus_widget.pack(None, focus)
    offset_cols, inset_cols = self.get_focus_offset_inset(size)
    # force at least one column of focus to be visible
    if offset_cols >= maxcol:
        offset_cols = maxcol - 1
    # adjust position so cursor remains visible
    cursor = None
    if focus_widget.selectable() and focus:
        if hasattr(focus_widget, 'get_cursor_coords'):
            cursor = focus_widget.get_cursor_coords((maxcol,))
    if cursor is not None:
        cx, cy = cursor
        effective_cx = cx + offset_cols - inset_cols
        if effective_cx < 0:  # cursor past left?
            inset_cols = cx
        elif effective_cx >= maxcol:  # cursor past right?
            offset_cols = maxcol - cx - 1
    # set trim_left by focus trimming
    trim_left = inset_cols
    focus_cols = focus_size[0]
    # 2. collect the widgets left of the focus
    pos = focus_pos
    fill_cols = offset_cols
    fill_left = []
    left_pos = pos
    while fill_cols > 0:
        prev, pos = self.body.get_prev(pos)
        if prev is None:  # run out of widgets to left?
            offset_cols -= fill_cols
            break
        # BUG FIX: this assigned the unused leftover name `top_pos`, so
        # left_pos stayed at the focus position and step 4 re-walked (and
        # re-appended) widgets already collected here.  Track the leftmost
        # visited position instead.
        left_pos = pos
        p_cols = prev.pack(None, False)[0]
        fill_left.append((prev, pos, p_cols))
        if p_cols > fill_cols:  # crosses left edge?
            trim_left = p_cols - fill_cols
            break
        fill_cols -= p_cols
    trim_right = focus_cols + offset_cols - inset_cols - maxcol
    if trim_right < 0:
        trim_right = 0
    # 3. collect the widgets right of the focus
    pos = focus_pos
    fill_cols = maxcol - focus_cols - offset_cols + inset_cols
    fill_right = []
    while fill_cols > 0:
        next, pos = self.body.get_next(pos)
        if next is None:  # run out of widgets to the right?
            break
        right_pos = pos
        n_cols = next.pack(None, False)[0]
        fill_right.append((next, pos, n_cols))
        if n_cols > fill_cols:  # crosses right edge?
            trim_right = n_cols - fill_cols
            fill_cols -= n_cols
            break
        fill_cols -= n_cols
    # 4. fill from left again if necessary & possible
    fill_cols = max(0, fill_cols)
    if fill_cols > 0 and trim_left > 0:
        # un-trim the focus/leftmost widget before pulling in new ones
        if fill_cols <= trim_left:
            trim_left -= fill_cols
            offset_cols += fill_cols
            fill_cols = 0
        else:
            fill_cols -= trim_left
            offset_cols += trim_left
            trim_left = 0
    pos = left_pos
    while fill_cols > 0:
        prev, pos = self.body.get_prev(pos)
        if prev is None:
            break
        p_cols = prev.pack(None, False)[0]
        fill_left.append((prev, pos, p_cols))
        if p_cols > fill_cols:  # more than required
            trim_left = p_cols - fill_cols
            offset_cols += fill_cols
            break
        fill_cols -= p_cols
        offset_cols += p_cols
    # 5. return the interesting bits
    return ((offset_cols - inset_cols, focus_widget,
             focus_pos, focus_cols, cursor),
            (trim_left, fill_left), (trim_right, fill_right))
def render(self, size, focus=False):
    """
    Render the listbox and return a one-row canvas.

    Widgets left and right of the focus are rendered and joined side by
    side, then the result is trimmed/padded to exactly maxcol columns.
    """
    (maxcol,) = size
    middle, left, right = self.calculate_visible((maxcol,),
        focus=focus)
    if middle is None:
        # empty listbox: render a blank line
        return SolidCanvas(" ", maxcol, 1)
    _ignore, focus_widget, focus_pos, focus_cols, cursor = middle
    trim_left, fill_left = left
    trim_right, fill_right = right
    joinlist = []
    cols = 0
    fill_left.reverse()  # fill_left is collected in right-to-left order
    for widget, w_pos, w_cols in fill_left:
        canvas = widget.render((w_cols,))
        cols += w_cols
        joinlist.append((canvas, w_pos, False, w_cols))
    focus_canvas = focus_widget.render((focus_cols,), focus=focus)
    c_cursor = focus_canvas.cursor
    if cursor != c_cursor:
        raise HListBoxError(
            "Focus Widget %s at position %s within listbox calculated cursor coords %s but rendered cursor coords %s!" % (
                repr(focus_widget), repr(focus_pos), repr(cursor), repr(c_cursor)))
    cols += focus_cols
    joinlist.append((focus_canvas, focus_pos, True, focus_cols))
    # BUG FIX: this loop iterated `fill_below`, an undefined leftover name
    # from the vertical ListBox, raising NameError whenever widgets exist
    # right of the focus.  The correct list is fill_right.
    for widget, w_pos, w_cols in fill_right:
        canvas = widget.render((w_cols,))
        cols += w_cols
        joinlist.append((canvas, w_pos, False, w_cols))
    final_canvas = CanvasJoin(joinlist)
    if trim_left or trim_right:
        final_canvas.pad_trim_left_right(-trim_left, -trim_right)
        cols -= trim_left + trim_right
    assert cols <= maxcol, "HListbox contents too long! Probably urwid's fault (please report): %s" % repr((left, middle, right))
    if cols < maxcol:
        right_pos = focus_pos
        if fill_right:
            right_pos = fill_right[-1][1]
        # BUG FIX: the message referenced undefined names `top`/`bottom`
        assert trim_right == 0 and self.body.get_next(right_pos) == (None, None), \
            "HListbox contents too short! Probably urwid's fault (please report): %s" % repr((left, middle, right))
        final_canvas.pad_trim_left_right(0, maxcol - cols)
    return final_canvas
def set_focus_align(self, align):
    """Set the focus widget's display offset and inset.

    align -- one of:
        'left', 'center', 'right'
        ('fixed left', columns)
        ('fixed right', columns)
        ('relative', percentage 0=left 100=right)
    """
    # BUG FIX: this referenced the undefined names `valign`, `at` and
    # `aa` (copied from the vertical ListBox) and so always raised
    # NameError.  Decompose the given alignment and store the
    # (type, amount) pair for _set_focus_align_complete() to apply.
    at, aa, ht, ha = decompose_valign_height(align, None, HListBoxError)
    self.set_focus_align_pending = at, aa
def set_focus(self, position, coming_from=None):
    """
    Set the focus position and try to keep the old focus in view.

    position -- a position compatible with self.body.set_focus
    coming_from -- set to 'left' or 'right' if you know that the
        old position is left or right of the new position.
    """
    assert coming_from in ('left', 'right', None)
    # remember where we came from so the deferred completion step can
    # keep the old focus location in view
    old_widget, old_position = self.body.get_focus()
    self.set_focus_pending = (coming_from, old_widget, old_position)
    self.body.set_focus(position)
def get_focus(self):
    """
    Return a (focus widget, focus position) tuple.
    """
    # Delegates directly to the list walker.
    return self.body.get_focus()
def _set_focus_align_complete(self, size, focus):
    """
    Finish setting the offset and inset now that we have a maxcol.
    """
    # BUG FIX: maxcol was read but never unpacked from size (NameError).
    (maxcol,) = size
    at, aa = self.set_focus_align_pending
    self.set_focus_align_pending = None
    self.set_focus_pending = None
    focus_widget, focus_pos = self.body.get_focus()
    if focus_widget is None:
        return
    # BUG FIX: pack() returns a size tuple; calculate_padding needs the
    # column count (see calculate_visible, which uses focus_size[0]).
    cols = focus_widget.pack(None, focus)[0]
    rleft, rright = calculate_padding(at, aa, 'fixed', cols,
        None, maxcol)
    # BUG FIX: shift by the computed left padding; `rtop` was undefined.
    self.shift_focus(size, rleft)
def _set_focus_first_selectable(self, size, focus):
    """
    Choose the first visible, selectable widget right of the
    current focus as the focus widget.
    """
    (maxcol,) = size
    self.set_focus_align_pending = None
    self.set_focus_pending = None
    middle, left, right = self.calculate_visible(
        (maxcol,), focus=focus)
    if middle is None:
        return
    # BUG FIX: middle's fourth item is the focus width in columns, and the
    # trim/fill pairs come from `left`/`right`; the names focus_rows,
    # `top` and `bottom` were undefined leftovers from the vertical ListBox.
    col_offset, focus_widget, focus_pos, focus_cols, cursor = middle
    trim_left, fill_left = left
    trim_right, fill_right = right
    if focus_widget.selectable():
        return
    if trim_right:
        # the last widget is only partially visible; don't focus it
        fill_right = fill_right[:-1]
    # BUG FIX: was `focus_col` (undefined); the focus width is focus_cols.
    new_col_offset = col_offset + focus_cols
    for widget, pos, cols in fill_right:
        if widget.selectable():
            self.body.set_focus(pos)
            self.shift_focus((maxcol,), new_col_offset)
            return
        new_col_offset += cols
def _set_focus_complete(self, size, focus):
    """
    Finish setting the position now that we have maxcol.
    """
    (maxcol,) = size
    self._invalidate()
    if self.set_focus_pending == "first selectable":
        return self._set_focus_first_selectable(
            (maxcol,), focus)
    if self.set_focus_align_pending is not None:
        return self._set_focus_align_complete(
            (maxcol,), focus)
    coming_from, focus_widget, focus_pos = self.set_focus_pending
    self.set_focus_pending = None
    # new position
    new_focus_widget, position = self.body.get_focus()
    if focus_pos == position:
        # do nothing
        return
    # restore old focus temporarily
    self.body.set_focus(focus_pos)
    middle, left, right = self.calculate_visible((maxcol,), focus)
    focus_offset, focus_widget, focus_pos, focus_cols, cursor = middle
    # BUG FIX: the trim/fill pairs come from `left` and `right`;
    # `top`/`bottom` were undefined leftovers from the vertical ListBox.
    trim_left, fill_left = left
    trim_right, fill_right = right
    # search among the widgets left of the focus
    offset = focus_offset
    for widget, pos, cols in fill_left:
        offset -= cols
        if pos == position:
            self.change_focus((maxcol,), pos,
                offset, 'right')
            return
    # BUG FIX: this loop iterated fill_left a second time, so widgets
    # right of the focus were never found; walk fill_right instead.
    offset = focus_offset + focus_cols
    for widget, pos, cols in fill_right:
        if pos == position:
            self.change_focus((maxcol,), pos,
                offset, 'left')
            return
        offset += cols
    # failed to find widget among visible widgets
    self.body.set_focus(position)
    widget, position = self.body.get_focus()
    cols = widget.pack(None, focus)[0]
    # BUG FIX: coming_from is 'left'/'right' here (see set_focus), not the
    # vertical ListBox's 'above'/'below', so these branches never matched.
    if coming_from == 'right':
        offset = 0
    elif coming_from == 'left':
        offset = maxcol - cols
    else:
        offset = (maxcol - cols) // 2
    self.shift_focus((maxcol,), offset)
def shift_focus(self, size, offset_inset):
    """Move the location of the current focus relative to the left edge.

    offset_inset -- either the number of columns between the
        left of the HListBox and the start of the focus widget (+ve
        value) or the number of columns of the focus widget hidden
        off the left edge of the HListBox (-ve value) or 0 if the
        left edge of the focus widget is aligned with the left edge
        of the HListBox
    """
    (maxcol,) = size
    if offset_inset >= 0:
        if offset_inset >= maxcol:
            raise HListBoxError(
                "Invalid offset_inset: %s, only %s cols in HListBox" % (
                    repr(offset_inset), repr(maxcol)))
        self.offset_cols = offset_inset
        self.inset_fraction = (0, 1)
    else:
        target, _ignore = self.body.get_focus()
        tgt_cols = target.pack(None, True)[0]
        if offset_inset + tgt_cols <= 0:
            # BUG FIX: the message referenced undefined `tgt_rows` (and
            # said "rows"); report the target's column count instead.
            raise HListBoxError(
                "Invalid offset_inset: %s, only %s cols in target!" % (
                    repr(offset_inset), repr(tgt_cols)))
        self.offset_cols = 0
        self.inset_fraction = (-offset_inset, tgt_cols)
    self._invalidate()
def update_pref_col_from_focus(self, size):
    """Update self.pref_col from the focus widget."""
    (maxcol,) = size
    widget, old_pos = self.body.get_focus()
    if widget is None:
        return
    pref_col = None
    wcol = widget.pack(None, True)[0]
    if hasattr(widget, 'get_pref_col'):
        pref_col = widget.get_pref_col((wcol,))
    if pref_col is None and hasattr(widget, 'get_cursor_coords'):
        coords = widget.get_cursor_coords((wcol,))
        if type(coords) == type(()):
            pref_col, y = coords
    if pref_col is not None:
        if type(pref_col) == type(0):
            # BUG FIX: get_focus_offset_inset takes the size tuple (it is
            # called as self.get_focus_offset_inset(size) in
            # calculate_visible); it was called here with no arguments.
            offset, inset = self.get_focus_offset_inset(size)
            pref_col += offset - inset
        self.pref_col = pref_col
def change_focus(self, size, position,
offset_inset = 0, | |
import os
import sys
import random
import warnings
import math
import numpy as np
import pylab
import scipy.ndimage as ndi
from concurrent.futures import ThreadPoolExecutor
import PIL
from PIL import Image, ImageDraw
from tqdm import tqdm
def autoinvert(image):
    """Return `image` with polarity flipped (1 - image) when bright pixels
    outnumber dark ones, so that ink ends up as the minority value.

    image -- float array with values in [0, 1].
    """
    assert np.amin(image) >= 0
    assert np.amax(image) <= 1
    bright = np.sum(image > 0.9)
    dark = np.sum(image < 0.1)
    return 1 - image if bright > dark else image
def zerooneimshow(img):
    # Debug helper: display a float image in [0, 1] via PIL's external viewer.
    img = (img * 255).astype(np.uint8)
    Image.fromarray(img).show()
    return
#
# random geometric transformations
#
def random_transform(translation=(-0.05, 0.05), rotation=(-2, 2), scale=(-0.1, 0.1), aniso=(-0.1, 0.1)):
dx = random.uniform(*translation)
dy = random.uniform(*translation)
angle = random.uniform(*rotation)
angle = angle * np.pi / 180.0
scale = 10 ** random.uniform(*scale)
aniso = 10 ** random.uniform(*aniso)
return dict(angle=angle, scale=scale, aniso=aniso, translation=(dx, dy))
def transform_image(image, angle=0.0, scale=1.0, aniso=1.0, translation=(0, 0), order=1):
    """Apply rotation, scaling, anisotropy and translation in one resampling.

    The matrix maps output to input coordinates (affine_transform's
    convention), so the requested scale is inverted, and the offset is
    chosen to keep the image center fixed before applying the translation.
    """
    dx, dy = translation
    inv_scale = 1.0 / scale
    c = np.cos(angle)
    s = np.sin(angle)
    stretch = np.array([[inv_scale / aniso, 0], [0, inv_scale * aniso]], 'f')
    rot = np.array([[c, -s], [s, c]], 'f')
    m = np.dot(stretch, rot)
    w, h = image.shape
    center = np.array([w, h]) / 2.0
    offset = center - np.dot(m, center) + np.array([dx * w, dy * h])
    return ndi.affine_transform(image, m, offset=offset, order=order, mode="nearest", output=np.dtype("f"))
#
# random distortions
#
def bounded_gaussian_noise(shape, sigma, maxdelta):
    """Smooth random 2-channel displacement field with values in
    [-maxdelta, maxdelta].

    shape -- (n, m) target image shape
    sigma -- Gaussian smoothing radius per channel
    maxdelta -- bound on the absolute displacement

    Uses numpy.random directly instead of the pylab alias (same underlying
    generator), removing the matplotlib dependency from this code path.
    """
    n, m = shape
    deltas = np.random.rand(2, n, m)  # was pylab.rand (numpy re-export)
    deltas = ndi.gaussian_filter(deltas, (0, sigma, sigma))
    # rescale to [0, 1], then to [-maxdelta, maxdelta]
    deltas -= np.amin(deltas)
    deltas /= np.amax(deltas)
    deltas = (2 * deltas - 1) * maxdelta
    return deltas
def distort_with_noise(image, deltas, order=1):
    """Warp `image` by sampling it at (pixel grid + deltas).

    deltas -- (2, n, m) displacement field; NOTE it is modified in place
    (the coordinate grid is added to it), matching existing callers.
    """
    assert deltas.shape[0] == 2
    assert image.shape == deltas.shape[1:], (image.shape, deltas.shape)
    # np.indices gives the (row, col) coordinate grid for image.shape
    deltas += np.indices(image.shape)
    return ndi.map_coordinates(image, deltas, order=order, mode="reflect")
def noise_distort1d(shape, sigma=100.0, magnitude=100.0):
    """Displacement field that shifts whole columns vertically by a smooth
    random profile with peak amplitude `magnitude`.

    Returns a (2, h, w) array: row displacements vary along x and are
    constant down each column; column displacements are zero.
    Uses numpy.random.randn instead of the pylab alias (same generator),
    removing the matplotlib dependency.
    """
    h, w = shape
    noise = ndi.gaussian_filter(np.random.randn(w), sigma)  # was pylab.randn
    noise *= magnitude / np.amax(abs(noise))
    dys = np.array([noise] * h)
    deltas = np.array([dys, np.zeros((h, w))])
    return deltas
#
# mass preserving blur
#
def percent_black(image):
    """Return the percentage of pixels darker than 0.5."""
    total = np.prod(image.shape)
    dark = np.sum(image < 0.5)
    return dark * 100.0 / total
def binary_blur(image, sigma, noise=0.0):
    """Blur a binary image while (approximately) preserving its black-pixel
    fraction, optionally adding Gaussian pixel noise before thresholding.

    The threshold is chosen as the percentile of the blurred image that
    matches the input's dark-pixel percentage ("mass preserving blur").
    Uses numpy.random.randn instead of the pylab alias (same generator),
    removing the matplotlib dependency.
    """
    p = percent_black(image)
    blurred = ndi.gaussian_filter(image, sigma)
    if noise > 0:
        blurred += np.random.randn(*blurred.shape) * noise  # was pylab.randn
    t = np.percentile(blurred, p)
    return np.array(blurred > t, 'f')
#
# multiscale noise
#
def make_noise_at_scale(shape, scale):
    """Random noise image of `shape` with spatial correlation length ~scale.

    Draws a small uniform-noise grid and zooms it up by `scale`, cropping
    to the requested shape.  Uses numpy.random.rand instead of the pylab
    alias (same generator), removing the matplotlib dependency.
    """
    h, w = shape
    h0, w0 = int(h / scale + 1), int(w / scale + 1)
    data = np.random.rand(h0, w0)  # was pylab.rand
    with warnings.catch_warnings():
        # ndi.zoom can warn about output shape rounding; ignore it
        warnings.simplefilter("ignore")
        result = ndi.zoom(data, scale)
    return result[:h, :w]
def make_multiscale_noise(shape, scales, weights=None, limits=(0.0, 1.0)):
    """Weighted sum of noise fields at several scales, rescaled into `limits`.

    shape -- output image shape
    scales -- correlation lengths, one noise field per entry
    weights -- per-scale weights (default: all 1.0)
    limits -- (lo, hi) output value range

    BUG FIX: the accumulation loop previously zipped over *all* scales
    after already seeding the result with scales[0], so the first scale
    was drawn and weighted twice.  Accumulate the remaining scales only.
    """
    if weights is None:
        weights = [1.0] * len(scales)
    result = make_noise_at_scale(shape, scales[0]) * weights[0]
    for s, w in zip(scales[1:], weights[1:]):
        result += make_noise_at_scale(shape, s) * w
    # normalize to [0, 1], then map into the requested limits
    lo, hi = limits
    result -= np.amin(result)
    result /= np.amax(result)
    result *= (hi - lo)
    result += lo
    return result
def make_multiscale_noise_uniform(shape, srange=(1.0, 100.0), nscales=4, limits=(0.0, 1.0)):
    """Multiscale noise with `nscales` scales drawn log-uniformly from
    `srange` and random weights in [0, 2)."""
    lo, hi = np.log10(srange[0]), np.log10(srange[1])
    # draw increasing values, normalize to [0, 1], then map into [lo, hi]
    scales = np.add.accumulate(np.random.uniform(size=nscales))
    scales -= np.amin(scales)
    scales /= np.amax(scales)
    scales = 10 ** (scales * (hi - lo) + lo)
    weights = 2.0 * np.random.uniform(size=nscales)
    return make_multiscale_noise(shape, scales, weights=weights, limits=limits)
#
# random blobs
#
def random_blobs(shape, blobdensity, size, roughness=2.0):
    """Binary image of random blobs.

    shape -- (h, w) output shape
    blobdensity -- expected blob seeds per pixel
    size -- blob radius in pixels
    roughness -- higher values give rougher blob outlines

    Uses numpy.random.rand instead of the pylab alias (same generator),
    removing the matplotlib dependency; blob seed placement still uses the
    stdlib `random` module as before.
    """
    from random import randint
    from builtins import range  # python2 compatible
    h, w = shape
    numblobs = int(blobdensity * w * h)
    mask = np.zeros((h, w), 'i')
    for i in range(numblobs):
        mask[randint(0, h - 1), randint(0, w - 1)] = 1
    # grow each seed into a disc of the requested size
    dt = ndi.distance_transform_edt(1 - mask)
    mask = np.array(dt < size, 'f')
    mask = ndi.gaussian_filter(mask, size / (2 * roughness))
    mask -= np.amin(mask)
    mask /= np.amax(mask)
    # modulate with smoothed noise to roughen the blob boundaries
    noise = np.random.rand(h, w)  # was pylab.rand
    noise = ndi.gaussian_filter(noise, size / (2 * roughness))
    noise -= np.amin(noise)
    noise /= np.amax(noise)
    return np.array(mask * noise > 0.5, 'f')
def random_blotches(image, fgblobs, bgblobs, fgscale=10, bgscale=10):
    """Overlay random foreground blobs on `image` and punch random
    background holes into it, keeping values in [0, 1]."""
    fg = random_blobs(image.shape, fgblobs, fgscale)
    bg = random_blobs(image.shape, bgblobs, bgscale)
    with_fg = np.maximum(image, fg)
    return np.minimum(with_fg, 1 - bg)
#
# random fibers
#
def make_fiber(l, a, stepsize=0.5):
    """Random fiber path of `l` points as an (l, 2) array of (y, x) coords.

    Heading changes are Cauchy-distributed with scale `a` (heavy tails give
    occasional sharp kinks); the start heading is uniform.  Uses
    numpy.random.rand instead of the pylab alias (same generator),
    removing the matplotlib dependency.
    """
    angles = np.random.standard_cauchy(l) * a
    angles[0] += 2 * np.pi * np.random.rand()  # was pylab.rand
    angles = np.add.accumulate(angles)
    coss = np.add.accumulate(np.cos(angles) * stepsize)
    sins = np.add.accumulate(np.sin(angles) * stepsize)
    return np.array([coss, sins]).transpose((1, 0))
def make_fibrous_image(shape, nfibers=300, l=300, a=0.2, stepsize=0.5, limits=(0.1, 1.0), blur=1.0):
    """Render `nfibers` random fiber trails into a blurred intensity image
    whose values span `limits`.

    Uses numpy.random.rand instead of the pylab alias (same generator),
    removing the matplotlib dependency; fiber anchor points still use the
    stdlib `random` module as before.
    """
    h, w = shape
    lo, hi = limits
    result = np.zeros(shape)
    for i in range(nfibers):
        v = np.random.rand() * (hi - lo) + lo  # was pylab.rand
        fiber = make_fiber(l, a, stepsize=stepsize)
        # drop the fiber at a random anchor and clamp it to the image
        y, x = random.randint(0, h - 1), random.randint(0, w - 1)
        fiber[:, 0] += y
        fiber[:, 0] = np.clip(fiber[:, 0], 0, h - .1)
        fiber[:, 1] += x
        fiber[:, 1] = np.clip(fiber[:, 1], 0, w - .1)
        for y, x in fiber:
            result[int(y), int(x)] = v
    result = ndi.gaussian_filter(result, blur)
    # normalize to [0, 1], then map into the requested limits
    result -= np.amin(result)
    result /= np.amax(result)
    result *= (hi - lo)
    result += lo
    return result
#
# print-like degradation with multiscale noise
#
def printlike_multiscale(image, blur=0.5, blotches=5e-5, paper_range=(0.8, 1.0), ink_range=(0.0, 0.2)):
    """Simulate print degradation: blotched ink mask blended between random
    multiscale 'ink' and 'paper' textures."""
    selector = random_blotches(autoinvert(image), 2 * blotches, blotches)
    paper = make_multiscale_noise_uniform(image.shape, limits=paper_range)
    ink = make_multiscale_noise_uniform(image.shape, limits=ink_range)
    mask = ndi.gaussian_filter(selector, blur)
    return mask * ink + (1 - mask) * paper
def printlike_fibrous(image, blur=0.5, blotches=5e-5, paper_range=(0.8, 1.0), ink_range=(0.0, 0.2)):
    """Simulate print degradation on fibrous paper: like
    printlike_multiscale, but the paper texture has fiber trails
    subtracted from it."""
    selector = random_blotches(autoinvert(image), 2 * blotches, blotches)
    paper = make_multiscale_noise(image.shape, [1.0, 5.0, 10.0, 50.0], weights=[1.0, 0.3, 0.5, 0.3], limits=paper_range)
    paper -= make_fibrous_image(image.shape, 300, 500, 0.01, limits=(0.0, 0.25), blur=0.5)
    ink = make_multiscale_noise(image.shape, [1.0, 5.0, 10.0, 50.0], limits=ink_range)
    mask = ndi.gaussian_filter(selector, blur)
    return mask * ink + (1 - mask) * paper
def add_frame(img):
    """Randomly paste `img` onto a larger white canvas, draw ruling line(s)
    in the free margin, and return the result resized back to the original
    size (simulates table/frame ruling around a text line).

    img -- PIL image or ndarray; the return value is a PIL image.
    """
    if isinstance(img, np.ndarray):
        img = Image.fromarray(img)
    # no_aug : up : down : left : right: left&right = 2:1:1:3:3:1
    random_list = ['no_aug', 'no_aug',
                   'up', 'down',
                   'left', 'right',
                   'left', 'right',
                   'left', 'right',
                   'left&right']
    choice = random.choice(random_list)
    if choice == 'no_aug':
        return img
    w, h = img.size
    expand_ratio = random.uniform(1.1, 1.3)
    new_w = int(w * expand_ratio)
    new_h = int(h * expand_ratio)
    new_img = Image.new(img.mode, (new_w, new_h), 255)  # 0 - black, 255 - white
    draw = ImageDraw.Draw(new_img)
    # NOTE(review): for small inputs the randint() bounds below can invert
    # (e.g. new_h - h - line_thick < line_thick) and raise ValueError --
    # confirm the minimum expected input size.
    if choice == 'up':
        # paste at the bottom, draw a horizontal line in the top margin
        new_img.paste(img, ((new_w - w) // 2, new_h - h))
        line_thick = random.randint(3, 10)
        line_height = random.randint(line_thick, new_h - h - line_thick)
        draw.line((0, line_height, new_w, line_height), fill=0, width=line_thick)
    if choice == 'down':
        # paste at the top, draw a horizontal line in the bottom margin
        new_img.paste(img, ((new_w - w) // 2, 0))
        line_thick = random.randint(3, 10)
        line_height = random.randint(h + line_thick, new_h - line_thick)
        draw.line((0, line_height, new_w, line_height), fill=0, width=line_thick)
    if choice == 'left':
        # paste at the right, draw a vertical line in the left margin
        new_img.paste(img, (new_w - w, (new_h - h) // 2))
        line_thick = random.randint(3, 10)
        line_width = random.randint(line_thick, new_w - w - line_thick)
        draw.line((line_width, 0, line_width, new_h), fill=0, width=line_thick)
    if choice == 'right':
        # paste at the left, draw a vertical line in the right margin
        new_img.paste(img, (0, (new_h - h) // 2))
        line_thick = random.randint(3, 10)
        line_width = random.randint(w + line_thick, new_w - line_thick)
        draw.line((line_width, 0, line_width, new_h), fill=0, width=line_thick)
    if choice == 'left&right':
        # paste centered, draw vertical lines in both margins
        new_img.paste(img, ((new_w - w) // 2, (new_h - h) // 2))
        line_thick = random.randint(3, 10)
        left_line_width = random.randint(line_thick, (new_w - w) // 2 - line_thick)
        draw.line((left_line_width, 0, left_line_width, new_h), fill=0, width=line_thick)
        line_thick = random.randint(3, 10)
        right_line_width = random.randint((new_w - w) // 2 + w + line_thick, new_w - line_thick)
        draw.line((right_line_width, 0, right_line_width, new_h), fill=0, width=line_thick)
    # BUG FIX: Image.resize returns a new image; the result was previously
    # discarded, so the un-resized expanded canvas was returned.
    new_img = new_img.resize((w, h), Image.BICUBIC)
    return new_img
def ocrodeg_augment(img):
    # Apply a randomized chain of ocrodeg-style degradations to a text-line
    # image (ndarray or PIL) and return a PIL image.  `flag` counts how many
    # distortions have been applied; the final print-like stage becomes less
    # likely as it grows.
    if not isinstance(img, np.ndarray):
        img = np.array(img)
    img = img / 255
    img = np.clip(img, 0.0, 1.0)
    # 50% use distort, 50% use raw
    flag = 0
    if random.random() < 0.5:
        img = distort_with_noise(
            img,
            deltas=bounded_gaussian_noise(
                shape=img.shape,
                sigma=random.uniform(12.0, 20.0),
                maxdelta=random.uniform(3.0, 5.0)
            )
        )
        flag += 1
    # img = img / 255
    img = np.clip(img, 0.0, 1.0)
    # 50% use binary blur, 50% use raw
    # NOTE(review): the comment above says 50%, but `random.random() < 0.0`
    # is never true, so binary_blur is effectively disabled -- confirm
    # whether 0.0 was meant to be 0.5.
    if random.random() < 0.0:
        img = binary_blur(
            img,
            sigma=random.uniform(0.5, 0.7),
            noise=random.uniform(0.05, 0.1)
        )
        flag += 1
    img = np.clip(img, 0.0, 1.0)
    # raw - 50% use multiscale, 50% use fibrous, 0% use raw
    # flag=1 - 35% use multiscale, 35% use fibrous, 30% use raw
    # flag=2 - 20% use multiscale, 20% use fibrous, 60% use raw
    rnd = random.random()
    if rnd < 0.5 - flag * 0.15:
        img = printlike_multiscale(img, blur=0.5)
    elif rnd < 1 - flag * 0.15:
        img = printlike_fibrous(img)
    img = np.clip(img, 0.0, 1.0)
    img = (img * 255).astype(np.uint8)
    img = Image.fromarray(img)
    return img
def add_noise(img, generate_ratio=0.003, generate_size=0.006):
if not isinstance(img, np.ndarray):
img = np.array(img)
h, w = img.shape
R_max = max(3, int(min(h, w) * generate_size))
threshold = int(h * w * generate_ratio)
random_choice_list = []
| |
t)
item = self.anal_type(t.args[0])
return TypeType.make_normalized(item, line=t.line)
elif fullname == 'typing.ClassVar':
if self.nesting_level > 0:
self.fail('Invalid type: ClassVar nested inside other type', t)
if len(t.args) == 0:
return AnyType(TypeOfAny.from_omitted_generics, line=t.line, column=t.column)
if len(t.args) != 1:
self.fail('ClassVar[...] must have at most one type argument', t)
return AnyType(TypeOfAny.from_error)
return self.anal_type(t.args[0])
elif fullname in ('mypy_extensions.NoReturn', 'typing.NoReturn'):
return UninhabitedType(is_noreturn=True)
elif fullname in ('typing_extensions.Literal', 'typing.Literal'):
return self.analyze_literal_type(t)
elif fullname in ('typing_extensions.Annotated', 'typing.Annotated'):
if len(t.args) < 2:
self.fail("Annotated[...] must have exactly one type argument"
" and at least one annotation", t)
return AnyType(TypeOfAny.from_error)
return self.anal_type(t.args[0])
elif self.anal_type_guard_arg(t, fullname) is not None:
# In most contexts, TypeGuard[...] acts as an alias for bool (ignoring its args)
return self.named_type('builtins.bool')
return None
def get_omitted_any(self, typ: Type, fullname: Optional[str] = None) -> AnyType:
    """Return the AnyType to use for omitted generic type arguments of *typ*.

    Delegates to the module-level get_omitted_any(); errors are reported
    (instead of silently producing Any) only when --disallow-any-generics
    is set and the current file is not a typeshed stub.
    """
    disallow_any = not self.is_typeshed_stub and self.options.disallow_any_generics
    return get_omitted_any(disallow_any, self.fail, self.note, typ,
                           self.options.python_version, fullname)
def analyze_type_with_type_info(
        self, info: TypeInfo, args: Sequence[Type], ctx: Context) -> Type:
    """Bind unbound type when were able to find target TypeInfo.

    This handles simple cases like 'int', 'modname.UserClass[str]', etc.
    """
    # Special case: 'tuple[X, ...]'-style types use TupleType, with the
    # bare builtins.tuple instance kept as the fallback.
    if len(args) > 0 and info.fullname == 'builtins.tuple':
        fallback = Instance(info, [AnyType(TypeOfAny.special_form)], ctx.line)
        return TupleType(self.anal_array(args), fallback, ctx.line)
    # Analyze arguments and (usually) construct Instance type. The
    # number of type arguments and their values are
    # checked only later, since we do not always know the
    # valid count at this point. Thus we may construct an
    # Instance with an invalid number of type arguments.
    instance = Instance(info, self.anal_array(args), ctx.line, ctx.column)
    # Check type argument count.
    if len(instance.args) != len(info.type_vars) and not self.defining_alias:
        fix_instance(instance, self.fail, self.note,
                     disallow_any=self.options.disallow_any_generics and
                     not self.is_typeshed_stub,
                     python_version=self.options.python_version)
    tup = info.tuple_type
    if tup is not None:
        # The class has a Tuple[...] base class so it will be
        # represented as a tuple type.
        if args:
            self.fail('Generic tuple types not supported', ctx)
            return AnyType(TypeOfAny.from_error)
        # re-analyze the tuple items with the just-built instance as fallback
        return tup.copy_modified(items=self.anal_array(tup.items),
                                 fallback=instance)
    td = info.typeddict_type
    if td is not None:
        # The class has a TypedDict[...] base class so it will be
        # represented as a typeddict type.
        if args:
            self.fail('Generic TypedDict types not supported', ctx)
            return AnyType(TypeOfAny.from_error)
        # Create a named TypedDictType
        return td.copy_modified(item_types=self.anal_array(list(td.items.values())),
                                fallback=instance)
    return instance
def analyze_unbound_type_without_type_info(self, t: UnboundType, sym: SymbolTableNode,
                                           defining_literal: bool) -> Type:
    """Figure out what an unbound type that doesn't refer to a TypeInfo node means.

    This is something unusual. We try our best to find out what it is.
    Tried in order: an Any-typed variable, an unbound type variable, an enum
    value used inside Literal[...]; otherwise an error is reported.
    """
    name = sym.fullname
    if name is None:
        assert sym.node is not None
        name = sym.node.name
    # Option 1:
    # Something with an Any type -- make it an alias for Any in a type
    # context. This is slightly problematic as it allows using the type 'Any'
    # as a base class -- however, this will fail soon at runtime so the problem
    # is pretty minor.
    if isinstance(sym.node, Var):
        typ = get_proper_type(sym.node.type)
        if isinstance(typ, AnyType):
            # Preserve where the Any originally came from (e.g. a failed import).
            return AnyType(TypeOfAny.from_unimported_type,
                           missing_import_name=typ.missing_import_name)
    # Option 2:
    # Unbound type variable. Currently these may be still valid,
    # for example when defining a generic type alias.
    unbound_tvar = (isinstance(sym.node, TypeVarExpr) and
                    self.tvar_scope.get_binding(sym) is None)
    if self.allow_unbound_tvars and unbound_tvar:
        return t
    # Option 3:
    # Enum value. Note: we only want to return a LiteralType when
    # we're using this enum value specifically within context of
    # a "Literal[...]" type. So, if `defining_literal` is not set,
    # we bail out early with an error.
    #
    # If, in the distant future, we decide to permit things like
    # `def foo(x: Color.RED) -> None: ...`, we can remove that
    # check entirely.
    if isinstance(sym.node, Var) and sym.node.info and sym.node.info.is_enum:
        value = sym.node.name
        base_enum_short_name = sym.node.info.name
        if not defining_literal:
            msg = message_registry.INVALID_TYPE_RAW_ENUM_VALUE.format(
                base_enum_short_name, value)
            self.fail(msg, t)
            return AnyType(TypeOfAny.from_error)
        return LiteralType(
            value=value,
            fallback=Instance(sym.node.info, [], line=t.line, column=t.column),
            line=t.line,
            column=t.column,
        )
    # None of the above options worked. We parse the args (if there are any)
    # to make sure there are no remaining semanal-only types, then give up.
    t = t.copy_modified(args=self.anal_array(t.args))
    # TODO: Move this message building logic to messages.py.
    notes = []  # type: List[str]
    if isinstance(sym.node, Var):
        notes.append('See https://mypy.readthedocs.io/en/'
                     'stable/common_issues.html#variables-vs-type-aliases')
        message = 'Variable "{}" is not valid as a type'
    elif isinstance(sym.node, (SYMBOL_FUNCBASE_TYPES, Decorator)):
        message = 'Function "{}" is not valid as a type'
        notes.append('Perhaps you need "Callable[...]" or a callback protocol?')
    elif isinstance(sym.node, MypyFile):
        # TODO: suggest a protocol when supported.
        message = 'Module "{}" is not valid as a type'
    elif unbound_tvar:
        message = 'Type variable "{}" is unbound'
        short = name.split('.')[-1]
        notes.append(('(Hint: Use "Generic[{}]" or "Protocol[{}]" base class'
                      ' to bind "{}" inside a class)').format(short, short, short))
        notes.append('(Hint: Use "{}" in function signature to bind "{}"'
                     ' inside a function)'.format(short, short))
    else:
        message = 'Cannot interpret reference "{}" as a type'
    self.fail(message.format(name), t, code=codes.VALID_TYPE)
    for note in notes:
        self.note(note, t, code=codes.VALID_TYPE)
    # TODO: Would it be better to always return Any instead of UnboundType
    # in case of an error? On one hand, UnboundType has a name so error messages
    # are more detailed, on the other hand, some of them may be bogus,
    # see https://github.com/python/mypy/issues/4987.
    return t
def visit_any(self, t: AnyType) -> Type:
    # Any carries no components to analyze; return it unchanged.
    return t
def visit_none_type(self, t: NoneType) -> Type:
    # None needs no further analysis.
    return t
def visit_uninhabited_type(self, t: UninhabitedType) -> Type:
    # The bottom type has no components; nothing to analyze.
    return t
def visit_erased_type(self, t: ErasedType) -> Type:
    # Erased types must never reach semantic analysis of annotations.
    # This type should exist only temporarily during type inference
    assert False, "Internal error: Unexpected erased type"
def visit_deleted_type(self, t: DeletedType) -> Type:
    # Deleted types pass through unchanged.
    return t
def visit_type_list(self, t: TypeList) -> Type:
    # A bare bracketed list reached outside the context where it is legal
    # (e.g. Callable argument lists); report it and recover with Any.
    self.fail('Bracketed expression "[...]" is not valid as a type', t)
    self.note('Did you mean "List[...]"?', t)
    return AnyType(TypeOfAny.from_error)
def visit_callable_argument(self, t: CallableArgument) -> Type:
    # A callable-argument form appeared somewhere it is not allowed;
    # report and recover with Any.
    self.fail('Invalid type', t)
    return AnyType(TypeOfAny.from_error)
def visit_instance(self, t: Instance) -> Type:
    # Instances are produced already analyzed; nothing to do.
    return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
    # Alias references are returned as-is.
    # TODO: should we do something here?
    return t
def visit_type_var(self, t: TypeVarType) -> Type:
    # Already-bound type variables need no further analysis.
    return t
def visit_callable_type(self, t: CallableType, nested: bool = True) -> Type:
    """Analyze a callable type: bind its type variables and analyze components."""
    # Every Callable can bind its own type variables, if they're not in the outer scope
    with self.tvar_scope_frame():
        if self.defining_alias:
            # When defining a generic alias, keep the declared variables.
            variables = t.variables
        else:
            variables = self.bind_function_type_variables(t, t)
        # Detect a TypeGuard[...] return annotation before analyzing it.
        special = self.anal_type_guard(t.ret_type)
        ret = t.copy_modified(arg_types=self.anal_array(t.arg_types, nested=nested),
                              ret_type=self.anal_type(t.ret_type, nested=nested),
                              # If the fallback isn't filled in yet,
                              # its type will be the falsey FakeInfo
                              fallback=(t.fallback if t.fallback.type
                                        else self.named_type('builtins.function')),
                              variables=self.anal_var_defs(variables),
                              type_guard=special,
                              )
    return ret
def visit_type_guard_type(self, t: TypeGuardType) -> Type:
    # TypeGuard wrappers are returned unchanged.
    return t
def anal_type_guard(self, t: Type) -> Optional[Type]:
    """If `t` is a TypeGuard[...] form, return its analyzed argument, else None."""
    if not isinstance(t, UnboundType):
        # TODO: What if it's an Instance? Then use t.type.fullname?
        return None
    sym = self.lookup_qualified(t.name, t)
    if sym is None or sym.node is None:
        return None
    return self.anal_type_guard_arg(t, sym.node.fullname)
def anal_type_guard_arg(self, t: UnboundType, fullname: str) -> Optional[Type]:
    """Analyze the single type argument of a TypeGuard special form.

    Returns None when `fullname` is not TypeGuard; Any on arity errors.
    """
    if fullname not in ('typing.TypeGuard', 'typing_extensions.TypeGuard'):
        return None
    if len(t.args) != 1:
        self.fail("TypeGuard must have exactly one type argument", t)
        return AnyType(TypeOfAny.from_error)
    return self.anal_type(t.args[0])
def visit_overloaded(self, t: Overloaded) -> Type:
    """Return overloaded types unchanged; they are constructed pre-analyzed."""
    # Overloaded types are manually constructed in semanal.py by analyzing the
    # AST and combining together the Callable types this visitor converts.
    #
    # So if we're ever asked to reanalyze an Overloaded type, we know it's
    # fine to just return it as-is.
    return t
def visit_tuple_type(self, t: TupleType) -> Type:
# Types such as (t1, t2, ...) only allowed in assignment statements. They'll
# generate errors elsewhere, and Tuple[t1, t2, ...] must be used instead.
if t.implicit and not self.allow_tuple_literal:
self.fail('Syntax error in type annotation', t, code=codes.SYNTAX)
if len(t.items) == 0:
self.note('Suggestion: Use Tuple[()] instead of () for an empty tuple, or '
'None for a function without a return value', t, code=codes.SYNTAX)
elif len(t.items) == 1:
self.note('Suggestion: Is there a spurious trailing comma?', t, code=codes.SYNTAX)
else:
self.note('Suggestion: Use Tuple[T1, ..., Tn] instead of (T1, ..., Tn)', t,
code=codes.SYNTAX)
return AnyType(TypeOfAny.from_error)
star_count = sum(1 for item in t.items if isinstance(item, StarType))
if star_count > 1:
self.fail('At most one star type allowed in a tuple', t)
if t.implicit:
return TupleType([AnyType(TypeOfAny.from_error) for _ in t.items],
self.named_type('builtins.tuple'),
t.line)
else:
return AnyType(TypeOfAny.from_error)
any_type = AnyType(TypeOfAny.special_form)
# If the fallback isn't | |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kotlin Rules
### Setup
Add the following snippet to your `WORKSPACE` file:
```bzl
git_repository(
name = "io_bazel_rules_kotlin",
remote = "https://github.com/bazelbuild/rules_kotlin.git",
commit = "<COMMIT_HASH>",
)
load("@io_bazel_rules_kotlin//kotlin:repositories.bzl", "kotlin_repositories")
kotlin_repositories(kotlin_release_version = "1.4.0")
load("@io_bazel_rules_kotlin//kotlin:core.bzl", "kt_register_toolchains")
kt_register_toolchains()
```
To enable persistent worker support, add the following to the appropriate `bazelrc` file:
```
build --strategy=KotlinCompile=worker
test --strategy=KotlinCompile=worker
```
### Standard Libraries
The Kotlin libraries that are bundled in a Kotlin release should be used with the rules; the mandatory standard libraries are added implicitly. After enabling
the repository the following Kotlin Libraries are also made available from the workspace `com_github_jetbrains_kotlin`:
* `kotlin-test`,
* `kotlin-reflect`.
So if you needed to add reflect as a dep use the following label `@com_github_jetbrains_kotlin//:kotlin-reflect`.
### Mixed Mode compilation
The JVM rules can compile both Java and Kotlin sources. The Java compiler wrapper is not optimized or persistent and does not have the features found in the
native java rules. This mode is useful for migrating a package to Kotlin over time.
### Annotation Processing
Annotation processing works just as it does in Java, plugins are declared via a [`java_plugin`](https://docs.bazel.build/versions/master/be/java.html#java_plugin)
and may also be inherited from a `java_library` via the `exported_plugins` attribute. Annotations work in mixed-mode compilation, and the Kotlin compiler takes
care of processing both aspects.
An example which can be found under `//examples/dagger`:
```bzl
java_plugin(
name = "dagger_plugin",
deps = [
"@dagger_compiler//jar",
"@guava//jar",
"@dagger_producers//jar",
"@dagger//jar",
"@javax_inject//jar"
],
processor_class = "dagger.internal.codegen.ComponentProcessor"
)
java_library(
name = "dagger_lib",
exports = [
"@javax_inject//jar",
"@dagger//jar",
],
exported_plugins = ["dagger_plugin"]
)
kt_jvm_binary(
name = "dagger",
srcs = glob(["src/**"]),
main_class = "coffee.CoffeeApp",
deps = [":dagger_lib"],
)
```
"""
load(
"//kotlin/internal:defs.bzl",
_JAVA_RUNTIME_TOOLCHAIN_TYPE = "JAVA_RUNTIME_TOOLCHAIN_TYPE",
_JAVA_TOOLCHAIN_TYPE = "JAVA_TOOLCHAIN_TYPE",
_KT_COMPILER_REPO = "KT_COMPILER_REPO",
_KtCompilerPluginInfo = "KtCompilerPluginInfo",
_KtJvmInfo = "KtJvmInfo",
_TOOLCHAIN_TYPE = "TOOLCHAIN_TYPE",
)
load(
"//kotlin/internal/jvm:plugins.bzl",
_kt_jvm_plugin_aspect = "kt_jvm_plugin_aspect",
)
load(
"//kotlin/internal:opts.bzl",
_JavacOptions = "JavacOptions",
_KotlincOptions = "KotlincOptions",
)
load(
"//kotlin/internal/jvm:impl.bzl",
_kt_compiler_deps_aspect_impl = "kt_compiler_deps_aspect_impl",
_kt_compiler_plugin_impl = "kt_compiler_plugin_impl",
_kt_jvm_binary_impl = "kt_jvm_binary_impl",
_kt_jvm_import_impl = "kt_jvm_import_impl",
_kt_jvm_junit_test_impl = "kt_jvm_junit_test_impl",
_kt_jvm_library_impl = "kt_jvm_library_impl",
)
load("//kotlin/internal/utils:utils.bzl", "utils")
# Tool and toolchain attributes implicitly attached to every Kotlin JVM rule.
_implicit_deps = {
    # Jar-merging tool used to assemble output jars.
    "_singlejar": attr.label(
        executable = True,
        cfg = "host",
        default = Label("@bazel_tools//tools/jdk:singlejar"),
        allow_files = True,
    ),
    # Zip tool used for packaging intermediate artifacts.
    "_zipper": attr.label(
        executable = True,
        cfg = "host",
        default = Label("@bazel_tools//tools/zip:zipper"),
        allow_files = True,
    ),
    # Template for the generated launcher script of runnable targets.
    "_java_stub_template": attr.label(
        cfg = "host",
        default = Label("@kt_java_stub_template//file"),
    ),
    "_toolchain": attr.label(
        doc = """The Kotlin JVM Runtime. it's only purpose is to enable the Android native rules to discover the Kotlin
runtime for dexing""",
        default = Label("@" + _KT_COMPILER_REPO + "//:kotlin-stdlib"),
        cfg = "target",
    ),
    # Current Java compilation toolchain.
    "_java_toolchain": attr.label(
        default = Label("@bazel_tools//tools/jdk:current_java_toolchain"),
    ),
    # Java runtime used on the execution (host) machine.
    "_host_javabase": attr.label(
        default = Label("@bazel_tools//tools/jdk:current_java_runtime"),
        cfg = "host",
    ),
    # Java runtime for the target configuration.
    "_java_runtime": attr.label(
        default = Label("@bazel_tools//tools/jdk:current_java_runtime"),
    ),
}
# Attributes shared by every Kotlin JVM rule (libraries, binaries and tests).
# Doc-string fixes: added the missing space in "rule. See" and corrected
# "should be include" -> "should be included" (these strings are rendered
# into user-facing rule documentation).
_common_attr = utils.add_dicts(
    _implicit_deps,
    {
        "srcs": attr.label_list(
            doc = """The list of source files that are processed to create the target, this can contain both Java and Kotlin
files. Java analysis occurs first so Kotlin classes may depend on Java classes in the same compilation unit.""",
            default = [],
            allow_files = [".srcjar", ".kt", ".java"],
        ),
        "deps": attr.label_list(
            doc = """A list of dependencies of this rule. See general comments about `deps` at
[Attributes common to all build rules](https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes).""",
            aspects = [] if hasattr(java_common, "JavaPluginInfo") else [_kt_jvm_plugin_aspect],
            providers = [
                [JavaInfo],
            ],
            allow_files = False,
        ),
        "runtime_deps": attr.label_list(
            doc = """Libraries to make available to the final binary or test at runtime only. Like ordinary deps, these will
appear on the runtime classpath, but unlike them, not on the compile-time classpath.""",
            default = [],
            allow_files = False,
        ),
        "resources": attr.label_list(
            doc = """A list of files that should be included in a Java jar.""",
            default = [],
            allow_files = True,
        ),
        "resource_strip_prefix": attr.string(
            doc = """The path prefix to strip from Java resources, files residing under common prefix such as
`src/main/resources` or `src/test/resources` or `kotlin` will have stripping applied by convention.""",
            default = "",
        ),
        "resource_jars": attr.label_list(
            doc = """Set of archives containing Java resources. If specified, the contents of these jars are merged into
the output jar.""",
            default = [],
        ),
        "data": attr.label_list(
            doc = """The list of files needed by this rule at runtime. See general comments about `data` at
[Attributes common to all build rules](https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes).""",
            allow_files = True,
        ),
        "associates": attr.label_list(
            doc = """Kotlin deps who should be considered part of the same module/compilation-unit
for the purposes of "internal" access. Such deps must all share the same module space
and so a target cannot associate to two deps from two different modules.""",
            default = [],
            providers = [JavaInfo, _KtJvmInfo],
        ),
        "friends": attr.label_list(
            doc = """A single Kotlin dep which allows Kotlin code in other modules access to
internal members. Currently uses the output jar of the module -- i.e., exported
deps won't be included. [DEPRECATED, use "associates" instead]""",
            default = [],
            providers = [JavaInfo, _KtJvmInfo],
        ),
        "plugins": attr.label_list(
            default = [],
            aspects = [] if hasattr(java_common, "JavaPluginInfo") else [_kt_jvm_plugin_aspect],
            cfg = "host",
        ),
        "module_name": attr.string(
            doc = """The name of the module, if not provided the module name is derived from the label. --e.g.,
`//some/package/path:label_name` is translated to
`some_package_path-label_name`.""",
            mandatory = False,
        ),
        "kotlinc_opts": attr.label(
            doc = """Kotlinc options to be used when compiling this target. These opts if provided
will be used instead of the ones provided to the toolchain.""",
            default = None,
            providers = [_KotlincOptions],
            mandatory = False,
        ),
        "javac_opts": attr.label(
            doc = """Javac options to be used when compiling this target. These opts if provided will
be used instead of the ones provided to the toolchain.""",
            default = None,
            providers = [_JavacOptions],
            mandatory = False,
        ),
    },
)
# Attributes specific to library rules: common attrs plus exports/neverlink
# and the empty stand-in artifacts used when exporting JavaInfos.
_lib_common_attr = utils.add_dicts(_common_attr, {
    "exports": attr.label_list(
        doc = """\
Exported libraries.

Deps listed here will be made available to other rules, as if the parents explicitly depended on
these deps. This is not true for regular (non-exported) deps.""",
        default = [],
        providers = [JavaInfo],
    ),
    "exported_compiler_plugins": attr.label_list(
        doc = """\
Exported compiler plugins.

Compiler plugins listed here will be treated as if they were added in the plugins attribute
of any targets that directly depend on this target. Unlike `java_plugin`s exported_plugins,
this is not transitive""",
        default = [],
        providers = [_KtCompilerPluginInfo],
    ),
    "neverlink": attr.bool(
        doc = """If true only use this library for compilation and not at runtime.""",
        default = False,
    ),
    "_empty_jar": attr.label(
        doc = """Empty jar for exporting JavaInfos.""",
        allow_single_file = True,
        cfg = "target",
        default = Label("//third_party:empty.jar"),
    ),
    "_empty_jdeps": attr.label(
        doc = """Empty jdeps for exporting JavaInfos.""",
        allow_single_file = True,
        cfg = "target",
        default = Label("//third_party:empty.jdeps"),
    ),
})
# Attributes for runnable rules (binaries and tests): common attrs plus JVM flags.
_runnable_common_attr = utils.add_dicts(_common_attr, {
    "jvm_flags": attr.string_list(
        doc = """A list of flags to embed in the wrapper script generated for running this binary. Note: does not yet
support make variable substitution.""",
        default = [],
    ),
})
# Output files declared by every Kotlin JVM rule.
_common_outputs = dict(
    jar = "%{name}.jar",
    jdeps = "%{name}.jdeps",
    # The params file, declared here so that it can be validated for testing.
    # jar_2_params = "%{name}.jar-2.params",
    srcjar = "%{name}-sources.jar",
)
# Toolchain types required by every Kotlin JVM rule.
_common_toolchains = [
    _TOOLCHAIN_TYPE,
    _JAVA_TOOLCHAIN_TYPE,
    _JAVA_RUNTIME_TOOLCHAIN_TYPE,
]
# Library rule: produces a jar (plus jdeps and srcjar) and advertises both
# JavaInfo and KtJvmInfo so Java and Kotlin consumers can depend on it.
kt_jvm_library = rule(
    doc = """This rule compiles and links Kotlin and Java sources into a .jar file.""",
    attrs = _lib_common_attr,
    outputs = _common_outputs,
    toolchains = _common_toolchains,
    fragments = ["java"],  # Required fragments of the target configuration
    host_fragments = ["java"],  # Required fragments of the host configuration
    implementation = _kt_jvm_library_impl,
    provides = [JavaInfo, _KtJvmInfo],
)
kt_jvm_binary = rule(
doc = """\
Builds a Java archive ("jar file"), plus a wrapper shell script with the same name as the rule. The wrapper
shell script uses a classpath that includes, among other things, a jar file for each library on which the binary
depends.
**Note:** This rule does not have all of the features found in [`java_binary`](https://docs.bazel.build/versions/master/be/java.html#java_binary).
It is appropriate for building workspace utilities. `java_binary` should be preferred for release artefacts.
""",
attrs = dict(_runnable_common_attr.items() + {
"main_class": attr.string(
doc = """Name of class with main() method to use as entry | |
#!/usr/bin/env python
""" Replication of CoordConv Paper.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.modules.conv as conv
import torch.utils.data as utils
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
from ..models import CoordConv2d as OurCoordConv2d
from ..utils import elbo, AverageMeter, merge_args_with_dict
from ..config import CONFIG
# Simple CNN Model
class Net(nn.Module):
    """Simple CNN for the CoordConv coordinate-painting task.

    `type` selects the architecture:
      * 'coord'     -- CoordConv stem followed by 1x1 convolutions.
      * 'deconv'    -- six stride-2 transposed convolutions; each doubles the
                       spatial size, so a 1x1 input becomes 64x64.
      * 'gen-coord' -- CoordConv stem plus a small strided-conv head.

    Raises:
        Exception: if `type` is not one of the three supported values.
    """

    def __init__(self, type):
        super(Net, self).__init__()
        self.type = type
        if self.type == 'coord':
            self.coordconv = OurCoordConv2d(2, 32, 1, with_r=True)
            self.conv1 = nn.Conv2d(32, 64, 1)
            self.conv2 = nn.Conv2d(64, 64, 1)
            self.conv3 = nn.Conv2d(64, 1, 1)
            self.conv4 = nn.Conv2d(1, 1, 1)
        elif self.type == 'deconv':
            self.deconv1 = nn.ConvTranspose2d(2, 128, 2, stride=2)
            self.deconv2 = nn.ConvTranspose2d(128, 128, 2, stride=2)
            self.deconv3 = nn.ConvTranspose2d(128, 128, 2, stride=2)
            self.deconv4 = nn.ConvTranspose2d(128, 64, 2, stride=2)
            self.deconv5 = nn.ConvTranspose2d(64, 64, 2, stride=2)
            self.deconv6 = nn.ConvTranspose2d(64, 1, 2, stride=2)
        elif self.type == 'gen-coord':
            self.coordconv = OurCoordConv2d(2, 32, 1, with_r=True)
            self.conv1 = nn.Conv2d(32, 64, 1)
            self.conv2 = nn.Conv2d(64, 64, 1)
            self.conv3 = nn.Conv2d(64, 1, 1)
            self.conv4 = nn.Conv2d(1, 1, 1)
            self.conv5 = nn.Conv2d(1, 16, 2)
            # BUG FIX: conv6 was assigned twice (first 16->16, then 16->32);
            # the first assignment was dead code. Keep the effective layer.
            self.conv6 = nn.Conv2d(16, 32, 2)
            self.conv7 = nn.Conv2d(32, 32, 2)
            self.conv8 = nn.Conv2d(32, 1, 1)
        else:
            raise Exception('Invalid Conv Type')

    def forward(self, x):
        if self.type == 'coord':
            x = self.coordconv(x)
            x = F.relu(self.conv1(x))
            x = F.relu(self.conv2(x))
            x = F.relu(self.conv3(x))
            x = self.conv4(x)
            x = x.view(-1, 64*64)
            return x
        elif self.type == 'deconv':
            x = F.relu(self.deconv1(x))
            x = F.relu(self.deconv2(x))
            x = F.relu(self.deconv3(x))
            x = F.relu(self.deconv4(x))
            x = F.relu(self.deconv5(x))
            x = self.deconv6(x)
            x = x.view(-1, 64*64)
            return x
        elif self.type == 'gen-coord':
            x = self.coordconv(x)
            x = F.relu(self.conv1(x))
            x = F.relu(self.conv2(x))
            x = F.relu(self.conv3(x))
            x = self.conv4(x)
            # BUG FIX: the original flattened to (N, 64*64) here and then
            # applied conv5 to a 2-D tensor (a runtime error), and the branch
            # fell off the end without a return statement (returning None).
            # Keep the 4-D feature map through the conv head and flatten once
            # at the end, using the actual spatial size.
            x = F.relu(self.conv5(x))
            x = F.relu(self.conv6(x))
            x = F.relu(self.conv7(x))
            x = self.conv8(x)
            return x.view(x.size(0), -1)
        else:
            raise Exception('Invalid Conv Type')
def cross_entropy_one_hot(input, target):
    """Cross-entropy loss where `target` is one-hot encoded.

    The one-hot rows are converted to class indices via argmax before the
    standard CrossEntropyLoss is applied to the raw logits.
    """
    labels = target.max(dim=1)[1]
    criterion = nn.CrossEntropyLoss()
    return criterion(input, labels)
def sigmoid_cross_entropy_one_hot(input, target):
    """Cross-entropy between sigmoid-squashed logits and a one-hot target.

    BUG FIX: the original called `nn.Sigmoid(input)`, which passes the tensor
    to the module *constructor* (a TypeError) instead of applying the
    function. Apply `torch.sigmoid` to the input instead.
    """
    input = torch.sigmoid(input)
    _, labels = target.max(dim=1)
    return nn.CrossEntropyLoss()(input, labels)
# Add Coordinates to Tensor
class AddCoords(nn.Module):
    """Append normalized coordinate channels (optionally a radius channel) to a
    1-D, 2-D or 3-D feature map, as in the CoordConv paper.

    BUG FIX applied in forward(): the original tested `torch.cuda.is_available`
    (the function object, no parentheses), which is always truthy, so the
    tensors were unconditionally moved to CUDA and the module crashed on
    CPU-only machines. The function is now actually called.
    """

    def __init__(self, rank, with_r=False):
        super(AddCoords, self).__init__()
        self.rank = rank        # spatial rank of the input (1, 2 or 3)
        self.with_r = with_r    # also append a radial-distance channel

    def forward(self, input_tensor):
        if self.rank == 1:
            # input: (batch, channels, x)
            batch_size_shape, channel_in_shape, dim_x = input_tensor.shape
            xx_range = torch.arange(dim_x, dtype=torch.int32)
            xx_channel = xx_range[None, None, :]
            # Normalize coordinates to [-1, 1].
            xx_channel = xx_channel.float() / (dim_x - 1)
            xx_channel = xx_channel * 2 - 1
            xx_channel = xx_channel.repeat(batch_size_shape, 1, 1)
            if torch.cuda.is_available():
                input_tensor = input_tensor.cuda()
                xx_channel = xx_channel.cuda()
            out = torch.cat([input_tensor, xx_channel], dim=1)
            if self.with_r:
                # NOTE(review): rr subtracts 0.5 from channels already scaled
                # to [-1, 1]; kept as-is to preserve the original behavior.
                rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2))
                out = torch.cat([out, rr], dim=1)
        elif self.rank == 2:
            # input: (batch, channels, y, x)
            batch_size_shape, channel_in_shape, dim_y, dim_x = input_tensor.shape
            xx_ones = torch.ones([1, 1, 1, dim_x], dtype=torch.int32)
            yy_ones = torch.ones([1, 1, 1, dim_y], dtype=torch.int32)
            # NOTE(review): xx_range uses dim_y and yy_range uses dim_x, which
            # matches the original port's convention for square inputs —
            # confirm before using rectangular feature maps.
            xx_range = torch.arange(dim_y, dtype=torch.int32)
            yy_range = torch.arange(dim_x, dtype=torch.int32)
            xx_range = xx_range[None, None, :, None]
            yy_range = yy_range[None, None, :, None]
            xx_channel = torch.matmul(xx_range, xx_ones)
            yy_channel = torch.matmul(yy_range, yy_ones)
            # transpose y
            yy_channel = yy_channel.permute(0, 1, 3, 2)
            xx_channel = xx_channel.float() / (dim_y - 1)
            yy_channel = yy_channel.float() / (dim_x - 1)
            xx_channel = xx_channel * 2 - 1
            yy_channel = yy_channel * 2 - 1
            xx_channel = xx_channel.repeat(batch_size_shape, 1, 1, 1)
            yy_channel = yy_channel.repeat(batch_size_shape, 1, 1, 1)
            if torch.cuda.is_available():
                input_tensor = input_tensor.cuda()
                xx_channel = xx_channel.cuda()
                yy_channel = yy_channel.cuda()
            out = torch.cat([input_tensor, xx_channel, yy_channel], dim=1)
            if self.with_r:
                rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) + torch.pow(yy_channel - 0.5, 2))
                out = torch.cat([out, rr], dim=1)
        elif self.rank == 3:
            # input: (batch, channels, z, y, x)
            batch_size_shape, channel_in_shape, dim_z, dim_y, dim_x = input_tensor.shape
            xx_ones = torch.ones([1, 1, 1, 1, dim_x], dtype=torch.int32)
            yy_ones = torch.ones([1, 1, 1, 1, dim_y], dtype=torch.int32)
            zz_ones = torch.ones([1, 1, 1, 1, dim_z], dtype=torch.int32)
            xy_range = torch.arange(dim_y, dtype=torch.int32)
            xy_range = xy_range[None, None, None, :, None]
            yz_range = torch.arange(dim_z, dtype=torch.int32)
            yz_range = yz_range[None, None, None, :, None]
            zx_range = torch.arange(dim_x, dtype=torch.int32)
            zx_range = zx_range[None, None, None, :, None]
            xy_channel = torch.matmul(xy_range, xx_ones)
            xx_channel = torch.cat([xy_channel + i for i in range(dim_z)], dim=2)
            yz_channel = torch.matmul(yz_range, yy_ones)
            yz_channel = yz_channel.permute(0, 1, 3, 4, 2)
            yy_channel = torch.cat([yz_channel + i for i in range(dim_x)], dim=4)
            zx_channel = torch.matmul(zx_range, zz_ones)
            zx_channel = zx_channel.permute(0, 1, 4, 2, 3)
            zz_channel = torch.cat([zx_channel + i for i in range(dim_y)], dim=3)
            if torch.cuda.is_available():
                input_tensor = input_tensor.cuda()
                xx_channel = xx_channel.cuda()
                yy_channel = yy_channel.cuda()
                zz_channel = zz_channel.cuda()
            out = torch.cat([input_tensor, xx_channel, yy_channel, zz_channel], dim=1)
            if self.with_r:
                rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) +
                                torch.pow(yy_channel - 0.5, 2) +
                                torch.pow(zz_channel - 0.5, 2))
                out = torch.cat([out, rr], dim=1)
        else:
            raise NotImplementedError
        return out
# Coordinate Convolution
class CoordConv2d(conv.Conv2d):
    """Conv2d that prepends coordinate channels to its input (CoordConv).

    NOTE(review): subclassing conv.Conv2d allocates parent weight/bias
    parameters that forward() never uses (self.conv does the actual work);
    kept as-is so the module's parameter layout is unchanged.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, with_r=False):
        super(CoordConv2d, self).__init__(in_channels, out_channels, kernel_size,
                                          stride, padding, dilation, groups, bias)
        self.rank = 2
        self.addcoords = AddCoords(self.rank, with_r)
        # The effective convolution: input grows by 2 coordinate channels,
        # plus one radius channel when with_r is set.
        self.conv = nn.Conv2d(in_channels+self.rank+int(with_r), out_channels,
                              kernel_size, stride, padding, dilation, groups, bias)

    def forward(self, input_tensor):
        out = self.addcoords(input_tensor)
        out = self.conv(out)
        return out
# Dataset Loading
def load_dataset(datatype):
    """Load the CoordConv coordinate datasets from .npy files.

    Args:
        datatype: 'uniform' loads data-uniform/ and reconstructs (x, y) inputs
            from the one-hot targets; anything else loads data-quadrant/.

    Returns:
        (train_set, test_set, train_onehot, test_onehot,
         train_set_orig, test_set_orig) where the *_set arrays are tiled to
        (N, 2, 64, 64) and normalized, and *_orig keep the 1x1 coordinates.
    """
    if datatype == 'uniform':
        # Load the one hot datasets
        train_onehot = np.load(os.path.join(os.path.dirname(__file__), 'data-uniform/train_onehot.npy')).astype('float32')
        test_onehot = np.load(os.path.join(os.path.dirname(__file__), 'data-uniform/test_onehot.npy')).astype('float32')
        # make the train and test datasets
        # train: recover (x, y) pixel positions from the one-hot maps
        pos_train = np.where(train_onehot == 1.0)
        X_train = pos_train[2]
        Y_train = pos_train[3]
        train_set = np.zeros((len(X_train), 2, 1, 1), dtype='float32')
        for i, (x, y) in enumerate(zip(X_train, Y_train)):
            train_set[i, 0, 0, 0] = x
            train_set[i, 1, 0, 0] = y
        # test
        pos_test = np.where(test_onehot == 1.0)
        X_test = pos_test[2]
        Y_test = pos_test[3]
        test_set = np.zeros((len(X_test), 2, 1, 1), dtype='float32')
        for i, (x, y) in enumerate(zip(X_test, Y_test)):
            test_set[i, 0, 0, 0] = x
            test_set[i, 1, 0, 0] = y
        train_set_orig = train_set / (64. - 1.)
        test_set_orig = test_set / (64. - 1.)
        # Broadcast each (x, y) pair across the full 64x64 grid.
        train_set = np.tile(train_set, [1, 1, 64, 64])
        test_set = np.tile(test_set, [1, 1, 64, 64])
        # Normalize the datasets
        train_set /= (64. - 1.)  # 64x64 grid, 0-based index
        test_set /= (64. - 1.)  # 64x64 grid, 0-based index
        print('Train set : ', train_set.shape, train_set.max(), train_set.min())
        print('Test set : ', test_set.shape, test_set.max(), test_set.min())
        return train_set, test_set, train_onehot, test_onehot, train_set_orig, test_set_orig
    else:
        # Load the one hot datasets and the train / test set
        train_set = np.load(os.path.join(os.path.dirname(__file__), 'data-quadrant/train_set.npy')).astype('float32')
        test_set = np.load(os.path.join(os.path.dirname(__file__), 'data-quadrant/test_set.npy')).astype('float32')
        train_onehot = np.load(os.path.join(os.path.dirname(__file__), 'data-quadrant/train_onehot.npy')).astype('float32')
        test_onehot = np.load(os.path.join(os.path.dirname(__file__), 'data-quadrant/test_onehot.npy')).astype('float32')
        train_set_orig = train_set/train_set.max()
        test_set_orig = test_set/test_set.max()
        train_set = np.tile(train_set, [1, 1, 64, 64])
        test_set = np.tile(test_set, [1, 1, 64, 64])
        # Normalize datasets
        train_set /= train_set.max()
        test_set /= test_set.max()
        print('Train set : ', train_set.shape, train_set.max(), train_set.min())
        print('Test set : ', test_set.shape, test_set.max(), test_set.min())
        return train_set, test_set, train_onehot, test_onehot, train_set_orig, test_set_orig
def train(epoch, net, train_dataloader, optimizer, criterion, device):
    """Run one training epoch of `net` over `train_dataloader`.

    Args:
        epoch: epoch number, used only for progress printing.
        net: model to optimize (switched to train mode here).
        train_dataloader: yields (data, target) batches.
        optimizer: optimizer stepped once per batch.
        criterion: loss applied to (output, target).
        device: device the batches are moved to.
    """
    net.train()
    iters = 0
    for batch_idx, (data, target) in enumerate(train_dataloader):
        # The deprecated torch.autograd.Variable wrapper was removed; plain
        # tensors carry autograd state since PyTorch 0.4.
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = net(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        iters += len(data)
        # loss.item() replaces the deprecated loss.data.item().
        print('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
            epoch, iters, len(train_dataloader.dataset),
            100. * (batch_idx + 1) / len(train_dataloader), loss.item()))
    print("")
def test(net, test_loader, optimizer, criterion, device):
    """Evaluate `net` on `test_loader`, printing average loss and accuracy.

    BUG FIX: removed a leftover `import pdb; pdb.set_trace()` debugging
    breakpoint that halted every evaluation run, plus the no-op
    `test_loss = test_loss` statement and the unused final `pred_logits` sum.

    Args:
        net: model to evaluate (switched to eval mode here).
        test_loader: yields (data, target) batches; targets are one-hot.
        optimizer: unused; kept for signature compatibility with callers.
        criterion: loss applied to (output, target) per batch.
        device: device the batches are moved to.
    """
    net.eval()
    test_loss = 0
    correct = 0
    pred_logits = torch.tensor([])
    for data, target in test_loader:
        with torch.no_grad():
            data, target = data.to(device), target.to(device)
            output = net(data)
            logits = F.softmax(output, dim=1)
            pred_logits = torch.cat((pred_logits, logits.cpu()), dim=0)
            test_loss += criterion(output, target).item()
            # Predicted class vs. the one-hot target's argmax.
            _, pred = output.max(1, keepdim=True)
            _, label = target.max(dim=1)
            correct += pred.eq(label.view_as(pred)).sum().item()
    test_loss /= len(test_loader)  # loss function already averages over batch size
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
if __name__ == '__main__':
# set seeds for reproducibility
np.random.seed(0)
torch.manual_seed(0)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str,
help='uniform|quadrant')
parser.add_argument('conv', type=str,
help='deconv|coord|gen')
parser.add_argument('epochs', type=int,
help='num training epochs')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
assert args.dataset in ['uniform', 'quadrant']
assert args.conv in ['deconv', 'coord', 'gen-coord']
train_set, test_set, train_onehot, test_onehot, train_orig, test_orig = load_dataset(args.dataset)
# flatten datasets
train_onehot = train_onehot.reshape((-1, 64 * 64)).astype('int64')
test_onehot = test_onehot.reshape((-1, 64 * 64)).astype('int64')
# initialize network
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = Net(args.conv).to(device)
if args.conv == 'coord':
# train data
train_tensor_x = torch.stack([torch.Tensor(i) for i in train_set])
train_tensor_y = torch.stack([torch.LongTensor(i) for i in | |
mask positions in action_prob that are not used
# (tgt_action_len, batch_size)
action_mask_pad = torch.eq(batch.apply_rule_mask + batch.gen_token_mask + batch.primitive_copy_mask, 0.)
action_mask = 1. - action_mask_pad.float()
# (tgt_action_len, batch_size)
action_prob = tgt_apply_rule_prob * batch.apply_rule_mask + \
primitive_predictor[:, :, 0] * tgt_primitive_gen_from_vocab_prob * batch.gen_token_mask + \
primitive_predictor[:, :, 1] * tgt_primitive_copy_prob * batch.primitive_copy_mask
# avoid nan in log
action_prob.data.masked_fill_(action_mask_pad.data, 1.e-7)
action_prob = action_prob.log() * action_mask
scores = torch.sum(action_prob, dim=0)
returns = [scores]
if self.args.sup_attention:
returns.append(att_prob)
if return_encode_state: returns.append(last_state)
return returns
def step(self, x, h_tm1, src_encodings, src_encodings_att_linear, src_token_mask=None, return_att_weight=False):
    """Advance the decoder LSTM by one time step.

    Args:
        x: (batch_size, input_size) decoder input for this step
        h_tm1: previous (hidden, cell) states, each (batch_size, hidden_size)
        src_encodings: (batch_size, src_sent_len, hidden_size * 2) source encodings
        src_encodings_att_linear: linearly transformed source encodings
        src_token_mask: mask over source tokens (unused entries masked to one)
        return_att_weight: if True, also return the attention distribution

    Returns:
        ((h_t, cell_t), att_t), plus the attention weights when requested
    """
    h_t, cell_t = self.decoder_lstm(x, h_tm1)
    # Attend over the source encodings with the fresh hidden state.
    ctx_t, alpha_t = nn_utils.dot_prod_attention(
        h_t, src_encodings, src_encodings_att_linear, mask=src_token_mask)
    # E.q. (5): fuse hidden state and context into the attentional vector.
    att_t = self.dropout(torch.tanh(self.att_vec_linear(torch.cat([h_t, ctx_t], 1))))
    if return_att_weight:
        return (h_t, cell_t), att_t, alpha_t
    return (h_t, cell_t), att_t
    def decode(self, batch, src_encodings, dec_init_vec):
        """Given a batch of examples and their encodings of input utterances,
        compute query vectors at each decoding time step, which are used to compute
        action probabilities

        Args:
            batch: a `Batch` object storing input examples
            src_encodings: variable of shape (batch_size, src_sent_len, hidden_size * 2), encodings of source utterances
            dec_init_vec: a tuple of variables representing initial decoder states

        Returns:
            Query vectors, a variable of shape (tgt_action_len, batch_size, hidden_size)
            Also return the attention weights over candidate tokens if using supervised attention
        """
        batch_size = len(batch)
        args = self.args

        if args.lstm == 'parent_feed':
            # parent-feed LSTM carries two extra state slots (parent hidden / cell),
            # initialized to zeros before the first step
            h_tm1 = dec_init_vec[0], dec_init_vec[1], \
                Variable(self.new_tensor(batch_size, args.hidden_size).zero_()), \
                Variable(self.new_tensor(batch_size, args.hidden_size).zero_())
        else:
            h_tm1 = dec_init_vec

        # (batch_size, query_len, hidden_size)
        src_encodings_att_linear = self.att_src_linear(src_encodings)

        # placeholder embedding for "no previous action" (padding past an example's end)
        zero_action_embed = Variable(self.new_tensor(args.action_embed_size).zero_())

        att_vecs = []         # attentional vector per time step
        history_states = []   # (h_t, cell_t) per time step, for parent-state lookup below
        att_probs = []        # supervised-attention probabilities (only if args.sup_attention)
        att_weights = []      # raw attention weights per time step

        for t in range(batch.max_action_num):
            # the input to the decoder LSTM is a concatenation of multiple signals
            # [
            #   embedding of previous action -> `a_tm1_embed`,
            #   previous attentional vector -> `att_tm1`,
            #   embedding of the current frontier (parent) constructor (rule) -> `parent_production_embed`,
            #   embedding of the frontier (parent) field -> `parent_field_embed`,
            #   embedding of the ASDL type of the frontier field -> `parent_field_type_embed`,
            #   LSTM state of the parent action -> `parent_states`
            # ]
            if t == 0:
                # first step: all-zero input, except (optionally) the root type embedding
                x = Variable(self.new_tensor(batch_size, self.decoder_lstm.input_size).zero_(), requires_grad=False)

                # initialize using the root type embedding
                if args.no_parent_field_type_embed is False:
                    # column offset of the field-type segment inside x, skipping the
                    # segments that are enabled by the configuration flags
                    offset = args.action_embed_size  # prev_action
                    offset += args.att_vec_size * (not args.no_input_feed)
                    offset += args.action_embed_size * (not args.no_parent_production_embed)
                    offset += args.field_embed_size * (not args.no_parent_field_embed)

                    x[:, offset: offset + args.type_embed_size] = self.type_embed(Variable(self.new_long_tensor(
                        [self.grammar.type2id[self.grammar.root_type] for e in batch.examples])))
            else:
                a_tm1_embeds = []
                for example in batch.examples:
                    # action t - 1
                    if t < len(example.tgt_actions):
                        a_tm1 = example.tgt_actions[t - 1]
                        if isinstance(a_tm1.action, ApplyRuleAction):
                            a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[a_tm1.action.production]]
                        elif isinstance(a_tm1.action, ReduceAction):
                            # Reduce shares the extra (last) row of the production embedding table
                            a_tm1_embed = self.production_embed.weight[len(self.grammar)]
                        else:
                            # GenToken action: embed the generated primitive token
                            a_tm1_embed = self.primitive_embed.weight[self.vocab.primitive[a_tm1.action.token]]
                    else:
                        # example already finished: pad with the zero embedding
                        a_tm1_embed = zero_action_embed

                    a_tm1_embeds.append(a_tm1_embed)

                a_tm1_embeds = torch.stack(a_tm1_embeds)

                inputs = [a_tm1_embeds]
                if args.no_input_feed is False:
                    # `att_tm1` was set at the end of the previous iteration (t >= 1 here)
                    inputs.append(att_tm1)
                if args.no_parent_production_embed is False:
                    parent_production_embed = self.production_embed(batch.get_frontier_prod_idx(t))
                    inputs.append(parent_production_embed)
                if args.no_parent_field_embed is False:
                    parent_field_embed = self.field_embed(batch.get_frontier_field_idx(t))
                    inputs.append(parent_field_embed)
                if args.no_parent_field_type_embed is False:
                    parent_field_type_embed = self.type_embed(batch.get_frontier_field_type_idx(t))
                    inputs.append(parent_field_type_embed)

                # append history states
                actions_t = [e.tgt_actions[t] if t < len(e.tgt_actions) else None for e in batch.examples]
                if args.no_parent_state is False:
                    # look up the decoder state recorded at each action's parent time step;
                    # finished examples fall back to time step 0
                    parent_states = torch.stack([history_states[p_t][0][batch_id]
                                                 for batch_id, p_t in
                                                 enumerate(a_t.parent_t if a_t else 0 for a_t in actions_t)])

                    parent_cells = torch.stack([history_states[p_t][1][batch_id]
                                                for batch_id, p_t in
                                                enumerate(a_t.parent_t if a_t else 0 for a_t in actions_t)])

                    if args.lstm == 'parent_feed':
                        h_tm1 = (h_tm1[0], h_tm1[1], parent_states, parent_cells)
                    else:
                        inputs.append(parent_states)

                x = torch.cat(inputs, dim=-1)

            (h_t, cell_t), att_t, att_weight = self.step(x, h_tm1, src_encodings,
                                                         src_encodings_att_linear,
                                                         src_token_mask=batch.src_token_mask,
                                                         return_att_weight=True)

            # if use supervised attention
            if args.sup_attention:
                for e_id, example in enumerate(batch.examples):
                    if t < len(example.tgt_actions):
                        action_t = example.tgt_actions[t].action

                        # source tokens this action should attend to
                        cand_src_tokens = AttentionUtil.get_candidate_tokens_to_attend(example.src_sent, action_t)

                        if cand_src_tokens:
                            att_prob = [att_weight[e_id, token_id] for token_id in cand_src_tokens]

                            # NOTE(review): entries look like 0-dim tensors; torch.cat on
                            # 0-dim tensors may require torch.stack in newer PyTorch -- confirm
                            if len(att_prob) > 1: att_prob = torch.cat(att_prob).sum()
                            else: att_prob = att_prob[0]

                            att_probs.append(att_prob)

            history_states.append((h_t, cell_t))
            att_vecs.append(att_t)
            att_weights.append(att_weight)

            h_tm1 = (h_t, cell_t)
            att_tm1 = att_t

        # (tgt_action_len, batch_size, hidden_size)
        att_vecs = torch.stack(att_vecs, dim=0)

        if args.sup_attention:
            return att_vecs, att_probs
        else: return att_vecs
def parse(self, src_sent, context=None, beam_size=5, debug=False):
"""Perform beam search to infer the target AST given a source utterance
Args:
src_sent: list of source utterance tokens
context: other context used for prediction
beam_size: beam size
Returns:
A list of `DecodeHypothesis`, each representing an AST
"""
args = self.args
primitive_vocab = self.vocab.primitive
T = torch.cuda if args.cuda else torch
if self.args.bert_path:
src_sent_var = self._bert_encode([src_sent])
else:
src_sent_var = nn_utils.to_input_variable([src_sent], self.vocab.source, cuda=args.cuda, training=False)
# Variable(1, src_sent_len, hidden_size * 2)
src_encodings, (last_state, last_cell) = self.encode(src_sent_var, [len(src_sent)])
# (1, src_sent_len, hidden_size)
src_encodings_att_linear = self.att_src_linear(src_encodings)
dec_init_vec = self.init_decoder_state(last_state, last_cell)
if args.lstm == 'parent_feed':
h_tm1 = dec_init_vec[0], dec_init_vec[1], \
Variable(self.new_tensor(args.hidden_size).zero_()), \
Variable(self.new_tensor(args.hidden_size).zero_())
else:
h_tm1 = dec_init_vec
zero_action_embed = Variable(self.new_tensor(args.action_embed_size).zero_())
with torch.no_grad():
hyp_scores = Variable(self.new_tensor([0.]))
# For computing copy probabilities, we marginalize over tokens with the same surface form
# `aggregated_primitive_tokens` stores the position of occurrence of each source token
aggregated_primitive_tokens = OrderedDict()
for token_pos, token in enumerate(src_sent):
aggregated_primitive_tokens.setdefault(token, []).append(token_pos)
t = 0
hypotheses = [DecodeHypothesis()]
hyp_states = [[]]
completed_hypotheses = []
while len(completed_hypotheses) < beam_size and t < args.decode_max_time_step:
hyp_num = len(hypotheses)
# (hyp_num, src_sent_len, hidden_size * 2)
exp_src_encodings = src_encodings.expand(hyp_num, src_encodings.size(1), src_encodings.size(2))
# (hyp_num, src_sent_len, hidden_size)
exp_src_encodings_att_linear = src_encodings_att_linear.expand(hyp_num, src_encodings_att_linear.size(1), src_encodings_att_linear.size(2))
if t == 0:
with torch.no_grad():
x = Variable(self.new_tensor(1, self.decoder_lstm.input_size).zero_())
if args.no_parent_field_type_embed is False:
offset = args.action_embed_size # prev_action
offset += args.att_vec_size * (not args.no_input_feed)
offset += args.action_embed_size * (not args.no_parent_production_embed)
offset += args.field_embed_size * (not args.no_parent_field_embed)
x[0, offset: offset + args.type_embed_size] = \
self.type_embed.weight[self.grammar.type2id[self.grammar.root_type]]
else:
actions_tm1 = [hyp.actions[-1] for hyp in hypotheses]
a_tm1_embeds = []
for a_tm1 in actions_tm1:
if a_tm1:
if isinstance(a_tm1, ApplyRuleAction):
a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[a_tm1.production]]
elif isinstance(a_tm1, ReduceAction):
a_tm1_embed = self.production_embed.weight[len(self.grammar)]
else:
a_tm1_embed = self.primitive_embed.weight[self.vocab.primitive[a_tm1.token]]
a_tm1_embeds.append(a_tm1_embed)
else:
a_tm1_embeds.append(zero_action_embed)
a_tm1_embeds = torch.stack(a_tm1_embeds)
inputs = [a_tm1_embeds]
if args.no_input_feed is False:
inputs.append(att_tm1)
if args.no_parent_production_embed is False:
# frontier production
frontier_prods = [hyp.frontier_node.production for hyp in hypotheses]
frontier_prod_embeds = self.production_embed(Variable(self.new_long_tensor(
[self.grammar.prod2id[prod] for prod in frontier_prods])))
inputs.append(frontier_prod_embeds)
if args.no_parent_field_embed is False:
# frontier field
frontier_fields = [hyp.frontier_field.field for hyp in hypotheses]
frontier_field_embeds = self.field_embed(Variable(self.new_long_tensor([
self.grammar.field2id[field] for field in frontier_fields])))
inputs.append(frontier_field_embeds)
if args.no_parent_field_type_embed is False:
# frontier field type
frontier_field_types = [hyp.frontier_field.type for hyp in hypotheses]
frontier_field_type_embeds = self.type_embed(Variable(self.new_long_tensor([
self.grammar.type2id[type] for type in frontier_field_types])))
inputs.append(frontier_field_type_embeds)
# parent states
if args.no_parent_state is False:
p_ts = [hyp.frontier_node.created_time for hyp in hypotheses]
parent_states = torch.stack([hyp_states[hyp_id][p_t][0] for hyp_id, p_t in enumerate(p_ts)])
parent_cells = torch.stack([hyp_states[hyp_id][p_t][1] for hyp_id, p_t in enumerate(p_ts)])
if args.lstm == 'parent_feed':
h_tm1 = (h_tm1[0], h_tm1[1], parent_states, parent_cells)
else:
inputs.append(parent_states)
x = torch.cat(inputs, dim=-1)
(h_t, cell_t), att_t = self.step(x, h_tm1, exp_src_encodings,
exp_src_encodings_att_linear,
src_token_mask=None)
# Variable(batch_size, grammar_size)
# apply_rule_log_prob = torch.log(F.softmax(self.production_readout(att_t), dim=-1))
apply_rule_log_prob = F.log_softmax(self.production_readout(att_t), dim=-1)
# Variable(batch_size, primitive_vocab_size)
gen_from_vocab_prob = F.softmax(self.tgt_token_readout(att_t), dim=-1)
if args.no_copy:
primitive_prob = gen_from_vocab_prob
else:
# Variable(batch_size, src_sent_len)
primitive_copy_prob = self.src_pointer_net(src_encodings, None, att_t.unsqueeze(0)).squeeze(0)
# Variable(batch_size, 2)
primitive_predictor_prob = F.softmax(self.primitive_predictor(att_t), dim=-1)
# Variable(batch_size, primitive_vocab_size)
primitive_prob = primitive_predictor_prob[:, 0].unsqueeze(1) * gen_from_vocab_prob
# if src_unk_pos_list:
# primitive_prob[:, primitive_vocab.unk_id] = 1.e-10
gentoken_prev_hyp_ids = []
gentoken_new_hyp_unks = []
applyrule_new_hyp_scores = []
applyrule_new_hyp_prod_ids = []
applyrule_prev_hyp_ids = []
for hyp_id, hyp in enumerate(hypotheses):
# generate new continuations
action_types = self.transition_system.get_valid_continuation_types(hyp)
for action_type in action_types:
if action_type == ApplyRuleAction:
productions = self.transition_system.get_valid_continuating_productions(hyp)
for production in productions:
prod_id = self.grammar.prod2id[production]
prod_score = apply_rule_log_prob[hyp_id, prod_id].data.item()
new_hyp_score = hyp.score + prod_score
applyrule_new_hyp_scores.append(new_hyp_score)
applyrule_new_hyp_prod_ids.append(prod_id)
applyrule_prev_hyp_ids.append(hyp_id)
elif action_type == ReduceAction:
action_score = apply_rule_log_prob[hyp_id, len(self.grammar)].data.item()
new_hyp_score = hyp.score + action_score
applyrule_new_hyp_scores.append(new_hyp_score)
applyrule_new_hyp_prod_ids.append(len(self.grammar))
applyrule_prev_hyp_ids.append(hyp_id)
else:
# GenToken action
gentoken_prev_hyp_ids.append(hyp_id)
hyp_copy_info = dict() # of (token_pos, copy_prob)
hyp_unk_copy_info = []
if args.no_copy is False:
for token, token_pos_list in aggregated_primitive_tokens.items():
sum_copy_prob = torch.gather(primitive_copy_prob[hyp_id], 0, Variable(T.LongTensor(token_pos_list))).sum()
gated_copy_prob = primitive_predictor_prob[hyp_id, 1] * sum_copy_prob
if token in primitive_vocab:
token_id = primitive_vocab[token]
primitive_prob[hyp_id, token_id] = primitive_prob[hyp_id, token_id] + gated_copy_prob
hyp_copy_info[token] = (token_pos_list, gated_copy_prob.data.item())
else:
hyp_unk_copy_info.append({'token': token, 'token_pos_list': token_pos_list,
| |
# <gh_stars>0
from time import time
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from django.http import Http404, HttpResponse, JsonResponse
from django.http.request import QueryDict
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.http import is_safe_url, urlquote
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.vary import vary_on_headers
from django.views.generic import View
from wagtail.utils.pagination import paginate
from wagtail.admin import messages, signals
from wagtail.admin.forms import CopyForm, SearchForm
from wagtail.admin.utils import (
send_notification, user_has_any_page_permission, user_passes_test)
from wagtail.core import hooks
from wagtail.core.models import Page, PageRevision, UserPagePermissionsProxy
def get_valid_next_url_from_request(request):
    """Return the 'next' URL from POST or GET if it is safe to redirect to, else ''."""
    candidate = request.POST.get('next') or request.GET.get('next')
    if candidate and is_safe_url(url=candidate, host=request.get_host()):
        return candidate
    return ''
@user_passes_test(user_has_any_page_permission)
def index(request, parent_page_id=None):
    """Page explorer: list the children of the given parent page (or of the root)."""
    if parent_page_id:
        parent_page = get_object_or_404(Page, id=parent_page_id).specific
    else:
        parent_page = Page.get_first_root_node().specific

    pages = parent_page.get_children().prefetch_related('content_type', 'sites_rooted_here')

    # Get page ordering; unknown values fall back to the default below
    ordering = request.GET.get('ordering', '-latest_revision_created_at')
    if ordering not in [
        'title',
        '-title',
        'content_type',
        '-content_type',
        'live', '-live',
        'latest_revision_created_at',
        '-latest_revision_created_at',
        'ord'
    ]:
        ordering = '-latest_revision_created_at'

    if ordering == 'ord':
        # preserve the native ordering from get_children()
        pass
    elif ordering == 'latest_revision_created_at':
        # order by oldest revision first.
        # Special case NULL entries - these should go at the top of the list.
        # Do this by annotating with Count('latest_revision_created_at'),
        # which returns 0 for these
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by('null_position', 'latest_revision_created_at')
    elif ordering == '-latest_revision_created_at':
        # order by newest revision first.
        # Special case NULL entries - these should go at the end of the list.
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by('-null_position', '-latest_revision_created_at')
    else:
        pages = pages.order_by(ordering)

    # Don't paginate if sorting by page order - all pages must be shown to
    # allow drag-and-drop reordering
    do_paginate = ordering != 'ord'

    if do_paginate:
        # Retrieve pages in their most specific form.
        # Only do this for paginated listings, as this could potentially be a
        # very expensive operation when performed on a large queryset.
        pages = pages.specific()

    # allow hooks to modify the queryset
    for hook in hooks.get_hooks('construct_explorer_page_queryset'):
        pages = hook(parent_page, pages, request)

    # Pagination
    if do_paginate:
        paginator, pages = paginate(request, pages, per_page=50)

    return render(request, 'wagtailadmin/pages/index.html', {
        'parent_page': parent_page.specific,
        'ordering': ordering,
        'pagination_query_params': "ordering=%s" % ordering,
        'pages': pages,
        'do_paginate': do_paginate,
    })
def add_subpage(request, parent_page_id):
    """Show the page-type chooser for creating a subpage under the given parent.

    Redirects straight to the create form when only one type is available.
    """
    parent_page = get_object_or_404(Page, id=parent_page_id).specific
    if not parent_page.permissions_for_user(request.user).can_add_subpage():
        raise PermissionDenied

    # (verbose name, app label, model name) for every creatable subpage type,
    # sorted by lower-cased verbose name
    page_types = sorted(
        (
            (model.get_verbose_name(), model._meta.app_label, model._meta.model_name)
            for model in type(parent_page).creatable_subpage_models()
            if model.can_create_at(parent_page)
        ),
        key=lambda entry: entry[0].lower(),
    )

    if len(page_types) == 1:
        # Only one page type is available - redirect straight to the create form rather than
        # making the user choose
        verbose_name, app_label, model_name = page_types[0]
        return redirect('wagtailadmin_pages:add', app_label, model_name, parent_page.id)

    return render(request, 'wagtailadmin/pages/add_subpage.html', {
        'parent_page': parent_page,
        'page_types': page_types,
        'next': get_valid_next_url_from_request(request),
    })
def content_type_use(request, content_type_app_name, content_type_model_name):
    """List all pages that use the given content type, paginated."""
    try:
        content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404

    page_class = content_type.model_class()

    # page_class must be a Page type and not some other random model
    if not issubclass(page_class, Page):
        raise Http404

    paginator, pages = paginate(request, page_class.objects.all(), per_page=10)

    return render(request, 'wagtailadmin/pages/content_type_use.html', {
        'pages': pages,
        'app_name': content_type_app_name,
        'content_type': content_type,
        'page_class': page_class,
    })
def create(request, content_type_app_name, content_type_model_name, parent_page_id):
    """Create a new page of the given content type beneath the given parent.

    GET renders an empty form; POST validates, saves a revision and, depending
    on the submitted action, publishes or submits the page for moderation.
    """
    parent_page = get_object_or_404(Page, id=parent_page_id).specific
    parent_page_perms = parent_page.permissions_for_user(request.user)
    if not parent_page_perms.can_add_subpage():
        raise PermissionDenied

    try:
        content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404

    # Get class
    page_class = content_type.model_class()

    # Make sure the class is a descendant of Page
    if not issubclass(page_class, Page):
        raise Http404

    # page must be in the list of allowed subpage types for this parent ID
    if page_class not in parent_page.creatable_subpage_models():
        raise PermissionDenied

    if not page_class.can_create_at(parent_page):
        raise PermissionDenied

    # hooks may short-circuit the view by returning a response
    for fn in hooks.get_hooks('before_create_page'):
        result = fn(request, parent_page, page_class)
        if hasattr(result, 'status_code'):
            return result

    page = page_class(owner=request.user)
    edit_handler = page_class.get_edit_handler()
    form_class = edit_handler.get_form_class()

    next_url = get_valid_next_url_from_request(request)

    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=page,
                          parent_page=parent_page)

        if form.is_valid():
            page = form.save(commit=False)

            is_publishing = bool(request.POST.get('action-publish')) and parent_page_perms.can_publish_subpage()
            is_submitting = bool(request.POST.get('action-submit'))

            if not is_publishing:
                page.live = False

            # Save page
            parent_page.add_child(instance=page)

            # Save revision
            revision = page.save_revision(
                user=request.user,
                submitted_for_moderation=is_submitting,
            )

            # Publish
            if is_publishing:
                revision.publish()

            # Notifications
            if is_publishing:
                if page.go_live_at and page.go_live_at > timezone.now():
                    # scheduled for future publishing
                    messages.success(request, _("Page '{0}' created and scheduled for publishing.").format(page.get_admin_display_title()), buttons=[
                        messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
                    ])
                else:
                    buttons = []
                    if page.url is not None:
                        buttons.append(messages.button(page.url, _('View live'), new_window=True))
                    buttons.append(messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit')))
                    messages.success(request, _("Page '{0}' created and published.").format(page.get_admin_display_title()), buttons=buttons)
            elif is_submitting:
                messages.success(
                    request,
                    _("Page '{0}' created and submitted for moderation.").format(page.get_admin_display_title()),
                    buttons=[
                        messages.button(
                            reverse('wagtailadmin_pages:view_draft', args=(page.id,)),
                            _('View draft'),
                            new_window=True
                        ),
                        messages.button(
                            reverse('wagtailadmin_pages:edit', args=(page.id,)),
                            _('Edit')
                        )
                    ]
                )
                if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk):
                    messages.error(request, _("Failed to send notifications to moderators"))
            else:
                messages.success(request, _("Page '{0}' created.").format(page.get_admin_display_title()))

            # hooks may short-circuit after creation as well
            for fn in hooks.get_hooks('after_create_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result

            if is_publishing or is_submitting:
                # we're done here
                if next_url:
                    # redirect back to 'next' url if present
                    return redirect(next_url)
                # redirect back to the explorer
                return redirect('wagtailadmin_explore', page.get_parent().id)
            else:
                # Just saving - remain on edit page for further edits
                target_url = reverse('wagtailadmin_pages:edit', args=[page.id])
                if next_url:
                    # Ensure the 'next' url is passed through again if present
                    target_url += '?next=%s' % urlquote(next_url)
                return redirect(target_url)
        else:
            # invalid form: re-render with errors
            messages.validation_error(
                request, _("The page could not be created due to validation errors"), form
            )
            edit_handler = edit_handler.bind_to_instance(instance=page,
                                                         form=form)
            has_unsaved_changes = True
    else:
        # GET: blank form for a fresh page
        signals.init_new_page.send(sender=create, page=page, parent=parent_page)
        form = form_class(instance=page, parent_page=parent_page)
        edit_handler = edit_handler.bind_to_instance(instance=page, form=form)
        has_unsaved_changes = False

    return render(request, 'wagtailadmin/pages/create.html', {
        'content_type': content_type,
        'page_class': page_class,
        'parent_page': parent_page,
        'edit_handler': edit_handler,
        'preview_modes': page.preview_modes,
        'form': form,
        'next': next_url,
        'has_unsaved_changes': has_unsaved_changes,
    })
def edit(request, page_id):
latest_revision = get_object_or_404(Page, id=page_id).get_latest_revision()
page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
parent = page.get_parent()
content_type = ContentType.objects.get_for_model(page)
page_class = content_type.model_class()
page_perms = page.permissions_for_user(request.user)
if not page_perms.can_edit():
raise PermissionDenied
for fn in hooks.get_hooks('before_edit_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
edit_handler = page_class.get_edit_handler()
form_class = edit_handler.get_form_class()
next_url = get_valid_next_url_from_request(request)
errors_debug = None
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=page,
parent_page=parent)
if form.is_valid() and not page.locked:
page = form.save(commit=False)
is_publishing = bool(request.POST.get('action-publish')) and page_perms.can_publish()
is_submitting = bool(request.POST.get('action-submit'))
is_reverting = bool(request.POST.get('revision'))
# If a revision ID was passed in the form, get that revision so its
# date can be referenced in notification messages
if is_reverting:
previous_revision = get_object_or_404(page.revisions, id=request.POST.get('revision'))
# Save revision
revision = page.save_revision(
user=request.user,
submitted_for_moderation=is_submitting,
)
# store submitted go_live_at for messaging below
go_live_at = page.go_live_at
# Publish
if is_publishing:
revision.publish()
# Need to reload the page because the URL may have changed, and we
# need the up-to-date URL for the "View Live" button.
page = page.specific_class.objects.get(pk=page.pk)
# Notifications
if is_publishing:
if go_live_at and go_live_at > timezone.now():
# Page has been scheduled for publishing in the future
if is_reverting:
message = _(
"Revision from {0} of page '{1}' has been scheduled for publishing."
).format(
previous_revision.created_at.strftime("%d %b %Y %H:%M"),
page.get_admin_display_title()
)
else:
if page.live:
message = _(
"Page '{0}' is live and this revision has been scheduled for publishing."
).format(
page.get_admin_display_title()
)
else:
message = _(
"Page '{0}' has been scheduled for publishing."
).format(
page.get_admin_display_title()
)
messages.success(request, message, buttons=[
messages.button(
reverse('wagtailadmin_pages:edit', args=(page.id,)),
_('Edit')
)
])
else:
# Page is being published now
if is_reverting:
message = _(
"Revision from {0} of page '{1}' has been published."
).format(
previous_revision.created_at.strftime("%d %b %Y %H:%M"),
page.get_admin_display_title()
)
else:
message = _(
"Page '{0}' has been published."
).format(
page.get_admin_display_title()
)
buttons = []
if page.url is not None:
buttons.append(messages.button(page.url, _('View live'), new_window=True))
buttons.append(messages.button(reverse('wagtailadmin_pages:edit', args=(page_id,)), _('Edit')))
messages.success(request, message, buttons=buttons)
elif is_submitting:
message = _(
"Page '{0}' has been submitted for moderation."
).format(
page.get_admin_display_title()
)
messages.success(request, message, buttons=[
messages.button(
reverse('wagtailadmin_pages:view_draft', args=(page_id,)),
_('View draft'),
new_window=True
),
messages.button(
reverse('wagtailadmin_pages:edit', args=(page_id,)),
_('Edit')
)
])
if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk):
messages.error(request, _("Failed to send notifications to moderators"))
else: # Saving
if is_reverting:
message = _(
"Page '{0}' has been replaced with revision from {1}."
).format(
page.get_admin_display_title(),
previous_revision.created_at.strftime("%d %b %Y %H:%M")
)
else:
message = _(
"Page '{0}' has been updated."
).format(
page.get_admin_display_title()
)
messages.success(request, message)
for fn in hooks.get_hooks('after_edit_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
if is_publishing or is_submitting:
# we're done here - redirect back to the explorer
if next_url:
# redirect back to 'next' url if present
return redirect(next_url)
# redirect back to the explorer
return redirect('wagtailadmin_explore', page.get_parent().id)
else:
| |
# <reponame>BAMresearch/ctsimu-toolbox
# -*- coding: UTF-8 -*-
"""
This module provides classes for the virtual processing of images.
* `Image` reads, stores, writes and handles image data.
* `ImageFile` gathers information about an image file: file name, data type,
byte order. It is used to instruct the `Image.read()` and `Image.save()`
routines.
* `ImageStack` represents a stack of images in the file system. It can be used
in combination with a processing pipeline (see `ctsimu.processing`).
* `ImageROI` defines a pixel region of interest in an image.
Images
------
To import a single image, you can specify its file name in the constructor
and then use the `Image.read()` function to import it into the internal memory.
It will be stored in `Image.px` as a float64 NumPy array. When writing an
image using `Image.save()`, you have to specify the data type for the new file.
from ctsimu.image import Image
myImage = Image("example.tif")
myImage.read()
# Mirror horizontally:
myImage.flipHorizontal()
myImage.save("example_mirrored.raw", dataType="float32")
RAW File Handling
-----------------
To read raw image data, its dimensions, data type, byte order and header size
must be specified:
from ctsimu.image import Image
myImage = Image("example_mirrored.raw")
myImage.read(width=501,
height=501,
dataType="float32",
byteOrder="little",
fileHeaderSize=0)
# Export as big endian, uint16:
myImage.save("example_converted.raw",
dataType="uint16",
byteOrder="big")
"""
import numpy
import os # File and path handling
import sys # To get native byte order ('little' or 'big' endian?)
import math
import copy
from numpy.random import default_rng
# Scipy:
# 'ndimage' class for image processing
# 'optimize' class for intensity fit
# 'signal' class for drift analysis using FFT Convolution
from scipy import ndimage, optimize, stats, signal, fft
from .helpers import *
from .primitives import * # Vectors and Polygons
from .tiffy import tiff
# pixelHalfDiagonal: longest distance a pixel center can have from a line
# while still touching the line with a corner point — i.e. half the pixel
# diagonal, 1/sqrt(2), assuming unit pixel pitch.
pixelHalfDiagonal = 1.0/math.sqrt(2.0)
def isTIFF(filename: str) -> bool:
    """Check if file name signifies a TIFF image (case-insensitive; None -> False)."""
    if filename is None:
        return False
    return filename.casefold().endswith(('.tif', '.tiff'))
def createImageStack(stack):
    """ Return an ImageStack object, if string is given. """
    if stack is None:
        return None
    if isinstance(stack, ImageStack):
        return stack
    if isinstance(stack, str):
        return ImageStack(stack)
    raise Exception("Not a valid image file stack definition: {}".format(stack))
class ImageFile:
    """Fundamental image file properties used for input and output.

    Bundles the file name, the pixel data type and the byte order
    (endianness); used to instruct the `Image.read()` and `Image.save()`
    routines.
    """

    def __init__(self, filename=None, dataType=None, byteOrder=None, flipByteOrder=False):
        self.filename = None
        self.dataType = None       # numpy.dtype, or None if unspecified
        self.byteOrder = None      # 'little' or 'big' endian, or None
        self.flipByteOrder = False

        self.setFilename(filename)
        self.setDataType(dataType)
        self.setByteOrder(byteOrder)
        self.setFlipByteOrder(flipByteOrder)

    def setFilename(self, filename):
        """Set the file name (may be None)."""
        self.filename = filename

    def getFilename(self) -> str:
        """Return the stored file name."""
        return self.filename

    def getFileBasename(self) -> str:
        """Return the base name (no directory part) of the stored file name."""
        return os.path.basename(self.filename)

    def getDataType(self) -> str:
        """Return the stored data type (numpy.dtype or None)."""
        return self.dataType

    def getByteOrder(self) -> str:
        """Return the stored byte order: 'little', 'big' or None."""
        return self.byteOrder

    def doFlipByteOrder(self) -> bool:
        """Return whether the byte order should be flipped on read/write."""
        return self.flipByteOrder

    def setDataType(self, dataType: str):
        """ Set data type, either from numpy.dtype object or string. """
        if isinstance(dataType, numpy.dtype):
            self.dataType = dataType
        elif dataType is None:
            self.dataType = None
        elif isinstance(dataType, str):  # from string
            dt = numpy.dtype(dataType)
            self.setDataType(dt)
        else:
            raise Exception("{} is generally not a valid data type.".format(dataType))

    def setByteOrder(self, byteOrder: str):
        """ Set endianness, do sanity check before. """
        # Fix: compare with None using `is`, not `==` (identity check, PEP 8).
        if byteOrder == 'little' or byteOrder == 'big' or byteOrder is None:
            self.byteOrder = byteOrder
        else:
            raise Exception("{} is not a valid byte order. Must be 'little' or 'big'.".format(byteOrder))

    def setFlipByteOrder(self, flipByteOrder: bool):
        """Set whether the byte order should be flipped on read/write."""
        self.flipByteOrder = flipByteOrder

    def isInt(self) -> bool:
        """ True if data type is supported int data type. """
        return numpy.issubdtype(self.dataType, numpy.integer)

    def isFloat(self) -> bool:
        """ True if data type is supported float data type. """
        return numpy.issubdtype(self.dataType, numpy.floating)
class ImageROI:
    """Rectangular pixel region of interest.

    Stored as integer corner coordinates, normalized so that (x0, y0) is the
    upper-left and (x1, y1) the lower-right corner.
    """

    def __init__(self, x0, y0, x1, y1):
        self.x0 = 0
        self.y0 = 0
        self.x1 = 0
        self.y1 = 0
        self.set(x0, y0, x1, y1)

    def __str__(self):
        return "({x0}, {y0}) -- ({x1}, {y1})".format(x0=self.x0, y0=self.y0, x1=self.x1, y1=self.y1)

    def set(self, x0, y0, x1, y1):
        """Store the corners, swapping coordinates given in the wrong order."""
        self.x0 = int(min(x0, x1))
        self.x1 = int(max(x0, x1))
        self.y0 = int(min(y0, y1))
        self.y1 = int(max(y0, y1))

    def width(self):
        """ROI width in pixels."""
        return self.x1 - self.x0

    def height(self):
        """ROI height in pixels."""
        return self.y1 - self.y0

    def area(self):
        """Number of pixels covered by the ROI."""
        return self.height() * self.width()

    def grow(self, amount):
        """Expand the ROI by `amount` pixels on every side."""
        delta = int(amount)
        self.set(self.x0 - delta, self.y0 - delta, self.x1 + delta, self.y1 + delta)
class Image:
""" Stores pixel data, provides image processing routines. """
def __init__(self, inputFile=None, outputFile=None):
self.inputFile = None # type ImageFile or string
self.outputFile = None # type ImageFile or string
self.px = 0 # 2D numpy array that contains the pixel values.
self.height = 0 # Image height in px.
self.width = 0 # Image width in px.
self.index = 0 # Slice number in a 3D volume.
self.rotation = None
self.flipHorz = False
self.flipVert = False
self.n_accumulations = 0 # Counts number of accumulated pictures for averaging (mean)
self.boundingBoxX0 = 0 # After cropping: bounding box offset relative to original image.
self.boundingBoxY0 = 0
self.resolution = 1 # After binning: new resolution relative to original image.
self.setInputFile(inputFile)
self.setOutputFile(outputFile)
    def __add__(self, other):
        """ Pixel-wise sum; returns a new Image, operands unchanged. """
        if self.dimensionsMatch(other):
            result = copy.deepcopy(self)
            result.px += other.px
            return result
        else:
            raise Exception("Cannot add images of different dimensions.")
    def __sub__(self, other):
        """ Pixel-wise difference; returns a new Image. """
        if self.dimensionsMatch(other):
            result = copy.deepcopy(self)
            result.px -= other.px
            return result
        else:
            raise Exception("Cannot subtract images of different dimensions.")
    def __mul__(self, other):
        """ Pixel-wise (element-wise) product; returns a new Image. """
        if self.dimensionsMatch(other):
            result = copy.deepcopy(self)
            result.px *= other.px
            return result
        else:
            raise Exception("Cannot multiply images of different dimensions.")
    def __truediv__(self, other):
        """ Pixel-wise division; pixels with a zero divisor become 0. """
        if self.dimensionsMatch(other):
            result = copy.deepcopy(self)
            # Divide only where the divisor is nonzero...
            result.px[numpy.nonzero(other.px)] /= other.px[numpy.nonzero(other.px)]
            # ...then force the zero-divisor positions to 0.
            result.px = numpy.where(other.px==0, 0, result.px)
            return result
        else:
            raise Exception("Cannot divide images of different dimensions.")
def __floordiv__(self, other):
if self.dimensionsMatch(other):
result = copy.deepcopy(self)
result.px[numpy.nonzero(other.px)] //= other.px[numpy.nonzero(other.px)]
result = numpy.where(other.px==0, 0, result.px)
return result
else:
raise Exception("Cannot divide images of different dimensions.")
def __del__(self):
""" Delete pixel map upon object destruction. """
self.px =0
def setInputFile(self, inputFile):
""" Set input file properties from ImageFile object or string. """
if isinstance(inputFile, ImageFile) or (inputFile is None):
self.inputFile = inputFile
elif isinstance(inputFile, str): # string given
self.inputFile = ImageFile(inputFile)
else:
raise Exception("{} is not a valid file identifier.")
def setOutputFile(self, outputFile):
""" Set output file properties from ImageFile object or string. """
if isinstance(outputFile, ImageFile) or (outputFile is None):
self.outputFile = outputFile
elif isinstance(outputFile, str): # string given
self.outputFile = ImageFile(outputFile)
else:
raise Exception("{} is not a valid file identifier.")
    def setHeight(self, height):
        """ Set image height in px (does not reallocate the pixel buffer). """
        self.height = height
    def setWidth(self, width):
        """ Set image width in px (does not reallocate the pixel buffer). """
        self.width = width
    def setIndex(self, index):
        """ Set image index position in 3D stack (slice number). """
        self.index = index
def shape(self, width, height, index=0, dataType=None, value=0):
""" Re-format image to given dimensions and data type. """
self.setWidth(width)
self.setHeight(height)
self.setIndex(index)
if dataType is None:
dataType = self.getInternalDataType()
self.erase(value=0, dataType=dataType)
    def shapeLike(self, otherImg, dataType=None):
        """ Re-format this image to otherImg's dimensions and index.

        All pixels are cleared to 0. dataType defaults to otherImg's
        internal data type.
        """
        self.setWidth(otherImg.getWidth())
        self.setHeight(otherImg.getHeight())
        self.setIndex(otherImg.getIndex())
        if dataType is None:
            dataType = otherImg.getInternalDataType()
        self.erase(value=0, dataType=dataType)
def erase(self, value=0, dataType=None):
""" Set all pixels to 'value'. """
w = self.getWidth()
h = self.getHeight()
if dataType is None:
dataType = self.getInternalDataType()
self.px = 0
self.px = numpy.full((h, w), fill_value=value, dtype=dataType)
    def getPixelMap(self):
        """ Return the raw 2D numpy pixel array (no copy). """
        return self.px
    def setPixelMap(self, px):
        """ Replace the pixel array (no copy; caller keeps ownership). """
        self.px = px
    def setPixel(self, x, y, value):
        # Row-major storage: the array is indexed [y][x].
        self.px[y][x] = value
    def getPixel(self, x, y):
        # Row-major storage: the array is indexed [y][x].
        return self.px[y][x]
def isSet(self):
""" Check if image has a valid width and height. """
if(self.getHeight() > 0):
if(self.getWidth() > 0):
return True
return False
def contains(self, x, y):
""" Check if (x, y) is within image dimensions. """
if x >= 0:
if y >= 0:
if x < self.getWidth():
if y < self.getHeight():
return True
return False
    def getWidth(self):
        """ Image width in px. """
        return self.width
    def getHeight(self):
        """ Image height in px. """
        return self.height
    def getNPixels(self):
        """ Calculate number of pixels in image. """
        return (self.getWidth() * self.getHeight())
    def getIndex(self):
        """ Slice number in a 3D volume. """
        return self.index
    def getBoundingBoxX0(self):
        """ X offset of the crop bounding box relative to the original image. """
        return self.boundingBoxX0
    def getBoundingBoxY0(self):
        """ Y offset of the crop bounding box relative to the original image. """
        return self.boundingBoxY0
    def getResolution(self):
        """ Resolution relative to the original image (changed by binning). """
        return self.resolution
    def getFileByteOrder(self):
        # NOTE(review): self.fileByteOrder is not initialized in the visible
        # __init__; calling this before it is assigned elsewhere raises
        # AttributeError — confirm where it gets set.
        return self.fileByteOrder
def max(self, ROI=None):
""" Return maximum intensity in image. """
# Take full image if no ROI is given
if ROI==None:
return numpy.amax(self.px)
return numpy.amax(self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1])
def min(self, ROI=None):
""" Return minimum intensity in image. """
# Take full image if no ROI is given
if ROI==None:
return numpy.amin(self.px)
return numpy.amin(self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1])
def mean(self, ROI=None):
""" Return arithmetic mean of the image grey values. """
# Take | |
# <gh_stars>100-1000  (stray dataset marker; kept as a comment so the module parses)
from .models import Account
# Module-level MongoDB connection defaults used by the helpers below.
DB_HOST = ["localhost"]
DB_PORT = 27017
def get_db(db_name):
    """ Return a handle to the MongoDB database `db_name`.

    Connects with the module-level DB_HOST / DB_PORT settings. The old
    function-local DB_HOST/DB_PORT shadowed those constants with identical
    values and served no purpose, so they were removed.

    NOTE(review): pymongo.Connection was removed in PyMongo 3.x;
    pymongo.MongoClient is the modern replacement — confirm the pinned
    pymongo version before changing the call.
    """
    import pymongo
    db = pymongo.Connection(DB_HOST, DB_PORT)[db_name]
    return db
def get_mongo_cursor(db_name, collection_name, max_docs=100):
    """ Return a cursor over `collection_name` capped at `max_docs` documents.

    db_name: database name (connection uses module-level DB_HOST/DB_PORT).
    collection_name: collection to read from.
    max_docs: maximum number of documents the returned cursor will yield.
    """
    import pymongo
    db = pymongo.Connection(host=DB_HOST,
                            port=DB_PORT)[db_name]
    collection = db[collection_name]
    cursor = collection.find()
    # 'count' was an attribute in very old pymongo and became a method in
    # later releases; handle both shapes.
    count = cursor.count
    if callable(count):
        count = count()
    if count >= max_docs:
        cursor = cursor[0:max_docs]
    return cursor
# Demo table for basic chart types: header row followed by one row per year.
data = [
    ['Year', 'Sales', 'Expenses', 'Items Sold', 'Net Profit'],
    ['2004', 1000, 400, 100, 600],
    ['2005', 1170, 460, 120, 710],
    ['2006', 660, 1120, 50, -460],
    ['2007', 1030, 540, 100, 490],
]
# Candlestick demo rows: a label followed by four numeric values.
# NOTE(review): the column order (low/open/close/high) is not stated here —
# confirm against the consuming chart library before relying on it.
candlestick_data = [['Mon', 20, 28, 38, 45],
                    ['Tue', 31, 38, 55, 66],
                    ['Wed', 50, 55, 77, 80],
                    ['Thu', 77, 77, 66, 50],
                    ['Fri', 68, 66, 22, 15]]
# TODO: Come up with a better example
# Multi-series scatter demo: rows keyed by (state, country) with two values.
scatter_multi_series_data = [
    ['state','country','Rainfall', 'Precipitation'],
    ['Uttar Pradesh','India',1, 2],
    ['Bihar','India',2, 3],
    ['Telangana','India',5, 7],
    ['Lahore','Pakistan',9,8],
    ['Hyderabad','Pakistan',8,7],
    ['Lahore','Pakistan',3,11]
]
# TODO: Come up with a better example
# Single-series scatter demo: one row per leader with two values.
scatter_single_series_data = [
    ['Leader', 'Rainfall', 'Precipitation'],
    ['Trump', 1, 2],
    ['Clinton', 2, 3],
    ['Trumps', 5, 7],
    ['George', 6, 9],
    ['Alex', 7, 4],
    ['Donald', 7, 8],
]
# Treemap demo rows: [node, parent, size, color]; region nodes carry 0s and
# act purely as parents for the leaf countries.
treemap_data = [
    ['Location', 'Parent', 'Market trade volume (size)', 'Market increase/decrease (color)'],
    ['Global', None, 0, 0],
    ['America', 'Global', 0, 0],
    ['Europe', 'Global', 0, 0],
    ['Asia', 'Global', 0, 0],
    ['Australia', 'Global', 0, 0],
    ['Africa', 'Global', 0, 0],
    ['Brazil', 'America', 11, 10],
    ['USA', 'America', 52, 31],
    ['Mexico', 'America', 24, 12],
    ['Canada', 'America', 16, -23],
    ['France', 'Europe', 42, -11],
    ['Germany', 'Europe', 31, -2],
    ['Sweden', 'Europe', 22, -13],
    ['Italy', 'Europe', 17, 4],
    ['UK', 'Europe', 21, -5],
    ['China', 'Asia', 36, 4],
    ['Japan', 'Asia', 20, -12],
    ['India', 'Asia', 40, 63],
    ['Laos', 'Asia', 4, 34],
    ['Mongolia', 'Asia', 1, -5],
    ['Israel', 'Asia', 12, 24],
    ['Iran', 'Asia', 18, 13],
    ['Pakistan', 'Asia', 11, -52],
    ['Egypt', 'Africa', 21, 0],
    ['S. Africa', 'Africa', 30, 43],
    ['Sudan', 'Africa', 12, 2],
    ['Congo', 'Africa', 10, 12],
    ['Zaire', 'Africa', 8, 10]]
# map_data = [
# ['Country', 'Value'],
# ['fo', 0],
# ['um', 1],
# ['us', 2],
# ['jp', 3],
# ['sc', 4],
# ['in', 5],
# ['fr', 6],
# ['fm', 7],
# ['cn', 8],
# ['pt', 9],
# ['sw', 10],
# ['sh', 11],
# ['br', 12],
# ['ki', 13],
# ['ph', 14],
# ['mx', 15],
# ['es', 16],
# ['bu', 17],
# ['mv', 18],
# ['sp', 19],
# ['gb', 20],
# ['gr', 21],
# ['as', 22],
# ['dk', 23],
# ['gl', 24],
# ['gu', 25],
# ['mp', 26],
# ['pr', 27],
# ['vi', 28],
# ['ca', 29],
# ['st', 30],
# ['cv', 31],
# ['dm', 32],
# ['nl', 33],
# ['jm', 34],
# ['ws', 35],
# ['om', 36],
# ['vc', 37],
# ['tr', 38],
# ['bd', 39],
# ['lc', 40],
# ['nr', 41],
# ['no', 42],
# ['kn', 43],
# ['bh', 44],
# ['to', 45],
# ['fi', 46],
# ['id', 47],
# ['mu', 48],
# ['se', 49],
# ['tt', 50],
# ['my', 51],
# ['pa', 52],
# ['pw', 53],
# ['tv', 54],
# ['mh', 55],
# ['cl', 56],
# ['th', 57],
# ['gd', 58],
# ['ee', 59],
# ['ad', 60],
# ['tw', 61],
# ['bb', 62],
# ['it', 63],
# ['mt', 64],
# ['vu', 65],
# ['sg', 66],
# ['cy', 67],
# ['lk', 68],
# ['km', 69],
# ['fj', 70],
# ['ru', 71],
# ['va', 72],
# ['sm', 73],
# ['kz', 74],
# ['az', 75],
# ['tj', 76],
# ['ls', 77],
# ['uz', 78],
# ['ma', 79],
# ['co', 80],
# ['tl', 81],
# ['tz', 82],
# ['ar', 83],
# ['sa', 84],
# ['pk', 85],
# ['ye', 86],
# ['ae', 87],
# ['ke', 88],
# ['pe', 89],
# ['do', 90],
# ['ht', 91],
# ['pg', 92],
# ['ao', 93],
# ['kh', 94],
# ['vn', 95],
# ['mz', 96],
# ['cr', 97],
# ['bj', 98],
# ['ng', 99],
# ['ir', 100],
# ['sv', 101],
# ['sl', 102],
# ['gw', 103],
# ['hr', 104],
# ['bz', 105],
# ['za', 106],
# ['cf', 107],
# ['sd', 108],
# ['cd', 109],
# ['kw', 110],
# ['de', 111],
# ['be', 112],
# ['ie', 113],
# ['kp', 114],
# ['kr', 115],
# ['gy', 116],
# ['hn', 117],
# ['mm', 118],
# ['ga', 119],
# ['gq', 120],
# ['ni', 121],
# ['lv', 122],
# ['ug', 123],
# ['mw', 124],
# ['am', 125],
# ['sx', 126],
# ['tm', 127],
# ['zm', 128],
# ['nc', 129],
# ['mr', 130],
# ['dz', 131],
# ['lt', 132],
# ['et', 133],
# ['er', 134],
# ['gh', 135],
# ['si', 136],
# ['gt', 137],
# ['ba', 138],
# ['jo', 139],
# ['sy', 140],
# ['mc', 141],
# ['al', 142],
# ['uy', 143],
# ['cnm', 144],
# ['mn', 145],
# ['rw', 146],
# ['so', 147],
# ['bo', 148],
# ['cm', 149],
# ['cg', 150],
# ['eh', 151],
# ['rs', 152],
# ['me', 153],
# ['tg', 154],
# ['la', 155],
# ['af', 156],
# ['ua', 157],
# ['sk', 158],
# ['jk', 159],
# ['bg', 160],
# ['qa', 161],
# ['li', 162],
# ['at', 163],
# ['sz', 164],
# ['hu', 165],
# ['ro', 166],
# ['ne', 167],
# ['lu', 168],
# ['ad', 169],
# ['ci', 170],
# ['lr', 171],
# ['bn', 172],
# ['iq', 173],
# ['ge', 174],
# ['gm', 175],
# ['ch', 176],
# ['td', 177],
# ['kv', 178],
# ['lb', 179],
# ['dj', 180],
# ['bi', 181],
# ['sr', 182],
# ['il', 183],
# ['ml', 184],
# ['sn', 185],
# ['gn', 186],
# ['zw', 187],
# ['pl', 188],
# ['mk', 189],
# ['py', 190],
# ['by', 191],
# ['ca', 192],
# ['bf', 193],
# ['na', 194],
# ['ly', 195],
# ['tn', 196],
# ['bt', 197],
# ['md', 198],
# ['ss', 199],
# ['bw', 200],
# ['bs', 201],
# ['nz', 202],
# ['cu', 203],
# ['ec', 204],
# ['au', 205],
# ['ve', 206],
# ['sb', 207],
# ['mg', 208],
# ['is', 209],
# ['eg', 210],
# ['kg', 211],
# ['np', 212]
# ]
# World-map demo series: [country code, value]; a trimmed (first 100 entries)
# version of the full table kept commented out above.
map_data = [
    ['Country', 'Value'],
    ['fo', 0],
    ['um', 1],
    ['us', 2],
    ['jp', 3],
    ['sc', 4],
    ['in', 5],
    ['fr', 6],
    ['fm', 7],
    ['cn', 8],
    ['pt', 9],
    ['sw', 10],
    ['sh', 11],
    ['br', 12],
    ['ki', 13],
    ['ph', 14],
    ['mx', 15],
    ['es', 16],
    ['bu', 17],
    ['mv', 18],
    ['sp', 19],
    ['gb', 20],
    ['gr', 21],
    ['as', 22],
    ['dk', 23],
    ['gl', 24],
    ['gu', 25],
    ['mp', 26],
    ['pr', 27],
    ['vi', 28],
    ['ca', 29],
    ['st', 30],
    ['cv', 31],
    ['dm', 32],
    ['nl', 33],
    ['jm', 34],
    ['ws', 35],
    ['om', 36],
    ['vc', 37],
    ['tr', 38],
    ['bd', 39],
    ['lc', 40],
    ['nr', 41],
    ['no', 42],
    ['kn', 43],
    ['bh', 44],
    ['to', 45],
    ['fi', 46],
    ['id', 47],
    ['mu', 48],
    ['se', 49],
    ['tt', 50],
    ['my', 51],
    ['pa', 52],
    ['pw', 53],
    ['tv', 54],
    ['mh', 55],
    ['cl', 56],
    ['th', 57],
    ['gd', 58],
    ['ee', 59],
    ['ad', 60],
    ['tw', 61],
    ['bb', 62],
    ['it', 63],
    ['mt', 64],
    ['vu', 65],
    ['sg', 66],
    ['cy', 67],
    ['lk', 68],
    ['km', 69],
    ['fj', 70],
    ['ru', 71],
    ['va', 72],
    ['sm', 73],
    ['kz', 74],
    ['az', 75],
    ['tj', 76],
    ['ls', 77],
    ['uz', 78],
    ['ma', 79],
    ['co', 80],
    ['tl', 81],
    ['tz', 82],
    ['ar', 83],
    ['sa', 84],
    ['pk', 85],
    ['ye', 86],
    ['ae', 87],
    ['ke', 88],
    ['pe', 89],
    ['do', 90],
    ['ht', 91],
    ['pg', 92],
    ['ao', 93],
    ['kh', 94],
    ['vn', 95],
    ['mz', 96],
    ['cr', 97],
    ['bj', 98],
    ['ng', 99]
]
# US map demo (lat/lon points, two categorical series by 'Winner').
map_data_us_multi_series_lat_lon = [
    ['Latitude', 'Longitude', 'Winner', 'Seats'],
    [32.380120, -86.300629, 'Trump', 10],
    [58.299740, -134.406794, 'Trump', 10],
    [33.448260, -112.075774, 'Trump', 10],
    [34.748655, -92.274494, 'Clinton', 20],
    [38.579065, -121.491014, 'Clinton', 20],
]
# US map demo keyed by Highmaps state codes, two categorical series.
map_data_us_multi_series = [
    ['State', 'Winner', 'Seats'],
    ['us-nj', 'Trump', 10],
    ['us-ri', 'Trump', 10],
    ['us-ma', 'Trump', 10],
    ['us-ct', 'Clinton', 20],
    ['us-md', 'Clinton', 20],
    ['us-ny', 'Clinton', 20],
    ['us-de', 'Trump', 20],
    ['us-fl', 'Trump', 20],
    ['us-oh', 'Trump', 20],
    ['us-pa', 'Trump', 20],
    ['us-li', 'Trump', 20],
    ['us-ca', 'Trump', 20],
    ['us-hi', 'Trump', 20],
    ['us-va', 'Trump', 31],
    ['us-mi', 'Trump', 31],
    ['us-in', 'Trump', 31],
    ['us-nc', 'Trump', 31],
    ['us-ga', 'Trump', 31],
    ['us-tn', 'Trump', 31],
    ['us-nh', 'Trump', 31],
    ['us-sc', 'Trump', 31],
    ['us-la', 'Trump', 31],
    ['us-ky', 'Trump', 31],
    ['us-wi', 'Trump', 12],
    ['us-wa', 'Trump', 12],
    ['us-al', 'Clinton', 12],
    ['us-mo', 'Clinton', 12],
    ['us-tx', 'Clinton', 45],
    ['us-wv', 'Clinton', 45],
]
# US map demo: lat/lon points with a single numeric value.
map_data_us_lat_lon = [
    ['Latitude', 'Longitude', 'Population'],
    [32.380120, -86.300629, 900],
    [58.299740, -134.406794, 387],
    [33.448260, -112.075774, 313],
]
# India map demo: lat/lon points with a single numeric value.
map_data_india_lat_lon = [
    ['Latitude', 'Longitude', 'Population'],
    [25.4851484, 83.2104426, 900],
    [27.7126407, 78.7391187, 387],
    [28.2699017, 79.1604971, 313],
]
# US choropleth demo: one value per Highmaps state code.
map_data_us = [
    ['State', 'Population'],
    ['us-nj', 438],
    ['us-ri', 387],
    ['us-ma', 313],
    ['us-ct', 271],
    ['us-md', 209],
    ['us-ny', 195],
    ['us-de', 155],
    ['us-fl', 114],
    ['us-oh', 107],
    ['us-pa', 106],
    ['us-li', 86],
    ['us-ca', 84],
    ['us-hi', 73],
    ['us-va', 69],
    ['us-mi', 68],
    ['us-in', 65],
    ['us-nc', 64],
    ['us-ga', 55],
    ['us-tn', 53],
    ['us-nh', 53],
    ['us-sc', 51],
    ['us-la', 40],
    ['us-ky', 39],
    ['us-wi', 38],
    ['us-wa', 34],
    ['us-al', 34],
    ['us-mo', 31],
    ['us-tx', 31],
    ['us-wv', 29],
    ['us-vt', 25],
    ['us-mn', 24],
    ['us-ms', 23],
    ['us-ia', 20],
    ['us-ar', 20],
    ['us-ok', 19],
    ['us-az', 17],
    ['us-co', 16],
    ['us-me', 16],
    ['us-or', 14],
    ['us-ks', 13],
    ['us-ut', 11],
    ['us-ne', 9],
    ['us-nv', 7],
    ['us-id', 6],
    ['us-nm', 6],
    ['us-sd', 4],
    ['us-nd', 4],
    ['us-mt', 2],
    ['us-wy', 2],
    ['us-ak', 1],
]
# US point-map demo: labelled lat/lon points with a date string.
map_data_us_point = [
    ['Lat', 'Lon', 'Name', 'Date'],
    [46.8797, -110.3626, 'trump', '25th February'],
    [41.4925, -99.9018, 'trump', '26th February'],
    [45.4925, -89.9018, 'trump', '27th February'],
    [32.1656, -82.9001, 'clinton', '25th February'],
    [33.1656, -81.9001, 'clinton', '26th February'],
]
# MongoDB demo series: [x, y] point pairs (Flot-style series objects below).
mongo_series_object_1 = [[440, 39],
                         [488, 29.25],
                         [536, 28],
                         [584, 29],
                         [632, 33.25],
                         [728, 28.5],
                         [776, 33.25],
                         [824, 28.5],
                         [872, 31],
                         [920, 30.75],
                         [968, 26.25]]
mongo_series_object_2 = [[400, 4],
                         [488, 0],
                         [536, 20],
                         [584, 8],
                         [632, 2],
                         [680, 36],
                         [728, 0],
                         [776, 0],
                         [824, 0],
                         [872, 4],
                         [920, 1],
                         [968, 0]]
# Both series share the label 'hours'.
# NOTE(review): identical labels look unintentional for a two-series chart —
# confirm against the consuming view.
mongo_data = [{'data': mongo_series_object_1, 'label': 'hours'},
              {'data': mongo_series_object_2, 'label': 'hours'}]
def create_demo_accounts():
Account.objects.all().delete()
# Create some rows
Account.objects.create(year="2004", sales=1000,
expenses=400, ceo="Welch")
Account.objects.create(year="2005", sales=1170,
expenses=460, ceo="Jobs")
Account.objects.create(year="2006", sales=660,
expenses=1120, ceo="Page")
Account.objects.create(year="2007", sales=1030,
| |
# Auto-generated YDK binding (YANG model Cisco-IOS-XR-infra-xtc-oper,
# revision 2017-08-24). Regenerate from the model instead of hand-editing.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the start/size leafs and the igp-srgb child container. """
    super(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation, self).__init__()
    self.yang_name = "srgb-information"
    self.yang_parent_name = "remote-node-protocol-identifier"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([("igp-srgb", ("igp_srgb", Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('start', YLeaf(YType.uint32, 'start')),
        ('size', YLeaf(YType.uint32, 'size')),
    ])
    self.start = None
    self.size = None
    self.igp_srgb = Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb()
    self.igp_srgb.parent = self
    self._children_name_map["igp_srgb"] = "igp-srgb"
    self._children_yang_names.add("igp-srgb")
    self._segment_path = lambda: "srgb-information"
def __setattr__(self, name, value):
    # Route assignments through the Entity machinery; only generated leafs
    # are settable directly.
    self._perform_setattr(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation, ['start', 'size'], name, value)
class IgpSrgb(Entity):
"""
IGP\-specific information
.. attribute:: isis
ISIS information
**type**\: :py:class:`Isis <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis>`
.. attribute:: ospf
OSPF information
**type**\: :py:class:`Ospf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf>`
.. attribute:: bgp
BGP information
**type**\: :py:class:`Bgp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp>`
.. attribute:: igp_id
IGP ID
**type**\: :py:class:`PceIgpInfoId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceIgpInfoId>`
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the igp-id leaf and the isis/ospf/bgp child containers. """
    super(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb, self).__init__()
    self.yang_name = "igp-srgb"
    self.yang_parent_name = "srgb-information"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([("isis", ("isis", Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis)), ("ospf", ("ospf", Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf)), ("bgp", ("bgp", Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('igp_id', YLeaf(YType.enumeration, 'igp-id')),
    ])
    self.igp_id = None
    self.isis = Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis()
    self.isis.parent = self
    self._children_name_map["isis"] = "isis"
    self._children_yang_names.add("isis")
    self.ospf = Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf()
    self.ospf.parent = self
    self._children_name_map["ospf"] = "ospf"
    self._children_yang_names.add("ospf")
    self.bgp = Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp()
    self.bgp.parent = self
    self._children_name_map["bgp"] = "bgp"
    self._children_yang_names.add("bgp")
    self._segment_path = lambda: "igp-srgb"
def __setattr__(self, name, value):
    # Only the generated 'igp_id' leaf is settable directly.
    self._perform_setattr(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb, ['igp_id'], name, value)
class Isis(Entity):
"""
ISIS information
.. attribute:: system_id
ISIS system ID
**type**\: str
.. attribute:: level
ISIS level
**type**\: int
**range:** 0..4294967295
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the ISIS leafs (system-id, level). """
    super(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis, self).__init__()
    self.yang_name = "isis"
    self.yang_parent_name = "igp-srgb"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('system_id', YLeaf(YType.str, 'system-id')),
        ('level', YLeaf(YType.uint32, 'level')),
    ])
    self.system_id = None
    self.level = None
    self._segment_path = lambda: "isis"
def __setattr__(self, name, value):
    # Only the generated leafs are settable directly.
    self._perform_setattr(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis, ['system_id', 'level'], name, value)
class Ospf(Entity):
"""
OSPF information
.. attribute:: router_id
OSPF router ID
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: area
OSPF area
**type**\: int
**range:** 0..4294967295
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the OSPF leafs (router-id, area). """
    super(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf, self).__init__()
    self.yang_name = "ospf"
    self.yang_parent_name = "igp-srgb"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('router_id', YLeaf(YType.str, 'router-id')),
        ('area', YLeaf(YType.uint32, 'area')),
    ])
    self.router_id = None
    self.area = None
    self._segment_path = lambda: "ospf"
def __setattr__(self, name, value):
    # Only the generated leafs are settable directly.
    self._perform_setattr(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf, ['router_id', 'area'], name, value)
class Bgp(Entity):
"""
BGP information
.. attribute:: router_id
BGP router ID
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: confed_asn
Confederation ASN
**type**\: int
**range:** 0..4294967295
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the BGP leafs (router-id, confed-asn). """
    super(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp, self).__init__()
    self.yang_name = "bgp"
    self.yang_parent_name = "igp-srgb"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('router_id', YLeaf(YType.str, 'router-id')),
        ('confed_asn', YLeaf(YType.uint32, 'confed-asn')),
    ])
    self.router_id = None
    self.confed_asn = None
    self._segment_path = lambda: "bgp"
def __setattr__(self, name, value):
    # Only the generated leafs are settable directly.
    self._perform_setattr(Pce.TopologyNodes.TopologyNode.Ipv6Link.RemoteNodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp, ['router_id', 'confed_asn'], name, value)
class AdjacencySid(Entity):
"""
Adjacency SIDs
.. attribute:: sid_prefix
Prefix
**type**\: :py:class:`SidPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.TopologyNodes.TopologyNode.Ipv6Link.AdjacencySid.SidPrefix>`
.. attribute:: sid_type
SID Type
**type**\: :py:class:`Sid <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Sid>`
.. attribute:: mpls_label
MPLS Label
**type**\: int
**range:** 0..4294967295
.. attribute:: domain_identifier
Domain identifier
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: rflag
R Flag
**type**\: bool
.. attribute:: nflag
N Flag
**type**\: bool
.. attribute:: pflag
P Flag
**type**\: bool
.. attribute:: eflag
E Flag
**type**\: bool
.. attribute:: vflag
V Flag
**type**\: bool
.. attribute:: lflag
L Flag
**type**\: bool
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the adjacency-SID leafs and the sid-prefix child. """
    super(Pce.TopologyNodes.TopologyNode.Ipv6Link.AdjacencySid, self).__init__()
    self.yang_name = "adjacency-sid"
    self.yang_parent_name = "ipv6-link"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([("sid-prefix", ("sid_prefix", Pce.TopologyNodes.TopologyNode.Ipv6Link.AdjacencySid.SidPrefix))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('sid_type', YLeaf(YType.enumeration, 'sid-type')),
        ('mpls_label', YLeaf(YType.uint32, 'mpls-label')),
        ('domain_identifier', YLeaf(YType.uint64, 'domain-identifier')),
        ('rflag', YLeaf(YType.boolean, 'rflag')),
        ('nflag', YLeaf(YType.boolean, 'nflag')),
        ('pflag', YLeaf(YType.boolean, 'pflag')),
        ('eflag', YLeaf(YType.boolean, 'eflag')),
        ('vflag', YLeaf(YType.boolean, 'vflag')),
        ('lflag', YLeaf(YType.boolean, 'lflag')),
    ])
    self.sid_type = None
    self.mpls_label = None
    self.domain_identifier = None
    self.rflag = None
    self.nflag = None
    self.pflag = None
    self.eflag = None
    self.vflag = None
    self.lflag = None
    self.sid_prefix = Pce.TopologyNodes.TopologyNode.Ipv6Link.AdjacencySid.SidPrefix()
    self.sid_prefix.parent = self
    self._children_name_map["sid_prefix"] = "sid-prefix"
    self._children_yang_names.add("sid-prefix")
    self._segment_path = lambda: "adjacency-sid"
def __setattr__(self, name, value):
    # Only the generated leafs are settable directly.
    self._perform_setattr(Pce.TopologyNodes.TopologyNode.Ipv6Link.AdjacencySid, ['sid_type', 'mpls_label', 'domain_identifier', 'rflag', 'nflag', 'pflag', 'eflag', 'vflag', 'lflag'], name, value)
class SidPrefix(Entity):
"""
Prefix
.. attribute:: af_name
AFName
**type**\: :py:class:`PceAfId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceAfId>`
.. attribute:: ipv4
IPv4 address type
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6
IPv6 address type
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the sid-prefix leafs (af-name, ipv4, ipv6). """
    super(Pce.TopologyNodes.TopologyNode.Ipv6Link.AdjacencySid.SidPrefix, self).__init__()
    self.yang_name = "sid-prefix"
    self.yang_parent_name = "adjacency-sid"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('af_name', YLeaf(YType.enumeration, 'af-name')),
        ('ipv4', YLeaf(YType.str, 'ipv4')),
        ('ipv6', YLeaf(YType.str, 'ipv6')),
    ])
    self.af_name = None
    self.ipv4 = None
    self.ipv6 = None
    self._segment_path = lambda: "sid-prefix"
def __setattr__(self, name, value):
    # Only the generated leafs are settable directly.
    self._perform_setattr(Pce.TopologyNodes.TopologyNode.Ipv6Link.AdjacencySid.SidPrefix, ['af_name', 'ipv4', 'ipv6'], name, value)
class PrefixInfos(Entity):
"""
Prefixes database in XTC
.. attribute:: prefix_info
PCE prefix information
**type**\: list of :py:class:`PrefixInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PrefixInfos.PrefixInfo>`
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the prefix-info child list for the prefix database. """
    super(Pce.PrefixInfos, self).__init__()
    self.yang_name = "prefix-infos"
    self.yang_parent_name = "pce"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([("prefix-info", ("prefix_info", Pce.PrefixInfos.PrefixInfo))])
    self._leafs = OrderedDict()
    self.prefix_info = YList(self)
    self._segment_path = lambda: "prefix-infos"
    self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/%s" % self._segment_path()
def __setattr__(self, name, value):
    # No directly settable leafs on this container.
    self._perform_setattr(Pce.PrefixInfos, [], name, value)
class PrefixInfo(Entity):
"""
PCE prefix information
.. attribute:: node_identifier (key)
Node ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: node_protocol_identifier
Node protocol identifier
**type**\: :py:class:`NodeProtocolIdentifier <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier>`
.. attribute:: node_identifier_xr
Node identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: address
Prefix address
**type**\: list of :py:class:`Address <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PrefixInfos.PrefixInfo.Address>`
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register leafs, keyed path, and children of a prefix-info entry. """
    super(Pce.PrefixInfos.PrefixInfo, self).__init__()
    self.yang_name = "prefix-info"
    self.yang_parent_name = "prefix-infos"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = ['node_identifier']
    self._child_container_classes = OrderedDict([("node-protocol-identifier", ("node_protocol_identifier", Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier))])
    self._child_list_classes = OrderedDict([("address", ("address", Pce.PrefixInfos.PrefixInfo.Address))])
    self._leafs = OrderedDict([
        ('node_identifier', YLeaf(YType.int32, 'node-identifier')),
        ('node_identifier_xr', YLeaf(YType.uint32, 'node-identifier-xr')),
    ])
    self.node_identifier = None
    self.node_identifier_xr = None
    self.node_protocol_identifier = Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier()
    self.node_protocol_identifier.parent = self
    self._children_name_map["node_protocol_identifier"] = "node-protocol-identifier"
    self._children_yang_names.add("node-protocol-identifier")
    self.address = YList(self)
    # List entries are addressed by their node-identifier key.
    self._segment_path = lambda: "prefix-info" + "[node-identifier='" + str(self.node_identifier) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/prefix-infos/%s" % self._segment_path()
def __setattr__(self, name, value):
    # Only the generated leafs are settable directly.
    self._perform_setattr(Pce.PrefixInfos.PrefixInfo, ['node_identifier', 'node_identifier_xr'], name, value)
class NodeProtocolIdentifier(Entity):
"""
Node protocol identifier
.. attribute:: node_name
Node Name
**type**\: str
.. attribute:: ipv4_bgp_router_id_set
True if IPv4 BGP router ID is set
**type**\: bool
.. attribute:: ipv4_bgp_router_id
IPv4 TE router ID
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv4te_router_id_set
True if IPv4 TE router ID is set
**type**\: bool
.. attribute:: ipv4te_router_id
IPv4 BGP router ID
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: igp_information
IGP information
**type**\: list of :py:class:`IgpInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier.IgpInformation>`
.. attribute:: srgb_information
SRGB information
**type**\: list of :py:class:`SrgbInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier.SrgbInformation>`
"""
# Auto-generated YDK binding metadata; regenerate from the YANG model.
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
    """ Register the node-protocol-identifier leafs and child lists. """
    super(Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier, self).__init__()
    self.yang_name = "node-protocol-identifier"
    self.yang_parent_name = "prefix-info"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([("igp-information", ("igp_information", Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier.IgpInformation)), ("srgb-information", ("srgb_information", Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier.SrgbInformation))])
    self._leafs = OrderedDict([
        ('node_name', YLeaf(YType.str, 'node-name')),
        ('ipv4_bgp_router_id_set', YLeaf(YType.boolean, 'ipv4-bgp-router-id-set')),
        ('ipv4_bgp_router_id', YLeaf(YType.str, 'ipv4-bgp-router-id')),
        ('ipv4te_router_id_set', YLeaf(YType.boolean, 'ipv4te-router-id-set')),
        ('ipv4te_router_id', YLeaf(YType.str, 'ipv4te-router-id')),
    ])
    self.node_name = None
    self.ipv4_bgp_router_id_set = None
    self.ipv4_bgp_router_id = None
    self.ipv4te_router_id_set = None
    self.ipv4te_router_id = None
    self.igp_information = YList(self)
    self.srgb_information = YList(self)
    self._segment_path = lambda: "node-protocol-identifier"
def __setattr__(self, name, value):
    # Only the generated leafs are settable directly.
    self._perform_setattr(Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier, ['node_name', 'ipv4_bgp_router_id_set', 'ipv4_bgp_router_id', 'ipv4te_router_id_set', 'ipv4te_router_id'], name, value)
class IgpInformation(Entity):
"""
IGP information
.. attribute:: igp
IGP\-specific information
**type**\: :py:class:`Igp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PrefixInfos.PrefixInfo.NodeProtocolIdentifier.IgpInformation.Igp>`
.. attribute:: domain_identifier
Domain identifier
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: autonomous_system_number
Autonomous System Number
**type**\: | |
in e.es))
    # Expression visitors: each renders one AST node to its surface syntax.
    # The returned strings ARE the program output, so they are kept verbatim.
    def visit_ETupleGet(self, e):
        return "({}).{}".format(self.visit(e.e), e.index)
    def visit_ELet(self, e):
        return "{} {} = {} in {}".format(self.format_keyword("let"), e.body_function.arg.id, self.visit(e.e), self.visit(e.body_function.body))
    def visit_CPull(self, c):
        return "{} {lt}- {}".format(c.id, self.visit(c.e), lt=self.format_lt())
    def visit_CCond(self, c):
        return self.visit(c.e)
    def visit_ADT(self, e, *args, **kwargs):
        # Generic fallback for algebraic data types: ClassName(child, ...).
        return "{}({})".format(type(e).__name__, ", ".join(self.visit(x) for x in e.children()))
    def visit_object(self, e, *args, **kwargs):
        # Last-resort fallback: warn once on stderr, show repr().
        print("Warning: implement prettyprinting for {}".format(type(e).__name__), file=sys.stderr)
        return repr(e)
    # Statement visitors: each takes an `indent` prefix string and returns
    # the statement's rendered text, recursing with a larger indent.
    def visit_SNoOp(self, s, indent=""):
        return "{}{};".format(indent, self.format_keyword("pass"))
    def visit_SCall(self, s, indent=""):
        return "{}{}.{}({});".format(indent, self.visit(s.target), s.func, ", ".join(self.visit(arg) for arg in s.args))
    def visit_SAssign(self, s, indent=""):
        return "{}{} = {};".format(indent, self.visit(s.lhs), self.visit(s.rhs))
    def visit_SDecl(self, s, indent=""):
        return "{}{} {} = {};".format(indent, self.format_keyword("let"), self.visit(s.var), self.visit(s.val))
    def visit_SSeq(self, s, indent=""):
        return "{}\n{}".format(self.visit(s.s1, indent), self.visit(s.s2, indent))
    def visit_SMapUpdate(self, s, indent=""):
        return "{indent}{} {} {} {}:\n{}".format(
            self.format_keyword("with"),
            self.visit(target_syntax.EMapGet(s.map, s.key)),
            self.format_keyword("as"),
            s.val_var.id,
            self.visit(s.change, indent + " "),
            indent=indent)
    def visit_SMapPut(self, s, indent=""):
        return "{indent}{} = {};".format(
            self.visit(target_syntax.EMapGet(s.map, s.key)),
            self.visit(s.value),
            indent=indent)
    def visit_SMapDel(self, s, indent=""):
        return "{indent}{} {};".format(
            self.format_keyword("del"),
            self.visit(target_syntax.EMapGet(s.map, s.key)),
            indent=indent)
    def visit_SForEach(self, s, indent=""):
        return "{}{For} {} {In} {}:\n{}".format(
            indent,
            s.loop_var.id,
            self.visit(s.iter),
            self.visit(s.body, indent + " "),
            For=self.format_keyword("for"),
            In=self.format_keyword("in"))
    def visit_SIf(self, s, indent=""):
        # Omit the else-block entirely when the else branch is a no-op.
        if isinstance(s.else_branch, syntax.SNoOp):
            return "{indent}{If} {} {{\n{}\n{indent}}}".format(self.visit(s.cond), self.visit(s.then_branch, indent + " "), indent=indent, If=self.format_keyword("if"))
        return "{indent}{If} {} {{\n{}\n{indent}}} {Else} {{\n{}\n{indent}}}".format(
            self.visit(s.cond),
            self.visit(s.then_branch, indent + " "),
            self.visit(s.else_branch, indent + " "),
            indent=indent,
            If=self.format_keyword("if"),
            Else=self.format_keyword("else"))
# Shared printer instance used by the module-level helpers below.
_PRETTYPRINTER = PrettyPrinter()
def pprint(ast, format="plain"):
    """Pretty-print any AST node; `format` selects the output style (default "plain").
    Not thread-safe: it mutates the shared module-level printer instance."""
    _PRETTYPRINTER.format = format
    return _PRETTYPRINTER.visit(ast)
def pprint_unpacked(e, out=None):
    """Pretty-print `e` in unpacked form: one `var = expr` line per entry of its
    representation, followed by a final `return ...` line.

    Writes to `out` (a file-like object) when given; otherwise returns a string.
    """
    if out is None:
        from io import StringIO
        with StringIO() as f:
            pprint_unpacked(e, out=f)
            # BUG FIX: StringIO exposes getvalue(), not value(); the old code
            # raised AttributeError whenever `out` was omitted.
            return f.getvalue()
    out = out.write
    rep, ret = unpack_representation(e)
    # renamed loop variables so the parameter `e` is not shadowed
    for var, exp in rep:
        out("{} = {}\n".format(pprint(var), pprint(exp)))
    out("return {}\n".format(pprint(ret)))
def free_funcs(e : syntax.Exp) -> { str : syntax.TFunc }:
    """Map each function name called anywhere inside `e` to its inferred TFunc
    type (argument types -> result type), in deterministic order.

    Asserts that every call site of a given function agrees on the type.
    """
    out = collections.OrderedDict()
    for node in all_exps(e):
        if not isinstance(node, syntax.ECall):
            continue
        func_type = syntax.TFunc(tuple(arg.type for arg in node.args), node.type)
        if node.func in out:
            assert out[node.func] == func_type
        else:
            out[node.func] = func_type
    return out
def free_vars(exp, counts=False):
    """Find all free variables in an AST.
    This function can be used on expressions, statements, and methods.
    If counts=False (the default), then this function returns an OrderedSet of
    EVar objects in a deterministic order.
    If counts=True, then this function returns an OrderedDict in a
    deterministic order mapping each EVar to the number of times it occurs in
    the AST.
    """
    res = collections.OrderedDict()
    # bound[v] > 0  <=>  v is currently bound at the point being examined
    bound = collections.defaultdict(int)
    # stack of scopes; each scope lists the vars bound in it
    scopes = [[]]
    def push_scope():
        scopes.append([])
    def bind(x):
        bound[x] += 1
        scopes[-1].append(x)
    class Bind(object):
        def __init__(self, var):
            self.var = var
        def exec(self):
            bind(self.var)
    class PopScope():
        def exec(self):
            scope = scopes.pop()
            for v in scope:
                bound[v] -= 1
    class PushScope():
        def exec(self):
            push_scope()
    # Find free variables using a work stack (to avoid running out of stack
    # frames on large expressions). The work stack contains AST objects whose
    # free variables are yet to be added to `res`. Additionally, it contains
    # Bind, PushScope, and PopScope objects indicating when scopes start and
    # end and where bound variable are introduced.
    # NOTE: the stack is LIFO, so children pushed with reversed(...) are
    # processed in their original left-to-right order.
    stk = [exp]
    while stk:
        x = stk.pop()
        if isinstance(x, PushScope) or isinstance(x, PopScope) or isinstance(x, Bind):
            x.exec()
        elif isinstance(x, syntax.EVar):
            if not bound[x]:
                res[x] = res.get(x, 0) + 1
        elif isinstance(x, target_syntax.EStateVar):
            # state expressions ignore runtime bindings entirely, so recurse
            # with a fresh (empty) scope and merge the counts
            subres = free_vars(x.e, counts=True)
            for k, v in subres.items():
                res[k] = res.get(k, 0) + v
        elif isinstance(x, target_syntax.ELambda):
            # the lambda argument is bound while visiting its body
            push_scope()
            bind(x.arg)
            stk.append(PopScope())
            stk.append(x.body)
        elif isinstance(x, syntax.EListComprehension):
            raise NotImplementedError()
        elif isinstance(x, syntax.Method):
            # method arguments are bound across the assumptions and body/ret
            push_scope()
            args = [syntax.EVar(a).with_type(t) for (a, t) in x.args]
            for a in args:
                bind(a)
            stk.append(PopScope())
            if isinstance(x, syntax.Query):
                stk.extend(reversed(x.assumptions))
                stk.append(x.ret)
            elif isinstance(x, syntax.Op):
                stk.extend(reversed(x.assumptions))
                stk.append(x.body)
            else:
                raise NotImplementedError()
        elif isinstance(x, syntax.SDecl):
            v = x.var
            if hasattr(x.val, "type"):
                v = v.with_type(x.val.type)
            # the declared var becomes bound only AFTER its value is visited
            stk.append(Bind(v))
            stk.append(x.val)
        elif isinstance(x, syntax.SIf):
            # each branch gets its own scope; the condition is visited first
            for branch in (x.then_branch, x.else_branch):
                stk.append(PopScope())
                stk.append(branch)
                stk.append(PushScope())
            stk.append(x.cond)
        elif isinstance(x, syntax.SForEach):
            # loop_var is bound only within the loop body, not the iterable
            stk.append(PopScope())
            stk.append(x.body)
            stk.append(Bind(x.loop_var))
            stk.append(PushScope())
            stk.append(x.iter)
        elif isinstance(x, target_syntax.SWhile):
            stk.append(PopScope())
            stk.append(x.body)
            stk.append(PushScope())
            stk.append(x.e)
        elif isinstance(x, target_syntax.SEscapableBlock):
            push_scope()
            stk.append(PopScope())
            stk.append(x.body)
        elif isinstance(x, target_syntax.SMapUpdate):
            # val_var is bound within the change statement only
            stk.append(PopScope())
            stk.append(x.change)
            stk.append(Bind(x.val_var))
            stk.append(PushScope())
            stk.append(x.key)
            stk.append(x.map)
        elif isinstance(x, common.ADT):
            stk.extend(reversed(x.children()))
        elif isinstance(x, list) or isinstance(x, tuple):
            stk.extend(reversed(x))
        elif isinstance(x, (str, int, float, Fraction)):
            # leaf literals contain no variables
            continue
        else:
            raise NotImplementedError(repr(x))
    if not counts:
        res = common.OrderedSet(res.keys())
    return res
def free_vars_and_funcs(e : syntax.Exp):
    """Iterate over the names of all free variables and functions in `e`."""
    yield from (var.id for var in free_vars(e))
    yield from free_funcs(e)
def all_exps(x):
    """Yield every Exp node reachable from `x`, descending through lists,
    tuples, and ADT children."""
    pending = [x]
    while pending:
        node = pending.pop()
        if isinstance(node, (tuple, list)):
            pending.extend(node)
            continue
        if isinstance(node, syntax.Exp):
            yield node
        if isinstance(node, common.ADT):
            pending.extend(node.children())
# Markers recording where a bound variable's value comes from:
Unknown = collections.namedtuple("Unknown", [])      # no information available
ElemOf = collections.namedtuple("ElemOf", ["bag"])   # some element of the collection `bag`
Exactly = collections.namedtuple("Exactly", ["e"])   # exactly the value of expression `e`
# A subexpression context produced by FragmentEnumerator: the enclosing
# toplevel AST, the subexpression `e`, the facts known to hold there, the
# mutations executed beforehand, a function that rebuilds the toplevel with
# `e` replaced, the variables bound at that point (with their sources), and
# the pool the subexpression is evaluated in.
Context = collections.namedtuple("Context", [
    "toplevel",
    "e",
    "facts",
    "mutations",
    "replace_e_with",
    "bound_vars",
    "var_sources",
    "pool"])
class FragmentEnumerator(common.Visitor):
# This visitor's methods use a weird pattern:
# yield (lambda r: ...)(r)
# This is because lambdas are capture-by-reference in Python! Since r is
# overwritten at each loop iteration, that's a problem. Defining a fresh
# function and immediately calling it is a simple way to force
# capture-by-value for r instead.
    def __init__(self, toplevel, pool=pools.RUNTIME_POOL):
        # `toplevel` is the root AST this enumerator walks; `pool` is the pool
        # the root is evaluated in.
        self.toplevel = toplevel
        self.bound_vars = []   # list of (EVar, source) pairs, innermost last
        self.assumptions = []  # facts known to hold at the current position
        self.pool_stack = [pool]  # current pool; EStateVar pushes STATE_POOL
        self.mutations = []    # statements executed before the current position
def currently_bound(self) -> {syntax.EVar}:
return common.OrderedSet(v for v, src in self.bound_vars)
    def current_assumptions(self) -> [syntax.Exp]:
        """Defensive copy of the facts assumed at the current position."""
        return list(self.assumptions)
    @contextmanager
    def intro_vars(self, vs):
        """Bring (var, source) pairs into scope for the duration of the context.
        Assumptions mentioning the new variables are dropped (they referred to
        different variables of the same name) and replaced by membership
        (ElemOf) or equality (Exactly) facts where applicable; assumptions are
        restored on exit via save_property.
        NOTE(review): bound_vars is popped after the yield with no try/finally,
        so it is not restored if the body raises — confirm whether intended.
        """
        vs = common.make_random_access(vs)
        for v, src in vs:
            assert isinstance(v, syntax.EVar)
            assert isinstance(src, ElemOf) or isinstance(src, Exactly) or isinstance(src, Unknown)
        self.bound_vars.extend(vs)
        with common.save_property(self, "assumptions"):
            for v, src in vs:
                # any old fact mentioning v is about a *different* v now
                self.assumptions = [a for a in self.assumptions if v not in free_vars(a)]
            for v, src in vs:
                if isinstance(src, ElemOf):
                    if v not in free_vars(src.bag):
                        self.assumptions.append(target_syntax.EDeepIn(v, src.bag))
                elif isinstance(src, Exactly):
                    if v not in free_vars(src.e):
                        self.assumptions.append(syntax.EBinOp(v, "===", src.e).with_type(BOOL))
            yield
        for i in range(len(vs)):
            self.bound_vars.pop()
@contextmanager
def clear_bound(self):
old_bound = self.bound_vars
self.bound_vars = []
yield
self.bound_vars = old_bound
    @contextmanager
    def push_assumptions(self, new_assumptions : [syntax.Exp] = []):
        """Temporarily extend the assumption list; restored on exit.
        The mutable default argument is safe here: the list is only read and
        concatenated, never mutated."""
        with common.save_property(self, "assumptions"):
            self.assumptions = self.assumptions + new_assumptions
            yield
    @contextmanager
    def push_block(self):
        """Snapshot self.mutations so mutations appended inside the context are
        discarded on exit."""
        with common.save_property(self, "mutations"):
            self.mutations = list(self.mutations)
            yield
    def make_ctx(self, e):
        """Package the enumerator's current state into a Context for
        subexpression `e` (replacement function starts as the identity)."""
        return Context(
            toplevel=self.toplevel,
            e=e,
            facts=self.current_assumptions(),
            mutations=syntax.seq(self.mutations),
            replace_e_with=common.identity_func,
            bound_vars=self.currently_bound(),
            var_sources=collections.OrderedDict(self.bound_vars),
            pool=self.pool_stack[-1])
    def update_repl(self, ctx, new_replace):
        """Return `ctx` with its replacement function wrapped:
        new_replace takes the old replacement fn and yields the new one."""
        return ctx._replace(replace_e_with=new_replace(ctx.replace_e_with))
    def visit_assumptions_seq(self, assumptions, i=0):
        """Enumerate contexts inside assumptions[i:], adding each assumption to
        self.assumptions before visiting the ones after it.
        NOTE(review): the rebuilt tuple is assumptions[:i] + (x,) + assumptions[i:],
        which keeps the original assumptions[i] alongside the replacement, and
        `x` is not passed through `r` as the other visitors do — confirm intended.
        """
        if i >= len(assumptions):
            return
        for ctx in self.visit(assumptions[i]):
            yield self.update_repl(ctx, lambda r: lambda x: tuple(assumptions[:i]) + (x,) + tuple(assumptions[i:]))
        self.assumptions.append(assumptions[i])
        yield from self.visit_assumptions_seq(assumptions, i+1)
    def recurse_with_assumptions_about_bound_var(self, e : target_syntax.ELambda, src):
        """Yield a context for the lambda itself, then contexts for its body
        with the lambda's argument bound (sourced from `src`)."""
        yield self.make_ctx(e)
        with self.intro_vars([(e.arg, src)]):
            for ctx in self.visit(e.body):
                yield self.update_repl(ctx, lambda r: lambda x: target_syntax.ELambda(e.arg, r(x)))
    def visit_ELambda(self, obj):
        """Lambdas are never visited directly: the parent node must say where the
        bound variable comes from (see recurse_with_assumptions_about_bound_var)."""
        # The parent should tell us something about where the var comes from
        raise NotImplementedError(pprint(self.toplevel))
    def visit_EStateVar(self, e):
        """
        A very tricky case: the set of bound variables gets cleared for its
        children. Consider
        Filter {\v -> EStateVar(v)} C
        The `v` in the EStateVar is *different* from the `v` bound by the filter
        predicate, since this expression is conceptually equivalent to
        state s = v
        Filter {\v -> s} C
        """
        yield self.make_ctx(e)
        t = e.type
        # children are evaluated in the state pool, with no runtime bindings
        self.pool_stack.append(pools.STATE_POOL)
        with self.clear_bound():
            for ctx in self.visit(e.e):
                yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EStateVar(r(x)).with_type(t))
        # NOTE(review): the pop is skipped if the generator is abandoned
        # mid-iteration — confirm callers always exhaust it
        self.pool_stack.pop()
    def visit_EFilter(self, e):
        """Contexts for a filter node: the node itself, the bag, then the
        predicate with its argument bound to an element of the bag."""
        yield self.make_ctx(e)
        t = e.type
        for ctx in self.visit(e.e):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EFilter(r(x), e.predicate).with_type(t))
        for ctx in self.recurse_with_assumptions_about_bound_var(e.predicate, ElemOf(e.e)):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EFilter(e.e, r(x)).with_type(t))
    def visit_EMap(self, e):
        """Contexts for a map node: the node itself, the bag, then the transform
        function with its argument bound to an element of the bag."""
        yield self.make_ctx(e)
        t = e.type
        for ctx in self.visit(e.e):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EMap(r(x), e.transform_function).with_type(t))
        for ctx in self.recurse_with_assumptions_about_bound_var(e.transform_function, ElemOf(e.e)):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EMap(e.e, r(x)).with_type(t))
    def visit_EFlatMap(self, e):
        """Contexts for a flat-map node: the node itself, the bag, then the
        transform function with its argument bound to an element of the bag."""
        yield self.make_ctx(e)
        t = e.type
        for ctx in self.visit(e.e):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EFlatMap(r(x), e.transform_function).with_type(t))
        for ctx in self.recurse_with_assumptions_about_bound_var(e.transform_function, ElemOf(e.e)):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EFlatMap(e.e, r(x)).with_type(t))
    def visit_EMakeMap2(self, e):
        """Contexts for a map-construction node: the node itself, the key bag,
        then the value function with its argument bound to a key."""
        yield self.make_ctx(e)
        t = e.type
        for ctx in self.visit(e.e):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EMakeMap2(r(x), e.value_function).with_type(t))
        for ctx in self.recurse_with_assumptions_about_bound_var(e.value_function, ElemOf(e.e)):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EMakeMap2(e.e, r(x)).with_type(t))
    def visit_EArgMin(self, e):
        """Contexts for an arg-min node: the node itself, the bag, then the key
        function with its argument bound to an element of the bag."""
        yield self.make_ctx(e)
        t = e.type
        for ctx in self.visit(e.e):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EArgMin(r(x), e.key_function).with_type(t))
        for ctx in self.recurse_with_assumptions_about_bound_var(e.key_function, ElemOf(e.e)):
            yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EArgMin(e.e, r(x)).with_type(t))
def visit_EArgMax(self, e):
yield self.make_ctx(e)
t = e.type
for ctx in self.visit(e.e):
yield self.update_repl(ctx, lambda r: lambda x: target_syntax.EArgMax(r(x), e.key_function).with_type(t))
for ctx in self.recurse_with_assumptions_about_bound_var(e.key_function, | |
<filename>old_projects/eoc/chapter9.py
import fractions
import math
import scipy
from big_ol_pile_of_manim_imports import *
class Chapter9OpeningQuote(OpeningQuote):
    """Opening-quote scene; text, highlight colors, and author come from CONFIG."""
    CONFIG = {
        "quote" : [
            "We often hear that mathematics consists mainly of",
            "proving theorems.",
            "Is a writer's job mainly that of\\\\",
            "writing sentences?"
        ],
        "highlighted_quote_terms" : {
            "proving theorems." : MAROON_B,
            "writing sentences?" : MAROON_B,
        },
        "author" : "<NAME>",
    }
class AverageOfContinuousVariable(GraphScene):
    """Draws a cubic graph, marks the bounds with dashed lines, sweeps a sample
    line between them, and asks for the average value of f(x)."""
    CONFIG = {
        "bounds" : [1, 7],
        "bound_colors" : [RED, GREEN],
    }
    def construct(self):
        self.setup_axes()
        graph = self.get_graph(
            lambda x : 0.1*x*(x-3)*(x-6) + 4
        )
        graph_label = self.get_graph_label(graph, "f(x)")
        boundary_lines = self.get_vertical_lines_to_graph(
            graph, *self.bounds, num_lines = 2,
            line_class = DashedLine
        )
        for line, color in zip(boundary_lines, self.bound_colors):
            line.set_color(color)
        v_line = self.get_vertical_line_to_graph(
            self.bounds[0], graph, color = YELLOW,
        )
        question = TextMobject(
            "What is the average \\\\ value of $f(x)$?"
        )
        question.next_to(boundary_lines, UP)
        self.play(ShowCreation(graph), Write(graph_label))
        self.play(ShowCreation(boundary_lines))
        self.play(FadeIn(
            question,
            run_time = 2,
            submobject_mode = "lagged_start",
        ))
        self.play(ShowCreation(v_line))
        # sweep the sample line to each bound in turn
        for bound in reversed(self.bounds):
            self.play(self.get_v_line_change_anim(
                v_line, graph, bound,
                run_time = 3,
            ))
            self.wait()
        self.wait()
    def get_v_line_change_anim(self, v_line, graph, target_x, **kwargs):
        """Animation sliding `v_line` along `graph` from its current x to `target_x`."""
        start_x = self.x_axis.point_to_number(v_line.get_bottom())
        def update(v_line, alpha):
            new_x = interpolate(start_x, target_x, alpha)
            v_line.put_start_and_end_on(
                self.coords_to_point(new_x, 0),
                self.input_to_graph_point(new_x, graph)
            )
            return v_line
        return UpdateFromAlphaFunc(v_line, update, **kwargs)
class ThisVideo(TeacherStudentsScene):
    """Teacher introduces this episode of the series, highlighting video 9."""
    def construct(self):
        series = VideoSeries()
        series.to_edge(UP)
        this_video = series[8]
        self.play(FadeIn(series, submobject_mode = "lagged_start"))
        self.teacher_says(
            "A new view of \\\\ the fundamental theorem",
            bubble_kwargs = {"height" : 3},
            added_anims = [
                this_video.shift, this_video.get_height()*DOWN/2,
                this_video.set_color, YELLOW,
            ]
        )
        self.change_student_modes(*["pondering"]*3)
        self.wait(3)
class AverageOfSineStart(AverageOfContinuousVariable):
    """Sets up sin(x) on [0, pi] and asks for its average height."""
    CONFIG = {
        "y_min" : -2,
        "y_max" : 2,
        "x_min" : -1,
        "x_max" : 2.5*np.pi,
        "x_leftmost_tick" : 0,
        "x_tick_frequency" : np.pi/4,
        "x_axis_width" : 12,
        "graph_origin" : 5*LEFT,
        "x_label_scale_val" : 0.75,
        "func" : np.sin,
        "graph_color" : BLUE,
        "bounds" : [0, np.pi],
    }
    def construct(self):
        self.setup_axes()
        self.add_graph()
        self.ask_about_average()
    def add_graph(self, run_time = 1):
        """Draw the sine curve and its label; both are stored on self."""
        graph = self.get_graph(self.func, color = self.graph_color)
        graph_label = self.get_graph_label(
            graph, "\\sin(x)",
            direction = UP
        )
        self.play(
            ShowCreation(graph),
            Write(graph_label),
            run_time = run_time
        )
        self.graph = graph
        self.graph_label = graph_label
    def ask_about_average(self):
        """Highlight the half-period, pose the question, and sweep a sample line."""
        half_period_graph = self.get_graph_portion_between_bounds()
        question = TextMobject("Average height?")
        question.to_edge(UP)
        arrow = Arrow(question.get_bottom(), half_period_graph.get_top())
        midpoint = np.mean(self.bounds)
        v_line = self.get_vertical_line_to_graph(
            midpoint, self.graph,
            line_class = DashedLine,
            color = WHITE
        )
        self.play(FadeIn(half_period_graph))
        self.play(
            Write(question, run_time = 2),
            ShowCreation(arrow)
        )
        self.play(ShowCreation(v_line))
        for bound in self.bounds + [midpoint]:
            self.play(self.get_v_line_change_anim(
                v_line, self.graph, bound,
                run_time = 3
            ))
    #########
    def get_graph_portion_between_bounds(self):
        """Yellow copy of the graph restricted to [bounds[0], bounds[1]]."""
        self.graph_portion_between_bounds = self.get_graph(
            self.func,
            x_min = self.bounds[0],
            x_max = self.bounds[1],
            color = YELLOW
        )
        return self.graph_portion_between_bounds
    def setup_axes(self):
        GraphScene.setup_axes(self)
        self.add_x_axis_labels()
    def add_x_axis_labels(self):
        """Place pi-multiple tick labels beneath the x-axis."""
        labels_and_x_values = [
            ("\\pi/2", np.pi/2),
            ("\\pi", np.pi),
            ("3\\pi/2", 3*np.pi/2),
            ("2\\pi", 2*np.pi),
        ]
        self.x_axis_labels = VGroup()
        for label, x in labels_and_x_values:
            tex_mob = TexMobject(label)
            tex_mob.scale(self.x_label_scale_val)
            tex_mob.move_to(
                self.coords_to_point(x, -3*self.x_axis.tick_size),
            )
            self.add(tex_mob)
            self.x_axis_labels.add(tex_mob)
class LengthOfDayGraph(GraphScene):
    """Plots hours of daylight over the year as a shifted sine wave, shows a
    solar-panel vignette, shades the summer/winter halves, and flashes the
    four numeric constants in the formula."""
    CONFIG = {
        "x_min" : 0,
        "x_max" : 365,
        "x_axis_width" : 12,
        "x_tick_frequency" : 25,
        "x_labeled_nums" : list(range(50, 365, 50)),
        "x_axis_label" : "Days since March 21",
        "y_min" : 0,
        "y_max" : 16,
        "y_axis_height" : 6,
        "y_tick_frequency" : 1,
        "y_labeled_nums" : list(range(2, 15, 2)),
        "y_axis_label" : "Hours of daylight",
        "graph_origin" : 6*LEFT + 3*DOWN,
        "camera_class" : ThreeDCamera,
        "camera_config" : {
            "shading_factor" : 1,
        }
    }
    def construct(self):
        self.setup_axes()
        self.add_graph()
        self.show_solar_pannel()
        self.set_color_summer_months()
        self.mention_constants()
    def add_graph(self):
        """Animate the axis labels, the daylight curve, and its formula label."""
        x_label = self.x_axis_label_mob
        y_label = self.y_axis_label_mob
        graph = self.get_graph(
            lambda x : 2.7*np.sin((2*np.pi)*x/365 ) + 12.4,
            color = GREEN,
        )
        graph_label = TexMobject("2.7\\sin(2\\pi x/365) + 12.4")
        graph_label.to_corner(UP+RIGHT).shift(LEFT)
        VGroup(*graph_label[3:6]).set_color(graph.get_color())
        graph_label[9].set_color(YELLOW)
        self.remove(x_label, y_label)
        for label in y_label, x_label:
            self.play(FadeIn(
                label,
                run_time = 2,
                submobject_mode = "lagged_start"
            ))
        self.play(
            ShowCreation(graph, rate_func = None),
            FadeIn(
                graph_label,
                rate_func = squish_rate_func(smooth, 0.5, 1),
                submobject_mode = "lagged_start"
            ),
            run_time = 3,
        )
        self.wait()
        self.graph = graph
        self.graph_label = graph_label
    def show_solar_pannel(self):
        """Randolph ponders an array of tilting solar panels; then both fade out."""
        randy = Randolph()
        randy.to_edge(DOWN)
        panel = ThreeDMobject(*[
            Rectangle(
                height = 0.7, width = 0.25,
                fill_color = DARK_GREY,
                fill_opacity = 1,
                stroke_width = 1,
                stroke_color = GREY,
            )
            for x in range(6)
        ])
        panel.arrange_submobjects(RIGHT, buff = SMALL_BUFF)
        panel.center()
        panels = ThreeDMobject(panel, panel.copy(), panel.copy())
        panels.arrange_submobjects(DOWN)
        panels.rotate(4*np.pi/12, DOWN)
        panels.rotate(-np.pi/6, OUT)
        # axis for the tilt animation, rotated to match the panels' orientation
        side_vect = RIGHT
        side_vect = rotate_vector(side_vect, 4*np.pi/12, DOWN)
        side_vect = rotate_vector(side_vect, -np.pi/3, OUT)
        panels.next_to(randy.get_corner(UP+RIGHT), RIGHT)
        self.play(FadeIn(randy))
        self.play(
            randy.change, "thinking", panels.get_right(),
            FadeIn(
                panels,
                run_time = 2,
                submobject_mode = "lagged_start"
            )
        )
        for angle in -np.pi/4, np.pi/4:
            self.play(*[
                Rotate(
                    panel, angle,
                    axis = side_vect,
                    in_place = True,
                    run_time = 2,
                    rate_func = squish_rate_func(smooth, a, a+0.8)
                )
                for panel, a in zip(panels, np.linspace(0, 0.2, len(panels)))
            ])
        self.play(Blink(randy))
        self.play(*list(map(FadeOut, [randy, panels])))
    def set_color_summer_months(self):
        """Shade the first half-year yellow (summer) and the second blue (winter)."""
        summer_rect = Rectangle()
        summer_rect.set_stroke(width = 0)
        summer_rect.set_fill(YELLOW, opacity = 0.25)
        summer_rect.replace(Line(
            self.graph_origin,
            self.coords_to_point(365/2, 15.5)
        ), stretch = True)
        winter_rect = Rectangle()
        winter_rect.set_stroke(width = 0)
        winter_rect.set_fill(BLUE, opacity = 0.25)
        winter_rect.replace(Line(
            self.coords_to_point(365/2, 15.5),
            self.coords_to_point(365, 0),
        ), stretch = True)
        summer_words, winter_words = [
            TextMobject("%s \\\\ months"%s).move_to(rect)
            for s, rect in [
                ("Summer", summer_rect),
                ("Winter", winter_rect),
            ]
        ]
        for rect, words in (summer_rect, summer_words), (winter_rect, winter_words):
            self.play(
                FadeIn(rect),
                Write(words, run_time = 2)
            )
        self.wait()
    def mention_constants(self):
        """Briefly pop out the four numeric constants in the formula label."""
        #2.7\\sin(2\\pi t/365) + 12.4
        constants = VGroup(*[
            VGroup(*self.graph_label[i:j])
            for i, j in [(0, 3), (7, 9), (11, 14), (16, 20)]
        ])
        self.play(*[
            ApplyFunction(
                lambda c : c.scale_in_place(0.9).shift(SMALL_BUFF*DOWN).set_color(RED),
                constant,
                run_time = 3,
                rate_func = squish_rate_func(there_and_back, a, a+0.7)
            )
            for constant, a in zip(
                constants,
                np.linspace(0, 0.3, len(constants))
            )
        ])
        self.wait()
#####
class AskAboutAverageOfContinuousVariables(TeacherStudentsScene):
    """A student skeptically asks how a continuous thing can have an average."""
    def construct(self):
        self.student_says(
            "The average \\dots of a \\\\ continuous thing?",
            target_mode = "sassy",
        )
        self.change_student_modes("confused", "sassy", "confused")
        self.play(self.teacher.change_mode, "happy")
        self.wait(2)
class AverageOfFiniteSet(Scene):
    """Shows four segments, joins them end-to-end into a labeled sum, then
    splits the total into equal quarters to illustrate a finite average."""
    CONFIG = {
        "lengths" : [1, 4, 2, 5]
    }
    def construct(self):
        lengths = self.lengths
        lines = VGroup(*[
            Line(ORIGIN, length*RIGHT)
            for length in lengths
        ])
        colors = Color(BLUE).range_to(RED, len(lengths))
        lines.set_color_by_gradient(*colors)
        lines.arrange_submobjects(RIGHT)
        lines.generate_target()
        # target configuration: segments joined end-to-end with no gaps
        lines.target.arrange_submobjects(RIGHT, buff = 0)
        for mob in lines, lines.target:
            mob.shift(UP)
        brace = Brace(lines.target, UP)
        labels = VGroup(*[
            TexMobject(str(d)).next_to(line, UP).set_color(line.get_color())
            for d, line in zip(lengths, lines)
        ])
        plusses = [TexMobject("+") for x in range(len(lengths)-1)]
        symbols = VGroup(*
            plusses + [TexMobject("=")]
        )
        symbols.set_fill(opacity = 0)
        labels.generate_target()
        symbols.generate_target()
        symbols.target.set_fill(opacity = 1)
        # interleave the length labels with the +/= symbols: 1 + 4 + 2 + 5 =
        sum_eq = VGroup(*it.chain(*list(zip(labels.target, symbols.target))))
        sum_eq.arrange_submobjects(RIGHT)
        sum_eq.next_to(brace, UP)
        sum_mob = TexMobject(str(sum(lengths)))
        sum_mob.next_to(sum_eq, RIGHT)
        # dashed lines cutting the joined segment into len(lengths) equal parts
        dividing_lines = VGroup(*[
            DashedLine(p + MED_SMALL_BUFF*UP, p + MED_LARGE_BUFF*DOWN)
            for alpha in np.linspace(0, 1, len(lengths)+1)
            for p in [interpolate(
                lines.target.get_left(),
                lines.target.get_right(),
                alpha
            )]
        ])
        lower_braces = VGroup(*[
            Brace(VGroup(*dividing_lines[i:i+2]), DOWN)
            for i in range(len(lengths))
        ])
        averages = VGroup(*[
            lb.get_text("$%d/%d$"%(sum(lengths), len(lengths)))
            for lb in lower_braces
        ])
        circle = Circle(color = YELLOW)
        circle.replace(averages[1], stretch = True)
        circle.scale_in_place(1.5)
        self.add(lines)
        self.play(FadeIn(
            labels,
            submobject_mode = "lagged_start",
            run_time = 3
        ))
        self.wait()
        self.play(
            GrowFromCenter(brace),
            *list(map(MoveToTarget, [lines, labels, symbols])),
            run_time = 2
        )
        self.play(Write(sum_mob))
        self.wait()
        self.play(ShowCreation(dividing_lines, run_time = 2))
        self.play(*it.chain(
            list(map(Write, averages)),
            list(map(GrowFromCenter, lower_braces))
        ))
        self.play(ShowCreation(circle))
        self.wait(2)
class TryToAddInfinitelyManyPoints(AverageOfSineStart):
    """Attempts the naive "sum infinitely many sample heights" approach on
    sin(x) over [0, pi], shows the continuum of inputs, then suggests using
    an integral."""
    CONFIG = {
        "max_denominator" : 40,
    }
    def construct(self):
        self.add_graph()
        self.try_to_add_infinitely_many_values()
        self.show_continuum()
        self.mention_integral()
    def add_graph(self):
        """Draw the sine graph instantly and highlight the [0, pi] portion."""
        self.setup_axes()
        AverageOfSineStart.add_graph(self, run_time = 0)
        self.add(self.get_graph_portion_between_bounds())
        self.graph_label.to_edge(RIGHT)
        self.graph_label.shift(DOWN)
    def try_to_add_infinitely_many_values(self):
        """Sample the graph at every reduced fraction multiple of pi and animate
        the samples into a (hopeless) infinite sum."""
        # One line per reduced fraction numerator/denominator in (0, 1).
        # FIX: use math.gcd — fractions.gcd was deprecated in Python 3.5 and
        # removed in 3.9; both agree for positive arguments as used here.
        v_lines = VGroup(*[
            self.get_vertical_line_to_graph(
                numerator*np.pi/denominator, self.graph,
                color = YELLOW,
                stroke_width = 6./denominator
            )
            for denominator in range(self.max_denominator)
            for numerator in range(1, denominator)
            if math.gcd(numerator, denominator) == 1
        ])
        ghost_lines = v_lines.copy().set_stroke(GREY)
        v_lines.generate_target()
        start_lines = VGroup(*v_lines.target[:15])
        end_lines = VGroup(*v_lines.target[15:])
        plusses = VGroup(*[TexMobject("+") for x in start_lines])
        sum_eq = VGroup(*it.chain(*list(zip(start_lines, plusses))))
        sum_eq.add(*end_lines)
        sum_eq.arrange_submobjects(RIGHT)
        sum_eq.next_to(v_lines[0], UP, aligned_edge = LEFT)
        sum_eq.to_edge(UP, buff = MED_SMALL_BUFF)
        h_line = Line(LEFT, RIGHT)
        h_line.set_width(start_lines.get_width())
        h_line.set_color(WHITE)
        h_line.next_to(sum_eq, DOWN, aligned_edge = LEFT)
        infinity = TexMobject("\\infty")
        infinity.next_to(h_line, DOWN)
        self.play(ShowCreation(
            v_lines,
            run_time = 3,
        ))
        self.add(ghost_lines, v_lines)
        self.wait(2)
        self.play(
            MoveToTarget(
                v_lines,
                run_time = 3,
                submobject_mode = "lagged_start"
            ),
            Write(plusses)
        )
        self.play(ShowCreation(h_line))
        self.play(Write(infinity))
        self.wait()
    def show_continuum(self):
        """Sweep an arrow along the continuous input interval [0, pi]."""
        arrow = Arrow(ORIGIN, UP+LEFT)
        input_range = Line(*[
            self.coords_to_point(bound, 0)
            for bound in self.bounds
        ])
        VGroup(arrow, input_range).set_color(RED)
        self.play(FadeIn(arrow))
        self.play(
            arrow.next_to, input_range.get_start(),
            DOWN+RIGHT, SMALL_BUFF
        )
        self.play(
            arrow.next_to, input_range.copy().get_end(),
            DOWN+RIGHT, SMALL_BUFF,
            ShowCreation(input_range),
            run_time = 3,
        )
        self.play(
            arrow.next_to, input_range.get_start(),
            DOWN+RIGHT, SMALL_BUFF,
            run_time = 3
        )
        self.play(FadeOut(arrow))
        self.wait()
    def mention_integral(self):
        """Randolph suggests using an integral, then shrugs uncertainly."""
        randy = Randolph()
        randy.to_edge(DOWN)
        randy.shift(3*LEFT)
        self.play(FadeIn(randy))
        self.play(PiCreatureBubbleIntroduction(
            randy, "Use an integral!",
            bubble_class = ThoughtBubble,
            target_mode = "hooray"
        ))
        self.play(Blink(randy))
        curr_bubble = randy.bubble
        new_bubble = randy.get_bubble("Somehow...")
        self.play(
            Transform(curr_bubble, new_bubble),
            Transform(curr_bubble.content, new_bubble.content),
            randy.change_mode, "shruggie",
        )
        self.play(Blink(randy))
        self.wait()
class FiniteSample(TryToAddInfinitelyManyPoints):
CONFIG = {
"dx" : 0.1,
"graph_origin" : 6*LEFT + 0.5*DOWN,
}
    def construct(self):
        """Entry point: draw the sine graph, then sample it at finitely many x values."""
        self.add_graph()
        self.show_finite_sample()
def show_finite_sample(self):
v_lines = self.get_sample_lines(dx = self.dx)
summed_v_lines = v_lines.copy()
plusses = VGroup(*[
TexMobject("+").scale(0.75)
for l in v_lines
])
numerator = VGroup(*it.chain(*list(zip(summed_v_lines, plusses))))
for group in numerator, plusses:
group.remove(plusses[-1])
numerator.arrange_submobjects(
RIGHT,
buff = SMALL_BUFF,
aligned_edge = DOWN
)
# numerator.set_width(FRAME_X_RADIUS)
numerator.scale(0.5)
numerator.move_to(self.coords_to_point(3*np.pi/2, 0))
numerator.to_edge(UP)
frac_line = | |
self.cached_enemies:
#get old position.
o_pos = None
moved = False
if self.unit_positions.get(unit.tag):
o_pos = self.unit_positions.get(unit.tag)
if o_pos:
if o_pos != unit.position:
#moved.
moved = True
#update the new_moves dict
new_moves.update({unit.tag:moved})
new_positions.update({unit.tag:unit.position})
self.unit_positions = new_positions
self.unit_moves = new_moves
    def get_mapvals(self):
        """Build (or load from the training-data cache) a per-point value map for
        this map and store it in self.mapDistances, keyed by "x:y" strings."""
        #check to see if we can get a cached version.
        cache_dict = self._training_data.loadMapVals(self.map_name)
        if cache_dict:
            print ('map vals loaded from cache')
            self.mapDistances = cache_dict
            return
        print ('running map vals')
        #get all the valid points in the pathing map.
        all_points = [
            Point2((x, y))
            for x in range(self._game_info.pathing_grid.width)
            for y in range(self._game_info.pathing_grid.height)
            #if self._game_info.pathing_grid[(x, y)] != 0
            if not self.inPathingGrid(Point2((x,y)))
        ]
        #print (all_points)
        valdict = {}
        for pos in all_points:
            #find the distance to the nearest non valid point.
            distance = self.nearestNonValid(pos)
            # also draw the value in-world for debugging
            self._client.debug_text_3d(str(int(distance)), self.turn3d(pos))
            posStr = "{}:{}".format(pos.x, pos.y)
            valdict.update({posStr:int(distance)})
            #print (pos, distance)
        #find ramps and intersections and increase the value for pathing through them.
        #save it.
        self._training_data.saveMapVals(self.map_name, valdict)
        self.mapDistances = valdict
def nearestNonValid(self, pos):
#create a grid around the position that is 6 points around it.
grid = self.retreatGrid(pos, size=3)
#filter out the retreat points that we can move too.
grid = {x for x in grid if self.inPathingGrid(x)}
#loop through the grid and find the closest distance.
if grid:
#print ('g', grid, pos)
return pos.position.distance_to_closest(grid)
#print ('no grid')
return 20
##################
#unorganized code#
##################
def _get_next_expansion(self):
locations = []
for possible in self.expansion_locations:
#make sure the location doesn't have a nexus already.
if not self.units(NEXUS).closer_than(12, possible).exists:
distance = sqrt((possible[0] - self.start_location[0])**2 + (possible[1] - self.start_location[1])**2)
locations.append([distance, possible])
return sorted(locations, key=itemgetter(0))[0][1]
    def findRangeRetreatTarget(self, unit, enemyThreats, inc_size=1):
        """Retreat point for `unit`: the nearby grid point furthest from the
        center of `enemyThreats`. Returns None (implicitly) when there are no
        grid points or no threats. Draws debug lines when selected/debugging."""
        #get all possible retreat points around us.
        retreatPoints = self.retreatGrid(unit.position, size=inc_size)
        if retreatPoints:
            #get the center of all the ground units that can attack us.
            if enemyThreats:
                #enemyThreatsCenter = self.cached_enemies.filter(lambda x: x.can_attack_ground).closer_than(enemy_radius, unit).center
                enemyThreatsCenter = self.center3d(enemyThreats)
                #retreatPoint = max(retreatPoints, key=lambda x: x.distance_to(enemyThreatsCenter) - x.distance_to(unit))
                retreatPoint = enemyThreatsCenter.position.furthest(retreatPoints)
                #debug info below.
                if unit.is_selected or _debug_combat:
                    self._client.debug_line_out(self.unitDebugPos(unit), self.p3AddZ(enemyThreatsCenter), (244, 217, 66))
                    self._client.debug_line_out(self.unitDebugPos(unit), self.p2AddZ(retreatPoint), (176, 66, 244))
                return retreatPoint
def inRange(self, unit):
#find out if any enemies have us in their range.
enemyThreats = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).filter(lambda x: x.target_in_range(unit, 1)).sorted(lambda x: x.distance_to(unit))
if enemyThreats:
return True, enemyThreats
return False, None
    def turn3d(self, p2):
        """Lift a 2d point to a Point3 at the local terrain height."""
        return Point3((p2.position.x, p2.position.y, self.getHeight(p2.position)))
    def checkCargo(self, unit, cargo):
        """True when the unit with tag `cargo` is within 6 of `unit` or has no
        orders; False otherwise, including when the tag cannot be found."""
        cargo_unit = self.units().find_by_tag(cargo)
        if cargo_unit:
            #if len(cargo_unit.orders) == 0:
            #if 'attack' in str(probe.orders).lower():
            distance = sqrt((cargo_unit.position[0] - unit.position[0])**2 + (cargo_unit.position[1] - unit.position[1])**2)
            # NOTE(review): leftover debug print — noisy on every call; consider removing.
            print (cargo, unit.position, cargo_unit.position, distance, cargo_unit.orders, len(cargo_unit.orders))
            if distance < 6 or len(cargo_unit.orders) == 0:
                return True
        return False
def findDropTarget(self, unit, enemy, dis1=6, dis2=8):
#dropPoints = self.neighbors8(enemy.position, distance=dis1) | self.neighbors8(enemy.position, distance=dis2)
dropPoints = self.retreatGrid(unit.position, size=3)
if dropPoints:
dropPoints = {x for x in dropPoints if self.inPathingGrid(x)}
if dropPoints:
dropPoint = max(dropPoints, key=lambda x: x.distance_to(unit) - x.distance_to(enemy))
return dropPoint
def inList(self, triedList, tPoint):
nposx = round(tPoint[0], 2)
nposy = round(tPoint[1], 2)
newPos = (nposx, nposy)
if newPos not in triedList:
return True
return False
    def center3d(self, units):
        """Arithmetic mean position (Point3) of a non-empty unit collection."""
        pos = Point3((sum([unit.position.x for unit in units]) / units.amount, sum([unit.position.y for unit in units]) / units.amount, sum([unit.position3d.z for unit in units]) / units.amount))
        return pos
    def unitDebugPos(self, unit):
        """Unit's 3d position lifted 1 up, for drawing debug lines above it."""
        return Point3((unit.position3d.x, unit.position3d.y, (unit.position3d.z + 1)))
    def p3AddZ(self, pos):
        """Copy of a 3d point raised 1 unit, for debug drawing."""
        return Point3((pos.x, pos.y, (pos.z + 1)))
def getHeight(self, pos):
off = 0
if self.hm_offset:
off = self.hm_offset
x = int(pos.x)
y = int(pos.y)
if x < 1:
x = 1
if y < 1:
y = 1
return self.game_info.terrain_height[x, y] - off
    def p2AddZ(self, pos):
        """Lift a 2d point to a Point3 at the local terrain height."""
        return Point3((pos.x, pos.y, self.getHeight(pos)))
    def p2RelZ(self, pos, unit):
        """Lift a 2d point to a Point3 slightly above the given unit's height."""
        return Point3((pos.x, pos.y, (unit.position3d.z + 0.5)))
def findAirRetreatTarget(self, unit, inc_size=3, enemy_radius=10):
#get all possible retreat points around us.
retreatPoints = self.retreatGrid(unit.position, size=inc_size)
if retreatPoints:
#get the center of all the ground units that can attack us.
if self.cached_enemies.filter(lambda x: x.can_attack_air).closer_than(enemy_radius, unit):
#enemyThreatsCenter = self.cached_enemies.filter(lambda x: x.can_attack_ground).closer_than(enemy_radius, unit).center
enemyThreatsCenter = self.center3d(self.cached_enemies.filter(lambda x: x.can_attack_air).closer_than(enemy_radius, unit))
#retreatPoint = max(retreatPoints, key=lambda x: x.distance_to(enemyThreatsCenter) - x.distance_to(unit))
retreatPoint = enemyThreatsCenter.position.furthest(retreatPoints)
#debug info below.
if unit.is_selected or _debug_combat:
self._client.debug_line_out(self.unitDebugPos(unit), self.p3AddZ(enemyThreatsCenter), (244, 217, 66))
self._client.debug_line_out(self.unitDebugPos(unit), self.p2AddZ(retreatPoint), (176, 66, 244))
return retreatPoint
def findRetreatTarget(self, unit, enemy, is_flying=False, dis1=2, dis2=4, inc_size=3):
retreatPoints = self.retreatGrid(unit.position, size=inc_size)
#retreatPoints = self.neighbors8(unit.position, distance=dis1) | self.neighbors8(unit.position, distance=dis2)
if not is_flying:
retreatPoints = {x for x in retreatPoints if self.inPathingGrid(x)}
if retreatPoints:
retreatPoint = enemy.position.furthest(retreatPoints)
#retreatPoint = max(retreatPoints, key=lambda x: x.distance_to(enemy) - x.distance_to(unit))
return retreatPoint
def findFlyingTarget(self, unit, can_target_air=False, max_enemy_distance=5, target_hitpoints=True, target_buildings=False):
enemyThreats = []
if target_buildings:
enemyThreats = self.cached_enemies.flying.closer_than(max_enemy_distance, unit)
else:
if can_target_air:
#look for medivac MEDIVAC .of_type([ADEPTPHASESHIFT])
enemyThreats = self.cached_enemies.of_type([MEDIVAC]).in_attack_range_of(unit) #.closer_than(max_enemy_distance, unit)
if not enemyThreats:
enemyThreats = self.cached_enemies.flying.filter(lambda x: x.can_attack_air).in_attack_range_of(unit)#.closer_than(max_enemy_distance, unit)
else:
enemyThreats = self.cached_enemies.flying.filter(lambda x: x.can_attack_ground).in_attack_range_of(unit)#.closer_than(max_enemy_distance, unit)
if enemyThreats.exists:
if target_hitpoints:
enemyThreats = enemyThreats.sorted(lambda x: x.health + x.shield)
return enemyThreats[0]
else:
enemyThreats = enemyThreats.sorted(lambda x: x.distance_to(unit))
return enemyThreats[0]
def findGroundTarget(self, unit, can_target_air=False, max_enemy_distance=5, target_hitpoints=True, target_buildings=False):
enemyThreats = []
if target_buildings:
#in_attack_range_of
#enemyThreats = self.cached_enemies.not_flying.closer_than(max_enemy_distance, unit)
enemyThreats = self.cached_enemies.not_flying.in_attack_range_of(unit)
else:
if can_target_air:
#enemyThreats = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).not_flying.filter(lambda x: x.can_attack_air).closer_than(max_enemy_distance, unit)
enemyThreats = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).not_flying.filter(lambda x: x.can_attack_air).in_attack_range_of(unit)
else:
enemyThreats = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).not_flying.filter(lambda x: x.can_attack_ground).in_attack_range_of(unit)
if enemyThreats.exists:
if target_hitpoints:
enemyThreats = enemyThreats.sorted(lambda x: x.health + x.shield)
return enemyThreats[0]
else:
enemyThreats = enemyThreats.sorted(lambda x: x.distance_to(unit))
return enemyThreats[0]
    def filterTargets(self, unit, enemyThreats):
        """Placeholder for target filtering; not implemented (always returns None)."""
        pass
def findDetectors(self, unit, detect_range):
enemyThreats = self.cached_enemies.filter(lambda x: x.detect_range > 0).closer_than(detect_range, unit).sorted(lambda x: x.distance_to(unit))
if enemyThreats:
return enemyThreats[0]
def findTargetExp(self, unit, can_target_air=False, max_enemy_distance=5, target_hitpoints=False, target_buildings=False, target_dangerous=True):
#filter down to all enemies in the distance with radius added.
enemyThreats = []
if target_buildings:
enemyThreats = self.cached_enemies.structure.closer_than(20, unit)
else:
if can_target_air:
enemyThreats = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).filter(lambda x: x.can_attack_air).closer_than(20, unit)
else:
enemyThreats = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).filter(lambda x: x.can_attack_ground).closer_than(20, unit)
#keep only the threats that are in our range.
#print ('len1', len(enemyThreats))
if enemyThreats.exists:
if target_dangerous:
#target the enemies that we are in range of first, of those in range
if unit.is_flying:
enemyThreats = enemyThreats.sorted(lambda x: x.distance_to(unit) - (x.radius + x.air_range + unit.radius))
else:
enemyThreats = enemyThreats.sorted(lambda x: x.distance_to(unit) - (x.radius + x.ground_range + unit.radius))
elif target_hitpoints:
enemyThreats = enemyThreats.sorted(lambda x: x.health + x.shield)
else:
enemyThreats = enemyThreats.sorted(lambda x: x.distance_to(unit))
ct = 0
enemyInRange = []
for enemy in enemyThreats:
#distance = unit.radius + range + enemy.radius
#print (ct)
ct += 1
attack_range = 0
if unit.can_attack_ground and not enemy.is_flying:
attack_range = unit.ground_range
elif unit.can_attack_air and enemy.is_flying:
attack_range = unit.air_range
enemy_attack_range = 0
if enemy.can_attack_ground and not unit.is_flying:
enemy_attack_range = enemy.ground_range
elif enemy.can_attack_air and unit.is_flying:
enemy_attack_range = enemy.air_range
full_range = enemy.radius + attack_range + unit.radius
enemy_range = enemy.radius + enemy_attack_range + unit.radius
#print ('seeing', full_range, unit.distance_to(enemy), enemy_range)
if unit.distance_to(enemy) < enemy.radius + attack_range + unit.radius:
return enemy
#enemyInRange.append(enemy)
#print ('adding', full_range, unit.distance_to(enemy))
#enemyThreats.remove(enemy)
#print ('len2', len(enemyInRange))
if len(enemyInRange) > 0:
return enemyInRange[0]
def findTarget(self, unit, can_target_air=False, max_enemy_distance=5, target_hitpoints=True, target_buildings=False):
enemyThreats = []
if target_buildings:
enemyThreats = self.cached_enemies.in_attack_range_of(unit)#.closer_than(max_enemy_distance, unit)
else:
if can_target_air:
enemyThreats = self.cached_enemies.of_type([MEDIVAC,CARRIER,BATTLECRUISER]).in_attack_range_of(unit) #.closer_than(max_enemy_distance, unit)
if not enemyThreats:
enemyThreats = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).filter(lambda x: x.can_attack_air).in_attack_range_of(unit)#.closer_than(max_enemy_distance, unit)
else:
enemyThreats = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).filter(lambda x: x.can_attack_ground).in_attack_range_of(unit)#.closer_than(max_enemy_distance, unit)
if enemyThreats.exists:
if target_hitpoints:
enemyThreats = enemyThreats.sorted(lambda x: x.health + x.shield)
return enemyThreats[0]
else:
enemyThreats = enemyThreats.sorted(lambda x: x.distance_to(unit))
return enemyThreats[0]
def regPathingScore(self, pos):
if pos[0] < 0 or pos[1] < 0:
return False
if pos[0] >= self._game_info.pathing_grid.width or pos[1] >= self._game_info.pathing_grid.height:
return False
posStr = "{}:{}".format(str(int(pos.x)), str(int(pos.y)))
if self.mapDistances.get(str(pos)):
return self.mapDistances.get(str(pos))
return 0
def abovePathingScore(self, pos):
if pos[0] < 0 or pos[1] < 0:
return False
if pos[0] >= self._game_info.pathing_grid.width or pos[1] >= self._game_info.pathing_grid.height:
return False
posStr = "{}:{}".format(str(int(pos.x)), str(int(pos.y)))
if self.mapDistances.get(str(pos)) and self.mapDistances.get(str(pos)) >= 3:
return True
return False
#stolen and edited from burny
def inPathingGrid(self, pos):
if pos[0] < 0 or pos[1] < 0:
return False
if pos[0] >= self._game_info.pathing_grid.width or pos[1] >= self._game_info.pathing_grid.height:
return False
# returns True if it is possible for a ground unit to move to pos - doesnt seem to work on ramps or near edges
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self._game_info.pathing_grid[(pos)] != 0
def retreatGrid(self, position, size=2):
#create a grid size by size around the unit for possible retreat points.
#eg: size=2 equals a 5 by 5 grid with position in the center.
p = position
d = ((size * 2) + 1)
rdone = 0 - size
retreatPoints = []
while rdone < d:
cdone = 0 - size
while cdone < d:
if (p.x - rdone) > 0 and (p.y - cdone) > 0:
retreatPoints.append(Point2((p.x -rdone, p.y - cdone)))
cdone += 1
rdone += 1
return retreatPoints
def groundRetreatGrid(self, position, size=2):
#create a grid size by size around the unit for possible retreat points.
#eg: size=2 equals a 5 by 5 grid with position in the center.
#checks for valid spots for ground units.
p = position
d = ((size * 2) + 1)
rdone = 0 - size
retreatPoints = []
while rdone < d:
cdone = 0 - size
while cdone < d:
if (p.x - rdone) > 0 and (p.y - cdone) > 0:
possible = Point2((p.x -rdone, p.y - cdone))
#check to make sure ground unit can move there.
if self.inPathingGrid(possible):
retreatPoints.append(possible)
cdone += 1
rdone += 1
return retreatPoints
def getDefensiveSearchPos(self):
#get the nearest locations.
locations = []
for possible in self.expansion_locations:
distance = sqrt((possible[0] - self.start_location.position[0])**2 + (possible[1] - self.start_location.position[1])**2)
locations.append([distance, possible])
locations = sorted(locations, key=itemgetter(0))
del locations[5:]
return random.choice(locations)[1]
#if distance of that start location is less than 10,then search nearby areas.
startPos = self.start_location
if unit.distance_to(self.start_location) > 10:
return startPos
else:
#search random among the nearest 10 expansion slots to unit.
locations = []
for possible in self.expansion_locations:
distance = sqrt((possible[0] - unit.position[0])**2 + (possible[1] - unit.position[1])**2)
locations.append([distance, possible])
locations = sorted(locations, key=itemgetter(0))
del locations[10:]
return random.choice(locations)[1]
#properties
@property
def main_ramp_bottom_center(self) -> Point2:
pos = Point2((sum([p.x for p in self.main_base_ramp.lower]) / len(self.main_base_ramp.lower), \
sum([p.y for p in self.main_base_ramp.lower]) / len(self.main_base_ramp.lower)))
return pos
@property
def trueGates(self) -> int:
#total = self.units(GATEWAY).amount + self.units(WARPGATE).amount
return self.units(GATEWAY).amount + self.units(WARPGATE).amount
    @property
    def queuedGates(self) -> bool:
        """Delegates to self.buildingList.gatesQueued (gateway queue state)."""
        return self.buildingList.gatesQueued
    @property
    def queuedStarGates(self) -> bool:
        """Delegates to self.buildingList.stargatesQueued (stargate queue state)."""
        return self.buildingList.stargatesQueued
    @property
    def queuedRobos(self) -> bool:
        """Delegates to self.buildingList.robosQueued (robotics-facility queue state)."""
        return self.buildingList.robosQueued
    @property
    def allQueued(self) -> bool:
        """Delegates to self.buildingList.allQueued (aggregate queue state)."""
        return self.buildingList.allQueued
    @property
    def productionBuildings(self) -> int:
        """Number of our production structures (gateway/warpgate/stargate/robo)."""
        #count all our production facilities.
        return self.units.of_type([GATEWAY,WARPGATE,STARGATE,ROBOTICSFACILITY]).amount
###############
#old code area#
###############
def inDangerWorking(self, unit, is_flying=False, friend_range=10, enemy_range=10, min_shield_attack=0):
friendlyClose = self.units().closer_than(friend_range, unit)
enemyThreatsClose = []
closestEnemy = None
if unit.is_flying:
enemyThreatsClose = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).filter(lambda x: x.can_attack_air).closer_than(enemy_range, unit)
else:
enemyThreatsClose = self.cached_enemies.exclude_type([ADEPTPHASESHIFT]).filter(lambda x: x.can_attack_ground).closer_than(enemy_range, unit)
if enemyThreatsClose.exists:
closestEnemy = enemyThreatsClose.closest_to(unit)
if unit.shield_percentage < min_shield_attack:
return True, closestEnemy
enemyDPStoGround = 0
enemyDPStoAir = 0
enemyAirHealth = 0
enemyGroundHealth = 0
for enemy in enemyThreatsClose:
if enemy.can_attack_ground or | |
request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__change_artifact_dep_setting_with_http_info(bt_locator, artifact_dep_locator, field_name, **kwargs) # noqa: E501
else:
(data) = self.__change_artifact_dep_setting_with_http_info(bt_locator, artifact_dep_locator, field_name, **kwargs) # noqa: E501
return data
def change_feature_setting(self, bt_locator, feature_id, name, **kwargs): # noqa: E501
"""change_feature_setting # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.change_feature_setting(bt_locator, feature_id, name, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str feature_id: (required)
:param str name: (required)
:param str body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__change_feature_setting_with_http_info(bt_locator, feature_id, name, **kwargs) # noqa: E501
else:
(data) = self.__change_feature_setting_with_http_info(bt_locator, feature_id, name, **kwargs) # noqa: E501
return data
def change_requirement_setting(self, bt_locator, agent_requirement_locator, field_name, **kwargs): # noqa: E501
"""change_requirement_setting # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.change_requirement_setting(bt_locator, agent_requirement_locator, field_name, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str agent_requirement_locator: (required)
:param str field_name: (required)
:param str body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__change_requirement_setting_with_http_info(bt_locator, agent_requirement_locator, field_name, **kwargs) # noqa: E501
else:
(data) = self.__change_requirement_setting_with_http_info(bt_locator, agent_requirement_locator, field_name, **kwargs) # noqa: E501
return data
def change_step_setting(self, bt_locator, step_id, field_name, **kwargs): # noqa: E501
"""change_step_setting # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.change_step_setting(bt_locator, step_id, field_name, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str step_id: (required)
:param str field_name: (required)
:param str body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__change_step_setting_with_http_info(bt_locator, step_id, field_name, **kwargs) # noqa: E501
else:
(data) = self.__change_step_setting_with_http_info(bt_locator, step_id, field_name, **kwargs) # noqa: E501
return data
def change_trigger_setting(self, bt_locator, trigger_locator, field_name, **kwargs): # noqa: E501
"""change_trigger_setting # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.change_trigger_setting(bt_locator, trigger_locator, field_name, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str trigger_locator: (required)
:param str field_name: (required)
:param str body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__change_trigger_setting_with_http_info(bt_locator, trigger_locator, field_name, **kwargs) # noqa: E501
else:
(data) = self.__change_trigger_setting_with_http_info(bt_locator, trigger_locator, field_name, **kwargs) # noqa: E501
return data
def delete_agent_requirement(self, bt_locator, agent_requirement_locator, **kwargs): # noqa: E501
"""delete_agent_requirement # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_agent_requirement(bt_locator, agent_requirement_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str agent_requirement_locator: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_agent_requirement_with_http_info(bt_locator, agent_requirement_locator, **kwargs) # noqa: E501
else:
(data) = self.__delete_agent_requirement_with_http_info(bt_locator, agent_requirement_locator, **kwargs) # noqa: E501
return data
def delete_all_parameters(self, bt_locator, **kwargs): # noqa: E501
"""delete_all_parameters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_all_parameters(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_all_parameters_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__delete_all_parameters_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def delete_all_parameters_0(self, bt_locator, **kwargs): # noqa: E501
"""delete_all_parameters_0 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_all_parameters_0(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_all_parameters_0_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__delete_all_parameters_0_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def delete_artifact_dep(self, bt_locator, artifact_dep_locator, **kwargs): # noqa: E501
"""delete_artifact_dep # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_artifact_dep(bt_locator, artifact_dep_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str artifact_dep_locator: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_artifact_dep_with_http_info(bt_locator, artifact_dep_locator, **kwargs) # noqa: E501
else:
(data) = self.__delete_artifact_dep_with_http_info(bt_locator, artifact_dep_locator, **kwargs) # noqa: E501
return data
def delete_build_type(self, bt_locator, **kwargs): # noqa: E501
"""delete_build_type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_build_type(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_build_type_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__delete_build_type_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def delete_feature(self, bt_locator, feature_id, **kwargs): # noqa: E501
"""delete_feature # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_feature(bt_locator, feature_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str feature_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_feature_with_http_info(bt_locator, feature_id, **kwargs) # noqa: E501
else:
(data) = self.__delete_feature_with_http_info(bt_locator, feature_id, **kwargs) # noqa: E501
return data
def delete_parameter(self, name, bt_locator, **kwargs): # noqa: E501
"""delete_parameter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_parameter(name, bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str name: (required)
:param str bt_locator: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_parameter_with_http_info(name, bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__delete_parameter_with_http_info(name, bt_locator, **kwargs) # noqa: E501
return data
def delete_parameter_0(self, name, bt_locator, **kwargs): # noqa: E501
"""delete_parameter_0 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_parameter_0(name, bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str name: (required)
:param str bt_locator: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_parameter_0_with_http_info(name, bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__delete_parameter_0_with_http_info(name, bt_locator, **kwargs) # noqa: E501
return data
def delete_snapshot_dep(self, bt_locator, snapshot_dep_locator, **kwargs): # noqa: E501
"""delete_snapshot_dep # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_dep(bt_locator, snapshot_dep_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str snapshot_dep_locator: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_snapshot_dep_with_http_info(bt_locator, snapshot_dep_locator, **kwargs) # noqa: E501
else:
(data) = self.__delete_snapshot_dep_with_http_info(bt_locator, snapshot_dep_locator, **kwargs) # noqa: E501
return data
def delete_step(self, bt_locator, step_id, **kwargs): # noqa: E501
"""delete_step # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_step(bt_locator, step_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str step_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_step_with_http_info(bt_locator, step_id, **kwargs) # noqa: E501
else:
(data) = self.__delete_step_with_http_info(bt_locator, step_id, **kwargs) # noqa: E501
return data
def delete_trigger(self, bt_locator, trigger_locator, **kwargs): # noqa: E501
"""delete_trigger # noqa: E501
This method | |
# Source repository: AlexaVillaume/irtf_website
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class EquivWidths(models.Model):
    """Equivalent-width measurements of spectral indices, one row per star.

    Auto-generated (inspectdb) unmanaged model over the `equiv_widths`
    table; all columns are stored as text and mapped verbatim.  Column
    names mix IR indices (naI082, ca_II1-3, CO bands) and Lick optical
    indices (Ca4227, G4300, Hbeta, Mgb, ...).
    """
    name = models.TextField(db_column='Name', unique=True, blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    nai082 = models.TextField(db_column='naI082', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    ca_ii1 = models.TextField(db_column='ca_II1', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    ca_ii2 = models.TextField(db_column='ca_II2', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    ca_ii3 = models.TextField(db_column='ca_II3', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    feh099 = models.TextField(blank=True, null=True)  # This field type is a guess.
    ki1 = models.TextField(blank=True, null=True)  # This field type is a guess.
    ki2 = models.TextField(blank=True, null=True)  # This field type is a guess.
    cai1981 = models.TextField(blank=True, null=True)  # This field type is a guess.
    cai1982 = models.TextField(blank=True, null=True)  # This field type is a guess.
    co2301 = models.TextField(blank=True, null=True)  # This field type is a guess.
    co2302 = models.TextField(blank=True, null=True)  # This field type is a guess.
    ca4227 = models.TextField(db_column='Ca4227', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    g4300 = models.TextField(db_column='G4300', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    fe4383 = models.TextField(db_column='Fe4383', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    ca4455 = models.TextField(db_column='Ca4455', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    fe4531 = models.TextField(db_column='Fe4531', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    c4668 = models.TextField(db_column='C4668', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    hbeta = models.TextField(db_column='Hbeta', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    fe5015 = models.TextField(db_column='Fe5015', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    mgb = models.TextField(db_column='Mgb', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    fe5270 = models.TextField(db_column='Fe5270', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    fe5335 = models.TextField(db_column='Fe5335', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    fe5406 = models.TextField(db_column='Fe5406', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    fe5709 = models.TextField(db_column='Fe5709', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    fe5782 = models.TextField(db_column='Fe5782', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    nad = models.TextField(db_column='NaD', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    hdeltaa = models.TextField(db_column='HdeltaA', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    hgammaa = models.TextField(db_column='HgammaA', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    hdeltaf = models.TextField(db_column='HdeltaF', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    hgammaf = models.TextField(db_column='HgammaF', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    mgi8800 = models.TextField(db_column='mgI8800', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    nai114 = models.TextField(db_column='naI114', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    tio = models.TextField(db_column='TiO', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    cn1 = models.TextField(db_column='CN1', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    nai220 = models.TextField(db_column='naI220', blank=True, null=True)  # Field name made lowercase. This field type is a guess.
    class Meta:
        managed = False
        db_table = 'equiv_widths'
class EwLower(models.Model):
    """Companion table to EquivWidths keyed by star name.

    Auto-generated (inspectdb) unmanaged model over `ew_lower`; all
    columns are text.  The `_l` suffix presumably means the lower bound
    of each equivalent-width measurement — TODO confirm against the data
    pipeline that populates this table.
    """
    name = models.TextField(db_column='Name', primary_key=True, blank=True, null=False)  # Field name made lowercase.
    ca4227_l = models.TextField(db_column='Ca4227_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    g4300_l = models.TextField(db_column='G4300_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    fe4383_l = models.TextField(db_column='Fe4383_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    ca4455_l = models.TextField(db_column='Ca4455_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    fe4531_l = models.TextField(db_column='Fe4531_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    c4668_l = models.TextField(db_column='C4668_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    hbeta_l = models.TextField(db_column='Hbeta_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    fe5015_l = models.TextField(db_column='Fe5015_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    mgb_l = models.TextField(db_column='Mgb_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    fe5270_l = models.TextField(db_column='Fe5270_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    fe5335_l = models.TextField(db_column='Fe5335_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    fe5406_l = models.TextField(db_column='Fe5406_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    fe5709_l = models.TextField(db_column='Fe5709_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    fe5782_l = models.TextField(db_column='Fe5782_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    nad_l = models.TextField(db_column='NaD_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    hdeltaa_l = models.TextField(db_column='HdeltaA_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    hgammaa_l = models.TextField(db_column='HgammaA_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    hdeltaf_l = models.TextField(db_column='HdeltaF_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    hgammaf_l = models.TextField(db_column='HgammaF_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    nai082_l = models.TextField(db_column='naI082_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    ca_ii1_l = models.TextField(db_column='ca_II1_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    ca_ii2_l = models.TextField(db_column='ca_II2_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    ca_ii3_l = models.TextField(db_column='ca_II3_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    feh099_l = models.TextField(blank=True, null=False)  # This field type is a guess.
    ki1_l = models.TextField(blank=True, null=False)  # This field type is a guess.
    ki2_l = models.TextField(blank=True, null=False)  # This field type is a guess.
    cai1981_l = models.TextField(blank=True, null=False)  # This field type is a guess.
    cai1982_l = models.TextField(blank=True, null=False)  # This field type is a guess.
    co2301_l = models.TextField(blank=True, null=False)  # This field type is a guess.
    co2302_l = models.TextField(blank=True, null=False)  # This field type is a guess.
    mgi8800_l = models.TextField(db_column='mgI8800_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    nai114_l = models.TextField(db_column='naI114_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    tio_l = models.TextField(db_column='TiO_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    cn1_l = models.TextField(db_column='CN1_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    nai220_l = models.TextField(db_column='naI220_l', blank=True, null=False)  # Field name made lowercase. This field type is a guess.
    class Meta:
        managed = False
        db_table = 'ew_lower'
class EwUpper(models.Model):
name = models.TextField(db_column='Name', primary_key=True, blank=True, null=False) # Field name made lowercase.
ca4227_u = models.TextField(db_column='Ca4227_u', blank=True, null=False) # Field name made lowercase. This field type is a guess.
g4300_u = models.TextField(db_column='G4300_u', blank=True, null=False) # Field name made lowercase. This field type is a guess.
fe4383_u = models.TextField(db_column='Fe4383_u', blank=True, null=False) # Field name made lowercase. This field type is a guess.
ca4455_u = models.TextField(db_column='Ca4455_u', blank=True, null=False) # Field name made lowercase. This field type is a guess.
fe4531_u = models.TextField(db_column='Fe4531_u', blank=True, null=False) # Field name made lowercase. This field type is a guess.
c4668_u = models.TextField(db_column='C4668_u', blank=True, null=False) # Field name | |
# Source file: test/azure/version-tolerant/Expected/AcceptanceTests/LroVersionTolerant/lroversiontolerant/aio/operations/_operations.py
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ...operations._operations import (
build_lr_os_custom_header_post202_retry200_request_initial,
build_lr_os_custom_header_post_async_retry_succeeded_request_initial,
build_lr_os_custom_header_put201_creating_succeeded200_request_initial,
build_lr_os_custom_header_put_async_retry_succeeded_request_initial,
build_lro_retrys_delete202_retry200_request_initial,
build_lro_retrys_delete_async_relative_retry_succeeded_request_initial,
build_lro_retrys_delete_provisioning202_accepted200_succeeded_request_initial,
build_lro_retrys_post202_retry200_request_initial,
build_lro_retrys_post_async_relative_retry_succeeded_request_initial,
build_lro_retrys_put201_creating_succeeded200_request_initial,
build_lro_retrys_put_async_relative_retry_succeeded_request_initial,
build_lros_delete202_no_retry204_request_initial,
build_lros_delete202_retry200_request_initial,
build_lros_delete204_succeeded_request_initial,
build_lros_delete_async_no_header_in_retry_request_initial,
build_lros_delete_async_no_retry_succeeded_request_initial,
build_lros_delete_async_retry_failed_request_initial,
build_lros_delete_async_retry_succeeded_request_initial,
build_lros_delete_async_retrycanceled_request_initial,
build_lros_delete_no_header_in_retry_request_initial,
build_lros_delete_provisioning202_accepted200_succeeded_request_initial,
build_lros_delete_provisioning202_deleting_failed200_request_initial,
build_lros_delete_provisioning202_deletingcanceled200_request_initial,
build_lros_patch200_succeeded_ignore_headers_request_initial,
build_lros_patch201_retry_with_async_header_request_initial,
build_lros_patch202_retry_with_async_and_location_header_request_initial,
build_lros_post200_with_payload_request_initial,
build_lros_post202_list_request_initial,
build_lros_post202_no_retry204_request_initial,
build_lros_post202_retry200_request_initial,
build_lros_post_async_no_retry_succeeded_request_initial,
build_lros_post_async_retry_failed_request_initial,
build_lros_post_async_retry_succeeded_request_initial,
build_lros_post_async_retrycanceled_request_initial,
build_lros_post_double_headers_final_azure_header_get_default_request_initial,
build_lros_post_double_headers_final_azure_header_get_request_initial,
build_lros_post_double_headers_final_location_get_request_initial,
build_lros_put200_acceptedcanceled200_request_initial,
build_lros_put200_succeeded_no_state_request_initial,
build_lros_put200_succeeded_request_initial,
build_lros_put200_updating_succeeded204_request_initial,
build_lros_put201_creating_failed200_request_initial,
build_lros_put201_creating_succeeded200_request_initial,
build_lros_put201_succeeded_request_initial,
build_lros_put202_retry200_request_initial,
build_lros_put_async_no_header_in_retry_request_initial,
build_lros_put_async_no_retry_succeeded_request_initial,
build_lros_put_async_no_retrycanceled_request_initial,
build_lros_put_async_non_resource_request_initial,
build_lros_put_async_retry_failed_request_initial,
build_lros_put_async_retry_succeeded_request_initial,
build_lros_put_async_sub_resource_request_initial,
build_lros_put_no_header_in_retry_request_initial,
build_lros_put_non_resource_request_initial,
build_lros_put_sub_resource_request_initial,
build_lrosads_delete202_non_retry400_request_initial,
build_lrosads_delete202_retry_invalid_header_request_initial,
build_lrosads_delete204_succeeded_request_initial,
build_lrosads_delete_async_relative_retry400_request_initial,
build_lrosads_delete_async_relative_retry_invalid_header_request_initial,
build_lrosads_delete_async_relative_retry_invalid_json_polling_request_initial,
build_lrosads_delete_async_relative_retry_no_status_request_initial,
build_lrosads_delete_non_retry400_request_initial,
build_lrosads_post202_no_location_request_initial,
build_lrosads_post202_non_retry400_request_initial,
build_lrosads_post202_retry_invalid_header_request_initial,
build_lrosads_post_async_relative_retry400_request_initial,
build_lrosads_post_async_relative_retry_invalid_header_request_initial,
build_lrosads_post_async_relative_retry_invalid_json_polling_request_initial,
build_lrosads_post_async_relative_retry_no_payload_request_initial,
build_lrosads_post_non_retry400_request_initial,
build_lrosads_put200_invalid_json_request_initial,
build_lrosads_put_async_relative_retry400_request_initial,
build_lrosads_put_async_relative_retry_invalid_header_request_initial,
build_lrosads_put_async_relative_retry_invalid_json_polling_request_initial,
build_lrosads_put_async_relative_retry_no_status_payload_request_initial,
build_lrosads_put_async_relative_retry_no_status_request_initial,
build_lrosads_put_error201_no_provisioning_state_payload_request_initial,
build_lrosads_put_non_retry201_creating400_invalid_json_request_initial,
build_lrosads_put_non_retry201_creating400_request_initial,
build_lrosads_put_non_retry400_request_initial,
)
T = TypeVar("T")  # generic payload type produced by the deserialization callback
JSONType = Any  # request/response bodies are plain JSON-compatible Python objects
# Signature of the optional `cls` response hook: (raw pipeline response,
# deserialized body, response headers) -> arbitrary caller-defined result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LROsOperations: # pylint: disable=too-many-public-methods
"""LROsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the pipeline client, configuration and (de)serializers.

    Called by the generated service client; not meant for direct use.
    """
    self._config = config
    self._deserialize = deserializer
    self._serialize = serializer
    self._client = client
async def _put200_succeeded_initial(self, product: JSONType = None, **kwargs: Any) -> Optional[JSONType]:
    """Send the initial PUT for put200Succeeded and return the parsed JSON body.

    :param product: Optional JSON product payload to send.
    :return: Parsed response body on 200, None on 204/empty body.
    :raises ~azure.core.exceptions.HttpResponseError: on any other status.
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[JSONType]]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]

    _json = product if product is not None else None
    request = build_lros_put200_succeeded_request_initial(
        content_type=content_type,
        json=_json,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in (200, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Only a 200 carries a body worth parsing; 204 yields None.
    deserialized = None
    if response.status_code == 200 and response.content:
        deserialized = response.json()

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
@distributed_trace_async
async def begin_put200_succeeded(self, product: JSONType = None, **kwargs: Any) -> AsyncLROPoller[JSONType]:
    """Long running put request, service returns a 200 to the initial request, with an entity that
    contains ProvisioningState=’Succeeded’.

    :param product: Product to put.
    :type product: JSONType
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns JSON object
    :rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            product = {
                "id": "str",  # Optional. Resource Id.
                "location": "str",  # Optional. Resource Location.
                "name": "str",  # Optional. Resource Name.
                "properties": {
                    "provisioningState": "str",  # Optional.
                    "provisioningStateValues": "str"  # Optional. Possible values
                      include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                      "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                },
                "tags": {
                    "str": "str"  # Optional. A set of tags. Dictionary of
                      :code:`<string>`.
                },
                "type": "str"  # Optional. Resource Type.
            }

            # response body for status code(s): 200
            response.json() == {
                "id": "str",  # Optional. Resource Id.
                "location": "str",  # Optional. Resource Location.
                "name": "str",  # Optional. Resource Name.
                "properties": {
                    "provisioningState": "str",  # Optional.
                    "provisioningStateValues": "str"  # Optional. Possible values
                      include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                      "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                },
                "tags": {
                    "str": "str"  # Optional. A set of tags. Dictionary of
                      :code:`<string>`.
                },
                "type": "str"  # Optional. Resource Type.
            }
    """
    # The pop order matters: everything removed here is kept out of the
    # **kwargs forwarded to the initial request and the polling method.
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # First call: issue the initial PUT. cls=lambda returns the raw
        # PipelineResponse so the poller can inspect status and headers.
        raw_result = await self._put200_succeeded_initial(
            product=product, content_type=content_type, cls=lambda x, y, z: x, **kwargs
        )
    # error_map was consumed by the initial call; drop it before polling.
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialization callback: parse the terminal response body.
        response = pipeline_response.http_response
        if response.content:
            deserialized = response.json()
        else:
            deserialized = None
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new LRO.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _patch200_succeeded_ignore_headers_initial(self, product: JSONType = None, **kwargs: Any) -> JSONType:
    """Send the initial PATCH for patch200SucceededIgnoreHeaders.

    :param product: Optional JSON product payload to send.
    :return: Parsed JSON response body (None when the body is empty).
    :raises ~azure.core.exceptions.HttpResponseError: on any non-200 status.
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop("error_map", {}))
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]

    _json = product if product is not None else None
    request = build_lros_patch200_succeeded_ignore_headers_request_initial(
        content_type=content_type,
        json=_json,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Surface the Azure-AsyncOperation header to the optional `cls` hook.
    response_headers = {}
    response_headers["Azure-AsyncOperation"] = self._deserialize(
        "str", response.headers.get("Azure-AsyncOperation")
    )

    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
@distributed_trace_async
async def begin_patch200_succeeded_ignore_headers(
    self, product: JSONType = None, **kwargs: Any
) -> AsyncLROPoller[JSONType]:
    """Long running put request, service returns a 200 to the initial request with location header. We
    should not have any subsequent calls after receiving this first response.

    :param product: Product to patch.
    :type product: JSONType
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns JSON object
    :rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            product = {
                "id": "str",  # Optional. Resource Id.
                "location": "str",  # Optional. Resource Location.
                "name": "str",  # Optional. Resource Name.
                "properties": {
                    "provisioningState": "str",  # Optional.
                    "provisioningStateValues": "str"  # Optional. Possible values
                      include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                      "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                },
                "tags": {
                    "str": "str"  # Optional. A set of tags. Dictionary of
                      :code:`<string>`.
                },
                "type": "str"  # Optional. Resource Type.
            }

            # response body for status code(s): 200
            response.json() == {
                "id": "str",  # Optional. Resource Id.
                "location": "str",  # Optional. Resource Location.
                "name": "str",  # Optional. Resource Name.
                "properties": {
                    "provisioningState": "str",  # Optional.
                    "provisioningStateValues": "str"  # Optional. Possible values
                      include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                      "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                },
                "tags": {
                    "str": "str"  # Optional. A set of tags. Dictionary of
                      :code:`<string>`.
                },
                "type": "str"  # Optional. Resource Type.
            }
    """
    # The pop order matters: everything removed here is kept out of the
    # **kwargs forwarded to the initial request and the polling method.
    content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # cls=lambda returns the raw PipelineResponse so the poller can
        # inspect status and headers of the initial PATCH.
        raw_result = await self._patch200_succeeded_ignore_headers_initial(
            product=product, content_type=content_type, cls=lambda x, y, z: x, **kwargs
        )
    # error_map was consumed by the initial call; drop it before polling.
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialization callback: parse the body and expose the
        # Azure-AsyncOperation header to the optional `cls` hook.
        response_headers = {}
        response = pipeline_response.http_response
        response_headers["Azure-AsyncOperation"] = self._deserialize(
            "str", response.headers.get("Azure-AsyncOperation")
        )
        if response.content:
            deserialized = response.json()
        else:
            deserialized = None
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new LRO.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _patch201_retry_with_async_header_initial(self, product: JSONType = None, **kwargs: Any) -> JSONType:
cls | |
"""storeReleasePublishLogs_get # noqa: E501
Returns publish logs for a particular release published to a particular store # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.storeReleasePublishLogs_get(store_name, release_id, owner_name, app_name, async=True)
>>> result = thread.get()
:param async bool
:param string store_name: The name of the store (required)
:param string release_id: The ID of the realease (required)
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:return: ErrorDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.storeReleasePublishLogs_get_with_http_info(store_name, release_id, owner_name, app_name, **kwargs) # noqa: E501
else:
(data) = self.storeReleasePublishLogs_get_with_http_info(store_name, release_id, owner_name, app_name, **kwargs) # noqa: E501
return data
def storeReleasePublishLogs_get_with_http_info(self, store_name, release_id, owner_name, app_name, **kwargs):  # noqa: E501
    """storeReleasePublishLogs_get  # noqa: E501

    Returns publish logs for a particular release published to a particular store  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.storeReleasePublishLogs_get_with_http_info(store_name, release_id, owner_name, app_name, async=True)
    >>> result = thread.get()

    :param async bool
    :param string store_name: The name of the store (required)
    :param string release_id: The ID of the release (required)
    :param string owner_name: The name of the owner (required)
    :param string app_name: The name of the application (required)
    :return: ErrorDetails
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): this generated module uses `async` as an identifier and
    # keyword argument name; `async` is a reserved keyword from Python 3.7,
    # so this code can only run on Python <= 3.6 / 2.x.
    all_params = ['store_name', 'release_id', 'owner_name', 'app_name']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # `locals()` captures self + the named parameters above; recognized
    # kwargs are merged in and unknown ones rejected.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method storeReleasePublishLogs_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'store_name' is set
    if ('store_name' not in params or
            params['store_name'] is None):
        raise ValueError("Missing the required parameter `store_name` when calling `storeReleasePublishLogs_get`")  # noqa: E501
    # verify the required parameter 'release_id' is set
    if ('release_id' not in params or
            params['release_id'] is None):
        raise ValueError("Missing the required parameter `release_id` when calling `storeReleasePublishLogs_get`")  # noqa: E501
    # verify the required parameter 'owner_name' is set
    if ('owner_name' not in params or
            params['owner_name'] is None):
        raise ValueError("Missing the required parameter `owner_name` when calling `storeReleasePublishLogs_get`")  # noqa: E501
    # verify the required parameter 'app_name' is set
    if ('app_name' not in params or
            params['app_name'] is None):
        raise ValueError("Missing the required parameter `app_name` when calling `storeReleasePublishLogs_get`")  # noqa: E501

    collection_formats = {}

    # Substitute the four path placeholders in the URL template below.
    path_params = {}
    if 'store_name' in params:
        path_params['store_name'] = params['store_name']  # noqa: E501
    if 'release_id' in params:
        path_params['release_id'] = params['release_id']  # noqa: E501
    if 'owner_name' in params:
        path_params['owner_name'] = params['owner_name']  # noqa: E501
    if 'app_name' in params:
        path_params['app_name'] = params['app_name']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'multipart/form-data', 'application/json-patch+json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['APIToken']  # noqa: E501

    return self.api_client.call_api(
        '/v0.1/apps/{owner_name}/{app_name}/distribution_stores/{store_name}/releases/{release_id}/publish_logs', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ErrorDetails',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def storeReleases_getPublishError(self, store_name, release_id, owner_name, app_name, **kwargs):  # noqa: E501
    """storeReleases_getPublishError  # noqa: E501

    Return the Error Details of release which failed in publishing.

    Synchronous by default; pass async=True to get the request thread back
    instead (call .get() on it for the result).

    :param async bool
    :param string store_name: The name of the store (required)
    :param number release_id: The id of the release (required)
    :param string owner_name: The name of the owner (required)
    :param string app_name: The name of the application (required)
    :return: ErrorDetails, or the request thread when async=True.
    """
    kwargs['_return_http_data_only'] = True
    # Both sync and async modes issue the identical underlying call; the
    # helper itself returns a thread in async mode and data otherwise.
    outcome = self.storeReleases_getPublishError_with_http_info(
        store_name, release_id, owner_name, app_name, **kwargs)  # noqa: E501
    return outcome
def storeReleases_getPublishError_with_http_info(self, store_name, release_id, owner_name, app_name, **kwargs):  # noqa: E501
    """storeReleases_getPublishError  # noqa: E501

    Return the Error Details of release which failed in publishing.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.storeReleases_getPublishError_with_http_info(store_name, release_id, owner_name, app_name, async=True)
    >>> result = thread.get()

    :param async bool
    :param string store_name: The name of the store (required)
    :param number release_id: The id of the release (required)
    :param string owner_name: The name of the owner (required)
    :param string app_name: The name of the application (required)
    :return: ErrorDetails
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): `async` is used as an identifier here; it is a reserved
    # keyword from Python 3.7, so this code requires Python <= 3.6 / 2.x.
    all_params = ['store_name', 'release_id', 'owner_name', 'app_name']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # `locals()` captures self + the named parameters above; recognized
    # kwargs are merged in and unknown ones rejected.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method storeReleases_getPublishError" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'store_name' is set
    if ('store_name' not in params or
            params['store_name'] is None):
        raise ValueError("Missing the required parameter `store_name` when calling `storeReleases_getPublishError`")  # noqa: E501
    # verify the required parameter 'release_id' is set
    if ('release_id' not in params or
            params['release_id'] is None):
        raise ValueError("Missing the required parameter `release_id` when calling `storeReleases_getPublishError`")  # noqa: E501
    # verify the required parameter 'owner_name' is set
    if ('owner_name' not in params or
            params['owner_name'] is None):
        raise ValueError("Missing the required parameter `owner_name` when calling `storeReleases_getPublishError`")  # noqa: E501
    # verify the required parameter 'app_name' is set
    if ('app_name' not in params or
            params['app_name'] is None):
        raise ValueError("Missing the required parameter `app_name` when calling `storeReleases_getPublishError`")  # noqa: E501

    collection_formats = {}

    # Substitute the four path placeholders in the URL template below.
    path_params = {}
    if 'store_name' in params:
        path_params['store_name'] = params['store_name']  # noqa: E501
    if 'release_id' in params:
        path_params['release_id'] = params['release_id']  # noqa: E501
    if 'owner_name' in params:
        path_params['owner_name'] = params['owner_name']  # noqa: E501
    if 'app_name' in params:
        path_params['app_name'] = params['app_name']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept` (duplicate entry preserved from the generator)
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'multipart/form-data', 'application/json-patch+json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['APIToken']  # noqa: E501

    return self.api_client.call_api(
        '/v0.1/apps/{owner_name}/{app_name}/distribution_stores/{store_name}/releases/{release_id}/publish_error_details', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ErrorDetails',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def storeReleases_get(self, store_name, release_id, owner_name, app_name, **kwargs):  # noqa: E501
    """storeReleases_get  # noqa: E501

    Return releases published in a store for releaseId and storeId.

    Synchronous by default; pass async=True to get the request thread back
    instead (call .get() on it for the result).

    :param async bool
    :param string store_name: The name of the store (required)
    :param string release_id: The ID of the release (required)
    :param string owner_name: The name of the owner (required)
    :param string app_name: The name of the application (required)
    :return: ErrorDetails, or the request thread when async=True.
    """
    kwargs['_return_http_data_only'] = True
    # Both sync and async modes issue the identical underlying call; the
    # helper itself returns a thread in async mode and data otherwise.
    outcome = self.storeReleases_get_with_http_info(
        store_name, release_id, owner_name, app_name, **kwargs)  # noqa: E501
    return outcome
def storeReleases_get_with_http_info(self, store_name, release_id, owner_name, app_name, **kwargs): # noqa: E501
"""storeReleases_get # noqa: E501
Return releases published in a store for releaseId and storeId # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.storeReleases_get_with_http_info(store_name, release_id, owner_name, app_name, async=True)
>>> result = thread.get()
:param async bool
:param string store_name: The name of the store (required)
:param string release_id: The name of the store (required)
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:return: ErrorDetails
If | |
from wallaby import *
import constants as c
import movement as m
import utils as u
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~HOW TO USE LFOLLOW COMMANDS~~~~~~~~~~~~~~~~~~~~~~~~
# All lfollow commands follow a certain pattern which if you learn, you can come up
# with commands without the need to look in this file. Keep in mind that these rules apply only to
# lfollow commands, but once you learn their pattern you can figure out all other patterns.
# To start off, this is the pattern:
# lfollow_[left, right, backwards]_[inside_line]_[until_left_senses_black, until right senses black, until (event)]_[smooth]([time you want the lfollow to run in ms], [starting speed for left motor], [starting speed for right motor], [refresh rate for the lfollow in ms])
# - To signify that you want to run an lfollow command, write lfollow.
# - Then, you must choose which sensor you want to lfollow with (left tophat, right tophat, or the third tophat respectively)
# - After this, everything is optional and is only required if you choose to put it in and the situation calls for it.
# - If you input time=0, then the command will not stop after it is finished.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~States~~~~~~~~~~~~~~~~~~~~~~~~
def BlackLeft():
    # True when the left tophat reading is above its black/white threshold.
    reading = analog(c.LEFT_TOPHAT)
    return reading > c.LEFT_TOPHAT_BW
def NotBlackLeft():
    # True when the left tophat reading is strictly below its threshold.
    # NOTE: a reading exactly equal to the threshold is neither Black nor
    # NotBlack (original used strict < and > too).
    reading = analog(c.LEFT_TOPHAT)
    return reading < c.LEFT_TOPHAT_BW
def BlackRight():
    # True when the right tophat reading is above its black/white threshold.
    reading = analog(c.RIGHT_TOPHAT)
    return reading > c.RIGHT_TOPHAT_BW
def NotBlackRight():
    # True when the right tophat reading is strictly below its threshold.
    reading = analog(c.RIGHT_TOPHAT)
    return reading < c.RIGHT_TOPHAT_BW
def BlackThird():
    # True when the third tophat reading is above its black/white threshold.
    reading = analog(c.THIRD_TOPHAT)
    return reading > c.THIRD_TOPHAT_BW
def NotBlackThird():
    # True when the third tophat reading is strictly below its threshold.
    reading = analog(c.THIRD_TOPHAT)
    return reading < c.THIRD_TOPHAT_BW
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Basic Align Functions~~~~~~~~~~~~~~~~~~~~~~~~
def align_close():
# Aligns completely on the side of the line closest to the claw
print "Starting align_close()"
left_backwards_until_white()
right_backwards_until_white()
right_forwards_until_black()
left_forwards_until_black()
print "Aligned to close side of line\n"
def align_close_smart():
    # Aligns completely on the side of the line closest to the claw.
    # "Smart": each wheel moves in whichever direction its own sensor says
    # is needed, and the right-wheel move is timed; a long correction
    # (> 0.3 s) suggests the robot was badly skewed, so the left wheel is
    # re-corrected once more.
    print "Starting align_close_smart()"
    starting_left_time = seconds()
    if BlackLeft():
        left_backwards_until_white()
    else:
        left_forwards_until_black()
    total_left_time = seconds() - starting_left_time  # measured but only printed/compared, not returned
    starting_right_time = seconds()
    if BlackRight():
        right_backwards_until_white()
    else:
        right_forwards_until_black()
    total_right_time = seconds() - starting_right_time
    print "Second motor run time: " + str(total_right_time)
    if total_right_time > .3:
        print "Another align is probably necessary here.\n"
        # Re-correct the left wheel. NOTE(review): indentation was lost in
        # this source; this re-align is assumed to be nested under the
        # 0.3 s check (mirroring align_far_smart) — confirm against the
        # original file.
        if BlackLeft():
            left_backwards_until_white()
        else:
            left_forwards_until_black()
    print "Aligned to close side of line\n"
def align_far(left_first=True):
# Aligns completely on the side of the line closest to the camera
print "Starting align_far()"
if left_first == True:
right_forwards_until_white()
left_forwards_until_white()
left_backwards_until_black()
right_backwards_until_black()
else:
left_forwards_until_white()
right_forwards_until_white()
right_backwards_until_black()
left_backwards_until_black()
print "Aligned to far side of line\n"
def align_far_smart():
# Aligns completely on the side of the line closest to the camera
print "Starting align_far_smart()"
if BlackLeft() and BlackRight():
drive_until_both_white()
starting_left_time = seconds()
if BlackLeft():
left_forwards_until_white()
else:
left_backwards_until_black()
total_left_time = seconds() - starting_left_time
starting_right_time = seconds()
if BlackRight():
right_forwards_until_white()
else:
right_backwards_until_black()
total_right_time = seconds() - starting_right_time
print "Time difference: " + str(abs(total_left_time - total_right_time))
if abs(total_left_time - total_right_time) > .5:
print "Woah there! We probably need to do another align here./n"
if total_left_time > total_right_time:
if BlackRight():
right_forwards_until_white()
else:
right_backwards_until_black()
else:
if BlackLeft():
left_forwards_until_white()
else:
left_backwards_until_black()
print "Aligned to far side of line\n"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Single Motor Align Functions~~~~~~~~~~~~~~~~~~~~~~~~
def left_forwards_until_black(time=c.SAFETY_TIME):
# Left motor goes forwards until right tophat senses black
print "Starting left_forwards_until_black()"
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackLeft():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_forwards_until_white(time=c.SAFETY_TIME):
# Left motor goes forwards until right tophat senses white
print "Starting left_forwards_until_white()"
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackLeft():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_forwards_until_right_senses_black(time=c.SAFETY_TIME):
print "Starting left_forwards_until_right_senses_black()"
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackRight():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_forwards_until_right_senses_white(time=c.SAFETY_TIME):
print "Starting left_forwards_until_right_senses_white()"
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackRight():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_forwards_until_third_senses_black(time=c.SAFETY_TIME):
# Left motor goes forwards until right tophat senses white
print "Starting left_forwards_until_third_senses_black()"
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackThird():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_forwards_until_third_senses_white(time=c.SAFETY_TIME):
# Left motor goes forwards until right tophat senses white
print "Starting left_forwards_until_third_senses_white()"
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackThird():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_forwards_until_black(time=c.SAFETY_TIME):
# Right motor goes forwards until right tophat senses black
print "Starting right_forwards_until_black()"
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackRight():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_forwards_until_white(time=c.SAFETY_TIME):
# Right motor goes forwards until right tophat senses white
print "Starting right_forwards_until_white()"
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackRight():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_forwards_until_left_senses_black(time=c.SAFETY_TIME):
print "Starting right_forwards_until_left_senses_black()"
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackLeft():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_forwards_until_left_senses_white(time=c.SAFETY_TIME):
print "Starting right_forwards_until_left_senses_white()"
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackLeft():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_forwards_until_third_senses_black(time=c.SAFETY_TIME):
# Left motor goes forwards until right tophat senses white
print "Starting left_forwards_until_third_senses_black()"
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackThird():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_forwards_until_third_senses_white(time=c.SAFETY_TIME):
# Left motor goes forwards until right tophat senses white
print "Starting left_forwards_until_third_senses_white()"
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackThird():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_backwards_until_black(time=c.SAFETY_TIME):
# Left motor goes backwards until left tophat senses black
print "Starting left_backwards_until_black()"
m.av(c.LEFT_MOTOR, -1 * c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackLeft():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_backwards_until_white(time=c.SAFETY_TIME):
# Left motor goes backwards until the left tophat senses white
print "Starting left_backwards_until_white()"
m.av(c.LEFT_MOTOR, -1 * c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackLeft():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_backwards_until_right_senses_black(time=c.SAFETY_TIME):
# Left motor goes backwards until right tophat senses black
print "Starting left_backwards_until_right_senses_black()"
m.av(c.LEFT_MOTOR, -1 * c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackRight():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_backwards_until_right_senses_white(time=c.SAFETY_TIME):
# Left motor goes backwards until the right tophat senses white
print "Starting left_backwards_until_right_senses_white()"
m.av(c.LEFT_MOTOR, -1 * c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackRight():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_backwards_until_third_senses_black(time=c.SAFETY_TIME):
# Left motor goes backwards until third tophat senses white
print "Starting left_backwards_until_third_senses_black()"
m.av(c.LEFT_MOTOR, -c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackThird():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def left_backwards_until_third_senses_white(time=c.SAFETY_TIME):
# Left motor goes backwards until third tophat senses white
print "Starting left_backwards_until_third_senses_white()"
m.av(c.LEFT_MOTOR, -c.BASE_LM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackThird():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_backwards_until_black(time=c.SAFETY_TIME):
# Right motor goes back until right tophat senses black
print "Starting right_backwards_until_black()"
m.av(c.RIGHT_MOTOR, -1 * c.BASE_RM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and NotBlackRight():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_backwards_until_white(time=c.SAFETY_TIME):
# Right motor goes back until right tophat senses white
print "Starting right_backwards_until_white()"
m.av(c.RIGHT_MOTOR, -1 * c.BASE_RM_POWER)
if time == 0:
time = c.SAFETY_TIME_NO_STOP
sec = seconds() + time / 1000.0
while seconds() < sec and BlackRight():
pass
if time != c.SAFETY_TIME_NO_STOP:
m.deactivate_motors()
def right_backwards_until_left_senses_black(time=c.SAFETY_TIME):
# Right motor goes back until left tophat senses black
print "Starting right_backwards_until_left_senses_black()"
| |
<filename>pineboolib/plugins/sql/flqpsql.py
from PyQt5.QtCore import QTime, QDate, QDateTime, Qt # type: ignore
from PyQt5.Qt import qWarning, QDomDocument, QRegExp # type: ignore
from PyQt5.QtWidgets import QMessageBox, QProgressDialog, QWidget # type: ignore
from pineboolib.core.utils.utils_base import text2bool, auto_qt_translate_text
from pineboolib.application.utils.check_dependencies import check_dependencies
from pineboolib.application.database.pnsqlquery import PNSqlQuery
from pineboolib.application.database.pnsqlcursor import PNSqlCursor
from pineboolib.application.metadata.pnfieldmetadata import PNFieldMetaData
from pineboolib.fllegacy.flutil import FLUtil
from sqlalchemy import create_engine # type: ignore
import traceback
from pineboolib.application import project
from pineboolib import logging
from typing import Iterable, Optional, Union, List, Dict, Any, cast
logger = logging.getLogger(__name__)
class FLQPSQL(object):
    """PostgreSQL SQL driver for pineboo, backed by psycopg2 plus an optional
    SQLAlchemy engine/session created on connect()."""

    version_: str  # driver version string, set in __init__
    conn_: Any = None  # live psycopg2 connection, set by connect()
    name_: str  # driver name ("FLQPSQL")
    alias_: str  # human-readable driver alias
    errorList = None  # list of collected errors (initialized in __init__)
    lastError_: Optional[str]  # last error text, set via setLastError()
    db_ = None
    mobile_: bool = False
    pure_python_: bool = False
    defaultPort_: int  # default PostgreSQL TCP port (5432)
    engine_ = None  # SQLAlchemy engine, created in connect()
    session_ = None  # SQLAlchemy session, created lazily in session()
    declarative_base_ = None  # declarative base, created lazily in declarative_base()
def __init__(self):
self.version_ = "0.8"
self.name_ = "FLQPSQL"
self.open_ = False
self.errorList = []
self.alias_ = "PostgreSQL (PSYCOPG2)"
self._dbname = None
self.mobile_ = False
self.pure_python_ = False
self.defaultPort_ = 5432
self.engine_ = None
self.session_ = None
self.declarative_base_ = None
self.lastError_ = None
def useThreads(self) -> bool:
return True
def useTimer(self) -> bool:
return False
def version(self) -> str:
return self.version_
def driverName(self) -> str:
return self.name_
def isOpen(self) -> bool:
return self.open_
def pure_python(self) -> bool:
return self.pure_python_
def safe_load(self) -> Any:
return check_dependencies(
{"psycopg2": "python3-psycopg2", "sqlalchemy": "sqlAlchemy"}, False
)
def mobile(self) -> bool:
return self.mobile_
def DBName(self) -> Any:
return self._dbname
    def connect(self, db_name, db_host, db_port, db_userName, db_password) -> Any:
        """Open a psycopg2 connection (and SQLAlchemy engine) to the database.

        If the database does not exist, optionally offers (via a Qt dialog) to
        create it, then retries by calling itself recursively.
        Returns the live connection on success, False on failure.
        """
        self._dbname = db_name
        # Hard requirement check: raises/aborts if psycopg2 or sqlalchemy are missing.
        check_dependencies({"psycopg2": "python3-psycopg2", "sqlalchemy": "sqlAlchemy"})
        import psycopg2  # type: ignore
        from psycopg2.extras import LoggingConnection  # type: ignore
        logger = logging.getLogger(self.alias_)
        logger.debug = logger.trace  # type: ignore # Send Debug output to Trace
        conninfostr = "dbname=%s host=%s port=%s user=%s password=%s connect_timeout=5" % (
            db_name,
            db_host,
            db_port,
            db_userName,
            db_password,
        )
        try:
            # LoggingConnection mirrors every statement to the logger above.
            self.conn_ = psycopg2.connect(conninfostr, connection_factory=LoggingConnection)
            self.conn_.initialize(logger)
            self.engine_ = create_engine(
                "postgresql+psycopg2://%s:%s@%s:%s/%s"
                % (db_userName, db_password, db_host, db_port, db_name)
            )
        except psycopg2.OperationalError as e:
            if project._splash:
                project._splash.hide()
            # NOTE(review): mixes project._DGI (test) with project.DGI (use) -- confirm intended.
            if project._DGI and not project.DGI.localDesktop():
                return False
            # Missing-database error (English or Spanish server locale).
            if "does not exist" in str(e) or "no existe" in str(e):
                ret = QMessageBox.warning(
                    QWidget(),
                    "Pineboo",
                    "La base de datos %s no existe.\n¿Desea crearla?" % db_name,
                    cast(QMessageBox, QMessageBox.Ok | QMessageBox.No),
                )
                if ret == QMessageBox.No:
                    return False
                else:
                    # Connect to the maintenance DB "postgres" to issue CREATE DATABASE.
                    conninfostr2 = (
                        "dbname=postgres host=%s port=%s user=%s password=%s connect_timeout=5"
                        % (db_host, db_port, db_userName, db_password)
                    )
                    try:
                        tmpConn = psycopg2.connect(conninfostr2)
                        # CREATE DATABASE cannot run inside a transaction block.
                        tmpConn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
                        cursor = tmpConn.cursor()
                        try:
                            cursor.execute("CREATE DATABASE %s" % db_name)
                        except Exception:
                            print("ERROR: FLPSQL.connect", traceback.format_exc())
                            cursor.execute("ROLLBACK")
                            cursor.close()
                            return False
                        cursor.close()
                        # Retry the original connection now that the DB exists.
                        return self.connect(db_name, db_host, db_port, db_userName, db_password)
                    except Exception:
                        qWarning(traceback.format_exc())
                        QMessageBox.information(
                            QWidget(),
                            "Pineboo",
                            "ERROR: No se ha podido crear la Base de Datos %s" % db_name,
                            QMessageBox.Ok,
                        )
                        print("ERROR: No se ha podido crear la Base de Datos %s" % db_name)
                        return False
            else:
                QMessageBox.information(
                    QWidget(), "Pineboo", "Error de conexión\n%s" % str(e), QMessageBox.Ok
                )
                return False
        # self.conn_.autocommit = True  # May need to be set to False for
        # transactions to work.
        self.conn_.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        if self.conn_:
            self.open_ = True
            try:
                self.conn_.set_client_encoding("UTF8")
            except Exception:
                qWarning(traceback.format_exc())
        return self.conn_
def engine(self) -> Any:
return self.engine_
def session(self) -> Any:
if self.session_ is None:
from sqlalchemy.orm import sessionmaker # type: ignore
# from sqlalchemy import event
# from pineboolib.pnobjectsfactory import before_commit, after_commit, after_flush
Session = sessionmaker(bind=self.engine())
self.session_ = Session()
# event.listen(Session, 'before_commit', before_commit, self.session_)
# event.listen(Session, 'after_commit', after_commit, self.session_)
# event.listen(Session, 'after_flush', after_flush)
return self.session_
def declarative_base(self) -> Any:
if self.declarative_base_ is None:
from sqlalchemy.ext.declarative import declarative_base # type: ignore
self.declarative_base_ = declarative_base()
return self.declarative_base_
def formatValueLike(self, type_, v: Any, upper) -> str:
util = FLUtil()
res = "IS NULL"
if type_ == "bool":
s = str(v[0]).upper()
if s == str(util.translate("application", "Sí")[0]).upper():
res = "='t'"
elif str(util.translate("application", "No")[0]).upper():
res = "='f'"
elif type_ == "date":
dateamd = util.dateDMAtoAMD(str(v))
if dateamd is None:
dateamd = ""
res = "::text LIKE '%%" + dateamd + "'"
elif type_ == "time":
t = v.toTime()
res = "::text LIKE '" + t.toString(Qt.ISODate) + "%%'"
else:
res = str(v)
if upper:
res = "%s" % res.upper()
res = "::text LIKE '" + res + "%%'"
return res
    def formatValue(self, type_, v: Any, upper) -> Union[bool, str, None]:
        """Format a python value for interpolation into SQL, by field type.

        Returns "NULL" for None, a quoted literal for date/time/string/pixmap,
        the raw value for numeric types, and a bool for bool/unlock fields.
        """
        util = FLUtil()
        s: Any = None
        # if v == None:
        #     v = ""
        # TODO: psycopg2.mogrify ???
        if v is None:
            return "NULL"
        if type_ == "bool" or type_ == "unlock":
            s = text2bool(v)
        elif type_ == "date":
            # NOTE(review): `s` is still None here, so this condition is always
            # true; it was probably meant to compare `v` against "Null" -- confirm.
            if s != "Null":
                # Day-first dates ("DD-MM-YYYY") are converted to "YYYY-MM-DD".
                if len(str(v).split("-")[0]) < 3:
                    val = util.dateDMAtoAMD(v)
                else:
                    val = v
                s = "'%s'" % val
        elif type_ == "time":
            s = "'%s'" % v
        elif type_ in ("uint", "int", "double", "serial"):
            # NOTE(review): `s` is still None here, so this is always false and
            # the "0" default is unreachable; likely meant `v == "Null"` -- confirm.
            if s == "Null":
                s = "0"
            else:
                s = v
        elif type_ in ("string", "stringlist"):
            if v == "":
                s = "Null"
            else:
                if type_ == "string":
                    v = auto_qt_translate_text(v)
                    if upper and type_ == "string":
                        v = v.upper()
                s = "'%s'" % v
        elif type_ == "pixmap":
            # Escape embedded single quotes before quoting.
            if v.find("'") > -1:
                v = self.normalizeValue(v)
            s = "'%s'" % v
        else:
            s = v
        # print ("PNSqlDriver(%s).formatValue(%s, %s) = %s" % (self.name_, type_, v, s))
        return s
def canOverPartition(self) -> bool:
return True
def nextSerialVal(self, table: str, field: str) -> Any:
q = PNSqlQuery()
q.setSelect(u"nextval('" + table + "_" + field + "_seq')")
q.setFrom("")
q.setWhere("")
if not q.exec_():
qWarning("not exec sequence")
return None
if q.first():
return q.value(0)
else:
return None
def savePoint(self, n) -> bool:
if not self.isOpen():
qWarning("PSQLDriver::savePoint: Database not open")
return False
cursor = self.conn_.cursor()
try:
cursor.execute("SAVEPOINT sv_%s" % n)
except Exception:
self.setLastError("No se pudo crear punto de salvaguarda", "SAVEPOINT sv_%s" % n)
qWarning(
"PSQLDriver:: No se pudo crear punto de salvaguarda SAVEPOINT sv_%s \n %s "
% (n, traceback.format_exc())
)
return False
return True
def canSavePoint(self) -> bool:
return True
def canTransaction(self) -> bool:
return True
def rollbackSavePoint(self, n) -> bool:
if not self.isOpen():
qWarning("PSQLDriver::rollbackSavePoint: Database not open")
return False
cursor = self.conn_.cursor()
try:
cursor.execute("ROLLBACK TO SAVEPOINT sv_%s" % n)
except Exception:
self.setLastError(
"No se pudo rollback a punto de salvaguarda", "ROLLBACK TO SAVEPOINTt sv_%s" % n
)
qWarning(
"PSQLDriver:: No se pudo rollback a punto de salvaguarda ROLLBACK TO SAVEPOINT sv_%s\n %s"
% (n, traceback.format_exc())
)
return False
return True
def setLastError(self, text, command) -> None:
self.lastError_ = "%s (%s)" % (text, command)
def lastError(self) -> Optional[str]:
return self.lastError_
def commitTransaction(self) -> bool:
if not self.isOpen():
qWarning("PSQLDriver::commitTransaction: Database not open")
cursor = self.conn_.cursor()
try:
cursor.execute("COMMIT TRANSACTION")
except Exception:
self.setLastError("No se pudo aceptar la transacción", "COMMIT")
qWarning(
"PSQLDriver:: No se pudo aceptar la transacción COMMIT\n %s"
% traceback.format_exc()
)
return False
return True
def rollbackTransaction(self) -> bool:
if not self.isOpen():
qWarning("PSQLDriver::rollbackTransaction: Database not open")
cursor = self.conn_.cursor()
try:
cursor.execute("ROLLBACK TRANSACTION")
except Exception:
self.setLastError("No se pudo deshacer la transacción", "ROLLBACK")
qWarning(
"PSQLDriver:: No se pudo deshacer la transacción ROLLBACK\n %s"
% traceback.format_exc()
)
return False
return True
def transaction(self) -> bool:
if not self.isOpen():
qWarning("PSQLDriver::transaction: Database not open")
cursor = self.conn_.cursor()
try:
cursor.execute("BEGIN TRANSACTION")
except Exception:
self.setLastError("No se pudo crear la transacción", "BEGIN")
qWarning(
"PSQLDriver:: No se pudo crear la transacción BEGIN\n %s" % traceback.format_exc()
)
return False
return True
def releaseSavePoint(self, n) -> bool:
if not self.isOpen():
qWarning("PSQLDriver::releaseSavePoint: Database not open")
return False
cursor = self.conn_.cursor()
try:
cursor.execute("RELEASE SAVEPOINT sv_%s" % n)
except Exception:
self.setLastError(
"No se pudo release a punto de salvaguarda", "RELEASE SAVEPOINT sv_%s" % n
)
qWarning(
"PSQLDriver:: No se pudo release a punto de salvaguarda RELEASE SAVEPOINT sv_%s\n %s"
% (n, traceback.format_exc())
)
return False
return True
def setType(self, type_, leng=None) -> str:
if leng:
return "::%s(%s)" % (type_, leng)
else:
return "::%s" % type_
def refreshQuery(self, curname, fields, table, where, cursor, conn) -> None:
sql = "DECLARE %s NO SCROLL CURSOR WITH HOLD FOR SELECT %s FROM %s WHERE %s " % (
curname,
fields,
table,
where,
)
try:
cursor.execute(sql)
except Exception as e:
logger.error("refreshQuery: %s", e)
logger.info("SQL: %s", sql)
logger.trace("Detalle:", stack_info=True)
def refreshFetch(self, number, curname, table, cursor, fields, where_filter) -> None:
sql = "FETCH %d FROM %s" % (number, curname)
try:
cursor.execute(sql)
except Exception as e:
logger.error("refreshFetch: %s", e)
logger.info("SQL: %s", | |
import hashlib
import json
import logging
import os
import time
from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from urllib.parse import urlparse
import boto3
import ephemeral_port_reserve
import requests
import yaml
from boto3 import Session
# Filesystem locations and defaults used when assembling Spark configurations.
AWS_CREDENTIALS_DIR = '/etc/boto_cfg/'
AWS_TEMP_CREDENTIALS_PROVIDER = 'org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider'
GPU_POOLS_YAML_FILE_PATH = '/nail/srv/configs/gpu_pools.yaml'
DEFAULT_PAASTA_VOLUME_PATH = '/etc/paasta/volumes.json'
DEFAULT_SPARK_MESOS_SECRET_FILE = '/nail/etc/paasta_spark_secret'
DEFAULT_SPARK_RUN_CONFIG = '/nail/srv/configs/spark.yaml'
DEFAULT_SPARK_SERVICE = 'spark'
GPUS_HARD_LIMIT = 15
CLUSTERMAN_METRICS_YAML_FILE_PATH = '/nail/srv/configs/clusterman_metrics.yaml'
CLUSTERMAN_YAML_FILE_PATH = '/nail/srv/configs/clusterman.yaml'
# Executor sizing defaults, applied when the caller does not specify them.
DEFAULT_MAX_CORES = 4
DEFAULT_EXECUTOR_CORES = 2
DEFAULT_EXECUTOR_INSTANCES = 2
DEFAULT_EXECUTOR_MEMORY = '4g'
# Spark options owned by this module; user-supplied values for these are not honored.
NON_CONFIGURABLE_SPARK_OPTS = {
    'spark.master',
    'spark.ui.port',
    'spark.mesos.principal',
    'spark.mesos.secret',
    'spark.mesos.executor.docker.image',
    'spark.mesos.executor.docker.parameters',
    'spark.executorEnv.PAASTA_SERVICE',
    'spark.executorEnv.PAASTA_INSTANCE',
    'spark.executorEnv.PAASTA_CLUSTER',
    'spark.executorEnv.SPARK_EXECUTOR_DIRS',
    'spark.hadoop.fs.s3a.access.key',
    'spark.hadoop.fs.s3a.secret.key',
    'spark.hadoop.fs.s3a.session.token',
    'spark.kubernetes.pyspark.pythonVersion',
    'spark.kubernetes.container.image',
    'spark.kubernetes.namespace',
    'spark.kubernetes.authenticate.caCertFile',
    'spark.kubernetes.authenticate.clientKeyFile',
    'spark.kubernetes.authenticate.clientCertFile',
    'spark.kubernetes.container.image.pullPolicy',
    'spark.kubernetes.executor.label.yelp.com/paasta_service',
    'spark.kubernetes.executor.label.yelp.com/paasta_instance',
    'spark.kubernetes.executor.label.yelp.com/paasta_cluster',
    'spark.kubernetes.executor.label.paasta.yelp.com/service',
    'spark.kubernetes.executor.label.paasta.yelp.com/instance',
    'spark.kubernetes.executor.label.paasta.yelp.com/cluster',
}
K8S_AUTH_FOLDER = '/etc/spark_k8s_secrets'
# BUG FIX: use getLogger() so this module participates in the application's
# logging configuration; logging.Logger(__name__) created an unregistered,
# unconfigured logger outside the logging hierarchy.
log = logging.getLogger(__name__)
def _load_aws_credentials_from_yaml(yaml_file_path) -> Tuple[str, str, Optional[str]]:
    """Parse an AWS credentials yaml file.

    Returns (access_key_id, secret_access_key, session_token); the token is
    None when the file has no temporary-session entry.

    Raises ValueError when the file cannot be parsed. Fixes: the error message
    was missing a space between its two sentences, and the implicit exception
    chaining could still expose the underlying parse error (and file contents)
    in tracebacks despite the stated intent -- now suppressed with "from None".
    """
    with open(yaml_file_path, 'r') as yaml_file:
        try:
            credentials_yaml = yaml.safe_load(yaml_file.read())
            return (
                credentials_yaml['aws_access_key_id'],
                credentials_yaml['aws_secret_access_key'],
                credentials_yaml.get('aws_session_token', None),
            )
        except Exception as e:
            raise ValueError(
                f'Encountered {type(e)} when trying to parse AWS credentials yaml {yaml_file_path}. '
                'Suppressing further output to avoid leaking credentials.',
            ) from None
def get_aws_credentials(
    service: Optional[str] = DEFAULT_SPARK_SERVICE,
    no_aws_credentials: bool = False,
    aws_credentials_yaml: Optional[str] = None,
    profile_name: Optional[str] = None,
    session: Optional[boto3.Session] = None,
    aws_credentials_json: Optional[str] = None,
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Resolve AWS credentials from the first applicable source.

    Precedence: explicitly disabled -> yaml file -> json file -> per-service
    yaml under AWS_CREDENTIALS_DIR -> boto3 session (given or from profile).
    Returns (access_key, secret_key, session_token), any of which may be None.
    """
    if no_aws_credentials:
        return None, None, None
    if aws_credentials_yaml:
        return _load_aws_credentials_from_yaml(aws_credentials_yaml)
    if aws_credentials_json:
        with open(aws_credentials_json, 'r') as f:
            creds = json.load(f)
        return (creds.get('accessKeyId'), creds.get('secretAccessKey'), None)
    if service != DEFAULT_SPARK_SERVICE:
        service_credentials_path = os.path.join(AWS_CREDENTIALS_DIR, f'{service}.yaml')
        if os.path.exists(service_credentials_path):
            return _load_aws_credentials_from_yaml(service_credentials_path)
        if not session:
            log.warning(
                f'Did not find service AWS credentials at {service_credentials_path}. '
                'Falling back to user credentials.',
            )
    session = session or Session(profile_name=profile_name)
    creds = session.get_credentials()
    return (
        creds.access_key,
        creds.secret_key,
        creds.token,
    )
def _pick_random_port(app_name):
    """Return a reserved port, with a preference derived from the app name and time."""
    seed = f'{app_name}_{time.time()}'.encode('utf-8')
    digest_value = int(hashlib.sha1(seed).hexdigest(), 16)
    preferred_port = 33000 + (digest_value % 25000)
    return ephemeral_port_reserve.reserve('0.0.0.0', preferred_port)
def _get_mesos_docker_volumes_conf(
spark_opts: Mapping[str, str],
extra_volumes: Optional[List[Mapping[str, str]]] = None,
load_paasta_default_volumes: bool = False,
) -> Dict[str, str]:
"""return volume str to be configured for spark.mesos.executor.docker.volume
if no extra_volumes and volumes_from_spark_opts, it will read from
DEFAULT_PAASTA_VOLUME_PATH and parse it.
Also spark required to have `/etc/passwd` and `/etc/group` being mounted as
well. This will ensure it does have those files in the list.
"""
volume_str = spark_opts.get('spark.mesos.executor.docker.volumes')
volumes = volume_str.split(',') if volume_str else []
if load_paasta_default_volumes:
with open(DEFAULT_PAASTA_VOLUME_PATH) as fp:
extra_volumes = (extra_volumes or []) + json.load(fp)['volumes']
for volume in (extra_volumes or []):
if os.path.exists(volume['hostPath']):
volumes.append(f"{volume['hostPath']}:{volume['containerPath']}:{volume['mode'].lower()}")
else:
log.warning(f"Path {volume['hostPath']} does not exist on this host. Skipping this bindings.")
distinct_volumes = set(volumes)
# docker.parameters user needs /etc/passwd and /etc/group to be mounted
for required in ['/etc/passwd', '/etc/group']:
full_mount_str = f'{required}:{required}:ro'
if full_mount_str not in distinct_volumes:
distinct_volumes.add(full_mount_str)
volume_str = ','.join(distinct_volumes) # ensure we don't have duplicated files
return {'spark.mesos.executor.docker.volumes': volume_str}
def _append_sql_shuffle_partitions_conf(spark_opts: Dict[str, str]) -> Dict[str, str]:
if 'spark.sql.shuffle.partitions' in spark_opts:
return spark_opts
num_partitions = 2 * (
int(spark_opts.get('spark.cores.max', 0)) or
int(spark_opts.get('spark.executor.instances', 0)) * int(spark_opts.get('spark.executor.cores', 0))
)
log.warning(
f'spark.sql.shuffle.partitions has been set to {num_partitions} '
'to be equal to twice the number of requested cores, but you should '
'consider setting a higher value if necessary.'
' Follow y/spark for help on partition sizing',
)
spark_opts['spark.sql.shuffle.partitions'] = str(num_partitions)
return spark_opts
def _append_event_log_conf(
spark_opts: Dict[str, str],
access_key: Optional[str],
secret_key: Optional[str],
session_token: Optional[str] = None,
) -> Dict[str, str]:
enabled = spark_opts.setdefault('spark.eventLog.enabled', 'true').lower()
if enabled != 'true':
# user configured to disable log, not continue
return spark_opts
event_log_dir = spark_opts.get('spark.eventLog.dir')
if event_log_dir is not None:
# we don't want to overwrite user's settings
return spark_opts
try:
with open(DEFAULT_SPARK_RUN_CONFIG) as fp:
spark_run_conf = yaml.safe_load(fp.read())
except Exception as e:
log.warning(f'Failed to load {DEFAULT_SPARK_RUN_CONFIG}: {e}, disable event log')
return spark_opts
try:
account_id = (
boto3.client(
'sts',
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
)
.get_caller_identity()
.get('Account')
)
except Exception as e:
log.warning('Failed to identify account ID, error: {}'.format(str(e)))
spark_opts['spark.eventLog.enabled'] = 'false'
return spark_opts
for conf in spark_run_conf.get('environments', {}).values():
if account_id == conf['account_id']:
spark_opts['spark.eventLog.enabled'] = 'true'
spark_opts['spark.eventLog.dir'] = conf['default_event_log_dir']
return spark_opts
log.warning(f'Disable event log because No preset event log dir for account: {account_id}')
spark_opts['spark.eventLog.enabled'] = 'false'
return spark_opts
def _adjust_spark_requested_resources(
    user_spark_opts: Dict[str, str],
    cluster_manager: str,
    pool: str,
) -> Dict[str, str]:
    """Fill in default executor sizing and, for GPU jobs on mesos, adjust
    spark.task.cpus / spark.executor.cores / spark.cores.max to match the
    CPU:GPU ratio of the requested pool.

    Fixes: two messages were missing their f-prefix (placeholders were printed
    literally), and the memory warning divided by 32 instead of 1024 when
    rendering gigabytes.

    Raises ValueError on inconsistent sizing or unknown GPU pools.
    NOTE(review): a cluster_manager other than 'mesos'/'kubernetes' leaves
    max_cores unbound (NameError) -- confirm callers always pass one of the two.
    """
    executor_memory = user_spark_opts.setdefault('spark.executor.memory', DEFAULT_EXECUTOR_MEMORY)
    executor_cores = int(user_spark_opts.setdefault('spark.executor.cores', str(DEFAULT_EXECUTOR_CORES)))
    if cluster_manager == 'mesos':
        max_cores = int(user_spark_opts.setdefault('spark.cores.max', str(DEFAULT_MAX_CORES)))
        executor_instances = max_cores / executor_cores
    elif cluster_manager == 'kubernetes':
        executor_instances = int(
            user_spark_opts.setdefault('spark.executor.instances', str(DEFAULT_EXECUTOR_INSTANCES)),
        )
        max_cores = executor_instances * executor_cores
    if max_cores < executor_cores:
        raise ValueError(f'Total number of cores {max_cores} is less than per-executor cores {executor_cores}')
    memory = parse_memory_string(executor_memory)
    if memory > 32 * 1024:
        # BUG FIX: was a plain string (no f-prefix) and divided by 32.
        # Assumes parse_memory_string returns MiB -- TODO confirm.
        log.warning(f'Executor memory is {memory / 1024}g, greater than recommended value: 32g')
    num_gpus = int(user_spark_opts.get('spark.mesos.gpus.max', '0'))
    task_cpus = int(user_spark_opts.get('spark.task.cpus', '1'))
    # we can skip this step if user is not using gpu or do not configure
    # task cpus and executor cores
    if num_gpus == 0 or (task_cpus != 1 and executor_cores != 1):
        return user_spark_opts
    if num_gpus != 0 and cluster_manager != 'mesos':
        raise ValueError('spark.mesos.gpus.max is only available for mesos')
    if num_gpus > GPUS_HARD_LIMIT:
        # BUG FIX: message was missing its f-prefix.
        raise ValueError(
            f'Requested {num_gpus} GPUs, which exceeds hard limit of {GPUS_HARD_LIMIT}',
        )
    gpus_per_inst = 0
    cpus_per_inst = 0
    with open(GPU_POOLS_YAML_FILE_PATH) as fp:
        pool_def = yaml.safe_load(fp).get(pool)
        if pool_def is not None:
            gpus_per_inst = int(pool_def['gpus_per_instance'])
            cpus_per_inst = int(pool_def['cpus_per_instance'])
            if gpus_per_inst == 0 or cpus_per_inst == 0:
                raise ValueError(
                    'Unable to adjust spark.task.cpus and spark.executor.cores because '
                    f'pool {pool} does not appear to have any GPUs and/or CPUs',
                )
        else:
            raise ValueError(
                'Unable to adjust spark.task.cpus and spark.executor.cores because '
                f"pool \"{pool}\" not found in gpu_pools",
            )
    instances = num_gpus // gpus_per_inst
    if (instances * gpus_per_inst) != num_gpus:
        raise ValueError(
            'Unable to adjust spark.task.cpus and spark.executor.cores because '
            'spark.mesos.gpus.max=%i is not a multiple of %i'
            % (num_gpus, gpus_per_inst),
        )
    cpus_per_gpu = cpus_per_inst // gpus_per_inst
    total_cpus = cpus_per_gpu * num_gpus
    num_cpus = (
        int(max_cores) if cluster_manager == 'mesos'
        else int(executor_instances) * int(executor_cores)
    )
    if num_cpus != total_cpus:
        log.warning(
            f'spark.cores.max has been adjusted to {total_cpus}. '
            'See y/horovod for sizing of GPU pools.',
        )
    user_spark_opts.update({
        # Mesos limitation - need this to access GPUs
        'spark.mesos.containerizer': 'mesos',
        # For use by horovod.spark.run(...) in place of num_proc
        'spark.default.parallelism': str(num_gpus),
        # we need to adjust the requirements to meet the gpus requriements
        'spark.task.cpus': str(cpus_per_gpu),
        'spark.executor.cores': str(cpus_per_gpu * gpus_per_inst),
        'spark.cores.max': str(total_cpus),
    })
    return user_spark_opts
def find_spark_master(paasta_cluster):
    """Finds the Mesos leader of a PaaSTA cluster.

    :param str paasta_cluster: Name of a PaaSTA cluster.
    :return str: The Mesos cluster manager to connect to. Callers are expected to check result.
    """
    try:
        redirect = requests.get(f'http://paasta-{paasta_cluster}.yelp:5050/redirect')
    except requests.RequestException:
        raise ValueError(f'Cannot find spark master for cluster {paasta_cluster}')
    leader_host = urlparse(redirect.url).hostname
    return f'mesos://{leader_host}:5050'
def _get_mesos_spark_env(
    user_spark_opts: Mapping[str, Any],
    paasta_cluster: str,
    paasta_pool: str,
    paasta_service: str,
    paasta_instance: str,
    docker_img: str,
    extra_volumes: Optional[List[Mapping[str, str]]],
    extra_docker_params: Optional[Mapping[str, str]],
    with_secret: bool,
    needs_docker_cfg: bool,
    mesos_leader: Optional[str],
    load_paasta_default_volumes: bool,
) -> Dict[str, str]:
    """Assemble the Spark configuration for running executors on Mesos."""
    if mesos_leader is None:
        master_url = find_spark_master(paasta_cluster)
    else:
        master_url = f'mesos://{mesos_leader}'
    docker_parameters = [
        # Limit a container's cpu usage
        f"cpus={user_spark_opts['spark.executor.cores']}",
        f'label=paasta_service={paasta_service}',
        f'label=paasta_instance={paasta_instance}',
    ]
    for key, value in (extra_docker_params or {}).items():
        docker_parameters.append(f'{key}={value}')
    auth_configs = {}
    if with_secret:
        # Mesos authentication requires the shared framework secret on disk.
        try:
            with open(DEFAULT_SPARK_MESOS_SECRET_FILE, 'r') as f:
                secret = f.read()
        except IOError as e:
            log.error(
                'Cannot load mesos secret from %s' % DEFAULT_SPARK_MESOS_SECRET_FILE,
            )
            raise ValueError(str(e))
        auth_configs = {'spark.mesos.secret': secret}
    spark_env = {
        'spark.master': master_url,
        'spark.executorEnv.PAASTA_SERVICE': paasta_service,
        'spark.executorEnv.PAASTA_INSTANCE': paasta_instance,
        'spark.executorEnv.PAASTA_CLUSTER': paasta_cluster,
        'spark.executorEnv.PAASTA_INSTANCE_TYPE': 'spark',
        'spark.executorEnv.SPARK_USER': 'root',
        'spark.executorEnv.SPARK_EXECUTOR_DIRS': '/tmp',
        'spark.mesos.executor.docker.parameters': ','.join(docker_parameters),
        'spark.mesos.executor.docker.image': docker_img,
        'spark.mesos.constraints': f'pool:{paasta_pool}',
        'spark.mesos.executor.docker.forcePullImage': 'true',
        'spark.mesos.principal': 'spark',
        **auth_configs,
        **_get_mesos_docker_volumes_conf(
            user_spark_opts, extra_volumes,
            load_paasta_default_volumes,
        ),
    }
    if needs_docker_cfg:
        spark_env['spark.mesos.uris'] = 'file:///root/.dockercfg'
    return spark_env
def _get_k8s_spark_env(
    paasta_cluster: str,
    paasta_service: str,
    paasta_instance: str,
    docker_img: str,
    volumes: Optional[List[Mapping[str, str]]],
    paasta_pool: str,
) -> Dict[str, str]:
    """Build the Spark configuration dict for running executors on Kubernetes.

    Executor pods are labelled with the paasta service/instance/cluster and
    pinned to *paasta_pool* via a node selector; each entry of *volumes*
    ('containerPath', 'hostPath', 'mode') becomes a hostPath volume mount.
    """
    auth_prefix = f'{K8S_AUTH_FOLDER}/{paasta_cluster}'
    env = {
        'spark.master': f'k8s://https://k8s.paasta-{paasta_cluster}.yelp:16443',
        'spark.executorEnv.PAASTA_SERVICE': paasta_service,
        'spark.executorEnv.PAASTA_INSTANCE': paasta_instance,
        'spark.executorEnv.PAASTA_CLUSTER': paasta_cluster,
        'spark.executorEnv.PAASTA_INSTANCE_TYPE': 'spark',
        'spark.executorEnv.SPARK_EXECUTOR_DIRS': '/tmp',
        'spark.kubernetes.pyspark.pythonVersion': '3',
        'spark.kubernetes.container.image': docker_img,
        'spark.kubernetes.namespace': 'paasta-spark',
        'spark.kubernetes.authenticate.caCertFile': f'{auth_prefix}-ca.crt',
        'spark.kubernetes.authenticate.clientKeyFile': f'{auth_prefix}-client.key',
        'spark.kubernetes.authenticate.clientCertFile': f'{auth_prefix}-client.crt',
        'spark.kubernetes.container.image.pullPolicy': 'Always',
        'spark.kubernetes.executor.label.yelp.com/paasta_service': paasta_service,
        'spark.kubernetes.executor.label.yelp.com/paasta_instance': paasta_instance,
        'spark.kubernetes.executor.label.yelp.com/paasta_cluster': paasta_cluster,
        'spark.kubernetes.executor.label.paasta.yelp.com/service': paasta_service,
        'spark.kubernetes.executor.label.paasta.yelp.com/instance': paasta_instance,
        'spark.kubernetes.executor.label.paasta.yelp.com/cluster': paasta_cluster,
        'spark.kubernetes.node.selector.yelp.com/pool': paasta_pool,
        'spark.kubernetes.executor.label.yelp.com/pool': paasta_pool,
    }
    for idx, vol in enumerate(volumes or []):
        # Volume names are just the positional index, matching the mount/options pairs.
        key_prefix = f'spark.kubernetes.executor.volumes.hostPath.{idx}'
        env[f'{key_prefix}.mount.path'] = vol['containerPath']
        env[f'{key_prefix}.mount.readOnly'] = 'true' if vol['mode'].lower() == 'ro' else 'false'
        env[f'{key_prefix}.options.path'] = vol['hostPath']
    return env
def stringify_spark_env(spark_env: Mapping[str, str]) -> str:
    """Render a Spark config mapping as a space-separated ``--conf k=v`` CLI string."""
    flags = (f'--conf {key}={value}' for key, value in spark_env.items())
    return ' '.join(flags)
def _filter_user_spark_opts(user_spark_opts: Mapping[str, str]) -> MutableMapping[str, str]:
non_configurable_opts = set(user_spark_opts.keys()) & set(NON_CONFIGURABLE_SPARK_OPTS)
if non_configurable_opts:
log.warning(f'The following options are | |
"""
pass
def list_applied_schema_arns(DirectoryArn=None, NextToken=None, MaxResults=None):
    """
    Lists schemas applied to a directory.
    See also: AWS API Documentation
    :example: response = client.list_applied_schema_arns(
    DirectoryArn='string',
    NextToken='string',
    MaxResults=123
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The ARN of the directory you are listing.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
    'SchemaArns': [
    'string',
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    # Documentation stub: no implementation here (returns None).
    pass
def list_attached_indices(DirectoryArn=None, TargetReference=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
    """
    Lists indices attached to an object.
    See also: AWS API Documentation
    :example: response = client.list_attached_indices(
    DirectoryArn='string',
    TargetReference={
    'Selector': 'string'
    },
    NextToken='string',
    MaxResults=123,
    ConsistencyLevel='SERIALIZABLE'|'EVENTUAL'
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The ARN of the directory.
    :type TargetReference: dict
    :param TargetReference: [REQUIRED]
    A reference to the object that has indices attached.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects . You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: The consistency level to use for this operation.
    :rtype: dict
    :return: {
    'IndexAttachments': [
    {
    'IndexedAttributes': [
    {
    'Key': {
    'SchemaArn': 'string',
    'FacetName': 'string',
    'Name': 'string'
    },
    'Value': {
    'StringValue': 'string',
    'BinaryValue': b'bytes',
    'BooleanValue': True|False,
    'NumberValue': 'string',
    'DatetimeValue': datetime(2015, 1, 1)
    }
    },
    ],
    'ObjectIdentifier': 'string'
    },
    ],
    'NextToken': 'string'
    }
    """
    # Documentation stub: no implementation here (returns None).
    pass
def list_development_schema_arns(NextToken=None, MaxResults=None):
    """
    Retrieves each Amazon Resource Name (ARN) of schemas in the development state.
    See also: AWS API Documentation
    :example: response = client.list_development_schema_arns(
    NextToken='string',
    MaxResults=123
    )
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
    'SchemaArns': [
    'string',
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    # Documentation stub: no implementation here (returns None).
    pass
def list_directories(NextToken=None, MaxResults=None, state=None):
    """
    Lists directories created within an account.
    See also: AWS API Documentation
    :example: response = client.list_directories(
    NextToken='string',
    MaxResults=123,
    state='ENABLED'|'DISABLED'|'DELETED'
    )
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :type state: string
    :param state: The state of the directories in the list. Can be either Enabled, Disabled, or Deleted.
    :rtype: dict
    :return: {
    'Directories': [
    {
    'Name': 'string',
    'DirectoryArn': 'string',
    'State': 'ENABLED'|'DISABLED'|'DELETED',
    'CreationDateTime': datetime(2015, 1, 1)
    },
    ],
    'NextToken': 'string'
    }
    """
    # Documentation stub: no implementation here (returns None).
    pass
def list_facet_attributes(SchemaArn=None, Name=None, NextToken=None, MaxResults=None):
    """
    Retrieves attributes attached to the facet.
    See also: AWS API Documentation
    :example: response = client.list_facet_attributes(
    SchemaArn='string',
    Name='string',
    NextToken='string',
    MaxResults=123
    )
    :type SchemaArn: string
    :param SchemaArn: [REQUIRED]
    The ARN of the schema where the facet resides.
    :type Name: string
    :param Name: [REQUIRED]
    The name of the facet whose attributes will be retrieved.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
    'Attributes': [
    {
    'Name': 'string',
    'AttributeDefinition': {
    'Type': 'STRING'|'BINARY'|'BOOLEAN'|'NUMBER'|'DATETIME',
    'DefaultValue': {
    'StringValue': 'string',
    'BinaryValue': b'bytes',
    'BooleanValue': True|False,
    'NumberValue': 'string',
    'DatetimeValue': datetime(2015, 1, 1)
    },
    'IsImmutable': True|False,
    'Rules': {
    'string': {
    'Type': 'BINARY_LENGTH'|'NUMBER_COMPARISON'|'STRING_FROM_SET'|'STRING_LENGTH',
    'Parameters': {
    'string': 'string'
    }
    }
    }
    },
    'AttributeReference': {
    'TargetFacetName': 'string',
    'TargetAttributeName': 'string'
    },
    'RequiredBehavior': 'REQUIRED_ALWAYS'|'NOT_REQUIRED'
    },
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) --
    (string) --
    """
    # Documentation stub: no implementation here (returns None).
    pass
def list_facet_names(SchemaArn=None, NextToken=None, MaxResults=None):
    """
    Retrieves the names of facets that exist in a schema.
    See also: AWS API Documentation
    :example: response = client.list_facet_names(
    SchemaArn='string',
    NextToken='string',
    MaxResults=123
    )
    :type SchemaArn: string
    :param SchemaArn: [REQUIRED]
    The Amazon Resource Name (ARN) to retrieve facet names from.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
    'FacetNames': [
    'string',
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    # Documentation stub: no implementation here (returns None).
    pass
def list_incoming_typed_links(DirectoryArn=None, ObjectReference=None, FilterAttributeRanges=None, FilterTypedLink=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
"""
Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link .
See also: AWS API Documentation
:example: response = client.list_incoming_typed_links(
DirectoryArn='string',
ObjectReference={
'Selector': 'string'
},
FilterAttributeRanges=[
{
'AttributeName': 'string',
'Range': {
'StartMode': 'FIRST'|'LAST'|'LAST_BEFORE_MISSING_VALUES'|'INCLUSIVE'|'EXCLUSIVE',
'StartValue': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'BooleanValue': True|False,
'NumberValue': 'string',
'DatetimeValue': datetime(2015, 1, 1)
},
'EndMode': 'FIRST'|'LAST'|'LAST_BEFORE_MISSING_VALUES'|'INCLUSIVE'|'EXCLUSIVE',
'EndValue': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'BooleanValue': True|False,
'NumberValue': 'string',
'DatetimeValue': datetime(2015, 1, 1)
}
}
},
],
FilterTypedLink={
'SchemaArn': 'string',
'TypedLinkName': 'string'
},
NextToken='string',
MaxResults=123,
ConsistencyLevel='SERIALIZABLE'|'EVENTUAL'
)
:type DirectoryArn: string
:param DirectoryArn: [REQUIRED]
The Amazon Resource Name (ARN) of the directory where you want to list the typed links.
:type ObjectReference: dict
:param ObjectReference: [REQUIRED]
Reference that identifies the object whose attributes will be listed.
Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects . You can identify an object in one of the following ways:
$ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object s identifier is immutable and no two objects will ever share the same object identifier
/some/path - Identifies the object based on path
#SomeBatchReference - Identifies the object in a batch call
:type FilterAttributeRanges: list
:param FilterAttributeRanges: Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.
(dict) --Identifies the range of attributes that are used by a specified filter.
AttributeName (string) --The unique name of the typed link attribute.
Range (dict) -- [REQUIRED]The range of attribute values that are being selected.
StartMode (string) -- [REQUIRED]The inclusive or exclusive range start.
StartValue (dict) --The value to start the range at.
StringValue (string) --A string data value.
BinaryValue (bytes) --A binary data value.
BooleanValue (boolean) --A Boolean data value.
NumberValue (string) --A number data value.
DatetimeValue (datetime) --A date and time value.
EndMode (string) -- [REQUIRED]The inclusive or exclusive range end.
EndValue (dict) --The attribute value to terminate the range at.
StringValue (string) --A string data value.
BinaryValue (bytes) --A binary data value.
BooleanValue (boolean) --A Boolean data value.
NumberValue (string) --A number data value.
DatetimeValue (datetime) --A date and time value.
:type FilterTypedLink: dict
:param FilterTypedLink: Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls.
SchemaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns .
TypedLinkName (string) -- [REQUIRED]The unique | |
#!/usr/bin/env python3
import os
import json
import dateutil.parser
from datetime import *
import requests
import zlib
from lxml import etree
import argparse
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from osmosis_postgis import OsmosisPostgis
from project_postgis import ProjectPostgis
from tag_analyzer import TagAnalyzer
class MapathonChangeCreator(object):
"""
Functionality for finding changes made for the HOT-OSM project during the mapathon.
It utilizes the GeoJSON that specifies the project area and the osc file(s) that contain
the changes for the specific location and day.
"""
    def __init__(self):
        """Set up database helpers and the tag analyzer.

        Constructing OsmosisPostgis/ProjectPostgis/TagAnalyzer here may have
        side effects of their own; keep the construction order as-is.
        """
        # Progress of the last/ongoing analysis in percent (read via get_analysis_progress()).
        self.analysis_percentage = 0
        self.osmosis_postgis = OsmosisPostgis()
        self.project_postgis = ProjectPostgis()
        self.tag_analyzer = TagAnalyzer()
def is_inside_any_of_polygons(self, point, polygons):
#print(polygons)
for polygon in polygons:
shapely_point = Point(point['lat'], point['lon'])
shapely_polygon = Polygon(polygon)
# TODO probably would be better to use the intersects function.
is_inside = shapely_polygon.contains(shapely_point)
if is_inside:
return True
return False
def is_inside_polygon(self, point, polygon_points):
# adapted from http://stackoverflow.com/questions/36399381/whats-the-fastest-way-of-checking-if-a-point-is-inside-a-polygon-in-python
shapely_point = Point(point['lat'], point['lon'])
shapely_polygon = Polygon(polygon_points)
is_inside = shapely_polygon.contains(shapely_point)
#print(is_inside)
return is_inside
def create_polygons_from_file(self, project_json_file):
with open(project_json_file, 'r') as data_file:
data = json.load(data_file)
polygons = self.create_polygons_from_feature_collection(data)
return polygons
def create_polygons_from_feature_collection(self, data):
polygons = []
geojson_features = data['features']
for feature in geojson_features:
#print(feature)
print(feature['geometry']['type'])
if feature['geometry']['type'] == 'Polygon':
lines = feature['geometry']['coordinates'][0]
polygon = self.create_polygon(lines)
polygons.append(polygon)
elif feature['geometry']['type'] == 'MultiPolygon':
for subgeom in feature['geometry']['coordinates']:
lines = subgeom[0]
polygon = self.create_polygon(lines)
polygons.append(polygon)
else:
print('unhandled feature geometry type', feature['geometry']['type'])
return polygons
def create_polygon(self, lines):
polygon_string = ""
#print(lines)
for line in lines:
polygon_string += str("%.9f" % round(line[1],9)) + " " + str("%.9f" % round(line[0], 9)) + " "
polygon_string = polygon_string.rstrip(" ")
data = polygon_string.split(' ')
#print(data)
#print(len(data))
coords = []
for i in range(0, len(data), 2):
coords.append((float(data[i]), float(data[i+1])))
#print(coords)
return coords
def calculate_center(self, points):
#print(points)
center_point = {}
lat_sum = 0
lon_sum = 0
for point in points:
lat_sum += point['lat']
lon_sum += point['lon']
center_point['lat'] = lat_sum / len(points)
center_point['lon'] = lon_sum / len(points)
return center_point
    def create_feature(self, way):
        """Build a feature dict (id/user/uid/version, plus node coords for v1 ways) from an osc <way> element.

        Returns None when the way references no nodes present in the changefile
        (counted in self.count_ways_with_no_nodes), or when its center point
        falls outside the project polygons.

        NOTE(review): this method reads ``osc_root_element`` and
        ``project_polygons`` as globals instead of taking them as parameters
        (they are parameters of create_mapathon_changes, not of this method);
        it can only work if module-level variables with those names exist —
        confirm before reuse.
        """
        feature = {}
        feature['id'] = way.xpath("string(@id)")
        feature['user'] = way.xpath("string(@user)")
        feature['uid'] = way.xpath("string(@uid)")
        feature_version = int(way.xpath("string(@version)"))
        feature['version'] = feature_version
        #print(len(way))
        nds = way.xpath("nd")
        feature_nodes = []
        for nd in nds:
            feature_node = {}
            node_ref = nd.xpath("string(@ref)")
            feature_node['id'] = node_ref
            # Resolve the referenced <node> in the whole changefile to get its coordinates.
            nodes = osc_root_element.xpath("//node[@id='%s']" % node_ref)
            if len(nodes) == 1: # NOTE: can also be 0
                lat = nodes[0].xpath("string(@lat)")
                lon = nodes[0].xpath("string(@lon)")
                feature_node['lat'] = float(lat)
                feature_node['lon'] = float(lon)
                feature_nodes.append(feature_node)
        if len(feature_nodes) == 0: # do not store a way that does not have any new nodes
            self.count_ways_with_no_nodes += 1
            return None
        else:
            center = self.calculate_center(feature_nodes)
            if not self.is_inside_any_of_polygons(center, project_polygons):
                return None
            if feature_version == 1: # store only nodes for created features to save memory & bandwidth
                feature["nodes"] = feature_nodes
            return feature
def create_mapathon_changes_with_db(self, area_name, project_number, date, min_hour_utz):
buildings = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'building', geomtype='polygon')
residential_areas = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'landuse', ['residential'], geomtype='polygon')
landuse_farmlands = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'landuse', ['farmland'], geomtype='polygon')
landuse_orchards = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'landuse', ['orchard'], geomtype='polygon')
landuse_any_other = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'landuse', None, ['residential', 'farmland', 'orchard'], geomtype='polygon')
highways_path = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['path'])
highways_primary = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['primary'])
highways_residential = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['residential'])
highways_secondary = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['secondary'])
highways_service = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['service'])
highways_tertiary = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['tertiary'])
highways_track = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['track'])
highways_unclassified = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['unclassified'])
highways_road = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['road'])
highways_footway = self.project_postgis.find_changes(self.db_name, date, min_hour_utz, 'highway', ['footway'])
data = {
"building": buildings,
"landuse_residential": residential_areas,
"landuse_farmland": landuse_farmlands,
"landuse_orchard": landuse_orchards,
"landuse_any_other": landuse_any_other,
"highway_path": highways_path,
"highway_primary": highways_primary,
"highway_residential": highways_residential,
"highway_secondary": highways_secondary,
"highway_service": highways_service,
"highway_tertiary": highways_tertiary,
"highway_track": highways_track,
"highway_unclassified": highways_unclassified,
"highway_road": highways_road,
"highway_footway": highways_footway
}
self.tag_analyzer.analyze_tags(area_name, project_number, date, min_hour_utz, data)
return data
    def get_analysis_results(self, project_number):
        """Return the tag-analysis results stored for *project_number* (delegates to the tag analyzer)."""
        return self.tag_analyzer.get_analysis_results(project_number)
    def get_all_tags(self):
        """Return all tags known to the tag analyzer (delegates to it)."""
        return self.tag_analyzer.get_all_tags()
def create_mapathon_changes(self, project_polygons, osc_root_element, date, min_hour_utz):
self.analysis_percentage = 0
ways = osc_root_element.xpath("//way[starts-with(@timestamp, '{0}')]".format(date))
buildings = []
residential_areas = []
landuse_farmlands = []
landuse_orchards = []
landuse_any_other = []
highways_path = []
highways_primary = []
highways_residential = []
highways_secondary = []
highways_service = []
highways_tertiary = []
highways_track = []
highways_unclassified = []
highways_road = []
highways_footway = []
self.count_ways_with_no_nodes = 0
for i, way in enumerate(ways):
percentage = i / len(ways) * 100
print("Done", "%.2f" % round(percentage, 2), "\b%")
self.analysis_percentage = round(percentage, 2)
timestamp = dateutil.parser.parse(way.xpath("string(@timestamp)")) #datetime.datetime object
if timestamp.hour >= int(min_hour_utz):
feature = self.create_feature(way)
if feature is None: # Version of the way > 1 or the way has no nodes
continue
tags = way.xpath("tag")
if(len(tags) > 0):
feature_tags = {}
feature_type = ''
feature_type_value = ''
for tag in tags:
key = tag.xpath("string(@k)")
value = tag.xpath("string(@v)")
feature_tags[key] = value
if key == "building" or key == "landuse" or key == "highway":
feature_type = key
feature_type_value = value
feature['tags'] = feature_tags
if feature_type == "building":
buildings.append(feature)
elif feature_type == "landuse":
if feature_type_value == "residential":
residential_areas.append(feature)
elif feature_type_value == "farmland":
landuse_farmlands.append(feature)
elif feature_type_value == "orchard":
landuse_orchards.append(feature)
else:
landuse_any_other.append(feature)
elif feature_type == "highway":
if feature_type_value == "path":
highways_path.append(feature)
elif feature_type_value == "primary":
highways_primary.append(feature)
elif feature_type_value == "residential":
highways_residential.append(feature)
elif feature_type_value == "secondary":
highways_secondary.append(feature)
elif feature_type_value == "service":
highways_service.append(feature)
elif feature_type_value == "tertiary":
highways_tertiary.append(feature)
elif feature_type_value == "track":
highways_track.append(feature)
elif feature_type_value == "unclassified":
highways_unclassified.append(feature)
elif feature_type_value == "road":
highways_road.append(feature)
elif feature_type_value == "motorway":
highways_road.append(feature)
elif feature_type_value == "trunk":
highways_road.append(feature)
elif feature_type_value == "living_street":
highways_road.append(feature)
elif feature_type_value == "footway":
highways_footway.append(feature)
else:
print(feature_type_value)
print("self.count_ways_with_no_nodes: ", self.count_ways_with_no_nodes)
return {
"building": buildings,
"landuse_residential": residential_areas,
"landuse_farmland": landuse_farmlands,
"landuse_orchard": landuse_orchards,
"landuse_any_other": landuse_any_other,
"highway_path": highways_path,
"highway_primary": highways_primary,
"highway_residential": highways_residential,
"highway_secondary": highways_secondary,
"highway_service": highways_service,
"highway_tertiary": highways_tertiary,
"highway_track": highways_track,
"highway_unclassified": highways_unclassified,
"highway_road": highways_road,
"highway_footway": highways_footway
}
def create_mapathon_changes_from_file(self, project_json_file, osc_file, date, min_hour_utz, output_dir):
project_polygons = self.create_polygons_from_file(project_json_file)
osc_root_element = etree.parse(osc_file).getroot()
results = self.create_mapathon_changes(project_polygons, osc_root_element, date, min_hour_utz)
os.makedirs(output_dir, exist_ok=True)
# print(len(ways))
# print(len(buildings))
with open(output_dir + '/' + 'buildings.json', 'w') as outfile:
json.dump(results['building'], outfile)
# print(len(residential_areas))
# print(json.dumps(residential_areas))
with open(output_dir + '/' + 'residential_areas.json', 'w') as outfile:
json.dump(results['landuse_residential'], outfile)
with open(output_dir + '/' + 'landuse_farmland.json', 'w') as outfile:
json.dump(results['landuse_farmland'], outfile)
with open(output_dir + '/' + 'landuse_orchard.json', 'w') as outfile:
json.dump(results['landuse_orchard'], outfile)
with open(output_dir + '/' + 'landuse_any_other.json', 'w') as outfile:
json.dump(results['landuse_any_other'], outfile)
# print(len(highways_path))
# print(json.dumps(highways_path))
with open(output_dir + '/' + 'highways_path.json', 'w') as outfile:
json.dump(results['highway_path'], outfile)
# print(len(highways_primary))
with open(output_dir + '/' + 'highways_primary.json', 'w') as outfile:
json.dump(results['highway_primary'], outfile)
# print(len(highways_residential))
with open(output_dir + '/' + 'highways_residential.json', 'w') as outfile:
json.dump(results['highway_residential'], outfile)
# print(len(highways_secondary))
with open(output_dir + '/' + 'highways_secondary.json', 'w') as outfile:
json.dump(results['highway_secondary'], outfile)
# print(len(highways_service))
with open(output_dir + '/' + 'highways_service.json', 'w') as outfile:
json.dump(results['highway_service'], outfile)
# print(len(highways_tertiary))
with open(output_dir + '/' + 'highways_tertiary.json', 'w') as outfile:
json.dump(results['highway_tertiary'], outfile)
# print(len(highways_track))
with open(output_dir + '/' + 'highways_track.json', 'w') as outfile:
json.dump(results['highway_track'], outfile)
# print(len(highways_unclassified))
with open(output_dir + '/' + 'highways_unclassified.json', 'w') as outfile:
json.dump(results['highway_unclassified'], outfile)
# print(len(highways_road))
with open(output_dir + '/' + 'highways_road.json', 'w') as outfile:
json.dump(results['highway_road'], outfile)
# print(len(highways_footway))
with open(output_dir + '/' + 'highways_footway.json', 'w') as outfile:
json.dump(results['highway_footway'], outfile)
def create_mapathon_changes_from_URL(self, area_name, project_number, project_polygon_feature_collection, osc_file_download_url, date, min_hour_utz):
# project_polygons is a geojson featurecollection of polygons similarly to the contents of the project_json_file argument
file_name = osc_file_download_url.split(':')[1][2:].replace('download.geofabrik.de/', '').replace('/', '_').replace('-', '_')
output_path = os.path.join(os.getcwd(), 'osc_data', file_name)
if not os.path.isfile(output_path):
try:
#osc_gz_response = requests.get(osc_file_download_url)
osc_gz_response = requests.get(osc_file_download_url, stream=True)
except Exception as e:
print(e)
# TODO handle all possible error conditions
self.save_osc_to_file(output_path, osc_gz_response)
self.insert_data_to_db(file_name, project_polygon_feature_collection, date)
# osc_data = zlib.decompress(osc_gz_response.content, 16 + zlib.MAX_WBITS)
# osc_root_element = etree.fromstring(osc_data)
# project_polygons = self.create_polygons_from_feature_collection(project_polygon_feature_collection)
return self.create_mapathon_changes_with_db(area_name, project_number, date, min_hour_utz)
    def insert_data_to_db(self, file_name, project_polygon_feature_collection, date):
        """Prepare a per-area/per-date postgis DB, load the osc data, and store the project polygons.

        Side effect: sets ``self.db_name`` (used later by
        create_mapathon_changes_with_db).

        NOTE(review): the success/failure branches after the osmosis import are
        both ``pass`` with a TODO — import errors are currently ignored.
        """
        # DB name: "<osc file stem>_<date with dashes replaced>".
        self.db_name = file_name.split('.')[0] + '_' + date.replace('-', '_')
        ret = self.osmosis_postgis.prepare_db(self.db_name)
        if ret == 'created':
            # Only load the osc file into a freshly created DB.
            ret = self.osmosis_postgis.write_osc_to_pg_using_osmosis(self.db_name, file_name)
            if ret == 0:
                pass
            else:
                pass
                # TODO
        self.project_postgis.write_project_features_to_pg(self.db_name, project_polygon_feature_collection)
def save_osc_to_file(self, output_path, osc_gz_response):
with open(output_path, 'wb') as outfile:
for chunk in osc_gz_response.iter_content(chunk_size=1024):
if chunk:
outfile.write(chunk)
    def get_analysis_progress(self):
        """Return the progress (percent, rounded to 2 decimals) of the current/last analysis run."""
        return self.analysis_percentage
def filter_same_changes(self, mapathon_changes_for_multiple_areas):
# if changes were extracted from more than one area (osc file) then
# the areas (of the osc files) can partially overlap and therefore there is need to look up and filter
# the same changes
# mapathon_changes_for_multiple_areas parameter is an array of dictionaries that | |
<gh_stars>1-10
#!/usr/bin/env python3
"""
Functional Data Analysis on multifrequency acoustic data.
Adapted from the R package 'fda.oce' (https://github.com/EPauthenet/fda.oce)
to work on multifrequency and depth-varying acoustic data, as explained in
"Ariza et al. (under review). Acoustic seascape partitioning through functional
data analysis".
Copyright (c) 2020 EchoPY
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__authors__ = ['<NAME>' # wrote the code
]
__credits__ = ['<NAME>' # supervised + provided original code
]
import copy
import numpy as np
from skfda.representation import basis, FDataGrid
from skfda.misc.regularization import L2Regularization
from echopy.utils.transform import lin, log
def get_fdo(Sv, r, nb, order=4, f=None, var=None):
    """
    Creates a functional data object from single-frequency or multifrequency
    Sv data.
    Args:
        Sv (float): 2D or 3D numpy array with Sv data (dB), with dimensions:
            - 1st: range
            - 2nd: time
            - 3rd: frequency (if Sv is a 3D-array)
        r (float): 1D numpy array with range data (m).
        nb (int): Number of basis for the functional data object.
        order (int): Order of basis functions.
        f (float): list or 1D-numpy array with frequency data (kHz).
        var (dict): Variables included in the functional data object. It
            should indicate the frequency and the depth interval of each
            variable with the following structure:
                - keys (str): variable names
                - values (int): 3-integers tuple with the following information:
                    - 1: frequency (kHz)
                    - 2: upper range (m)
                    - 3: lower range (m)
    Returns:
        object: Functional data object.
    Raises:
        Exception: If any input fails validation (wrong type, inconsistent
            dimensions, unavailable frequency, or depth interval outside the
            range provided).
    Notes:
        By default, the function accepts 2D-single-frequency Sv arrays and the
        unique variable considered will be that at the frequency provided, and
        considering the full depth range provided. If 3D-multifrequency data is
        provided and more than one variable is defined at each frequency, the
        depth range extent of variables must be consistent. Further details in
        Ariza et al. (under review). Acoustic seascape partitioning through
        functional data analysis.
    Examples:
        # Convert variables 38 kHz from 10 to 200 m, 120 kHz from 10 to 200 m,
        # and 38 kHz from 200 to 390 m into a functional data object defined
        # by 20 basis for every variable:
        Sv = ...       # a 3D-array.
        r = ...        # 1D-array with length equal to 1st Sv dimension
        f = [38, 120]  # 1D-array with length equal to 3rd Sv dimension
        var = {'038kHz_010-200m': ( 38, 10, 200),
               '120kHz_010-200m': (120, 10, 200),
               '038kHz_200-390m': ( 38, 200, 390)}
        fdo = get_fdo(Sv, r, 20, f=f, var=var)
    """
    # Check Sv and convert from 2d to 3d array, so single-frequency data can
    # be processed by the same code path as multifrequency data.
    if (Sv.ndim != 2) & (Sv.ndim != 3):
        raise Exception('\'Sv\' array must have 2 or 3 dimensions')
    if Sv.ndim == 2:
        Sv = Sv[:, :, np.newaxis]
    # Provide defaults for missing inputs: one dummy frequency and a single
    # variable spanning the full depth range available.
    if f is None:
        f = [0]
    if var is None:
        var = {'var1': (0, r[0], r[-1])}
    # Check the rest of the inputs.
    if not isinstance(r, np.ndarray):
        raise Exception('\'r\' must be a 1d-numpy array')
    if r.ndim != 1:
        raise Exception('\'r\' array must have 1 dimension')
    if not isinstance(Sv, np.ndarray):
        raise Exception('\'Sv\' must be a 2d- or 3d-numpy array')
    if Sv.shape[0] != len(r):
        raise Exception('\'r\' and \'Sv\' 1st dimension lengths must be equal')
    if not isinstance(nb, int):
        raise Exception('\'nb\' must be an integer')
    if not isinstance(order, int):
        raise Exception('\'order\' must be an integer')
    if not isinstance(f, (np.ndarray, list)):
        raise Exception('if multifrequency Sv is provided, \'f\' must be '
                        + 'a list or an array with length equal to the 3rd '
                        + 'dimension of the Sv array')
    if not isinstance(var, dict):
        raise Exception('if multifrequency Sv is provided, \'var\' must '
                        + 'be a dictionary with names, frequency, and '
                        + 'depth interval of the variables to be analyzed')
    if isinstance(f, np.ndarray):
        if f.ndim != 1:
            raise Exception('\'f\' array must have 1 dimension')
    # f is guaranteed to be a list or 1d-array at this point.
    if Sv.shape[-1] != len(f):
        raise Exception('\'f\' & \'Sv\' 3rd dimension lenght must match')
    # Validate every requested variable against the frequencies and the depth
    # range available. All variables must share the same range extent because
    # their coefficient matrices are stacked together below.
    x0 = list(var.values())[0]
    for key, v in var.items():
        if not isinstance(v, tuple):
            raise Exception('variable values must be a 3-element tuple')
        if len(v) != 3:
            raise Exception('variable values must be a 3-element tuple')
        if v[0] not in f:
            raise Exception('%s kHz frequency not available' % v[0])
        if x0[2] - x0[1] != v[2] - v[1]:
            # Bugfix: message used to read "might be consistent", which
            # misstated the constraint being enforced here.
            raise Exception('range extent of variables must be consistent')
        if v[1] < r[0]:
            raise Exception(('%s m is above the depth range available for '
                             + '%s kHz, define a different upper depth '
                             + 'for variable \'%s\'.') % (v[1], v[0], key))
        if v[2] > r[-1]:
            raise Exception(('%s m is below the depth range available for '
                             + '%s kHz, define a different lower depth '
                             + 'for variable \'%s\'.') % (v[2], v[0], key))
    # Iterate through variables, extract each one, and join them into a
    # single functional data object.
    for i, key in enumerate(var.keys()):
        # Frequency and depth interval requested for this variable.
        fi, r0, r1 = var[key]
        # Index of the requested frequency along the 3rd Sv dimension.
        k = np.where([x == fi for x in f])[0][0]
        # Range-sample indexes bounding the requested interval:
        # i0 = first sample with r >= r0; i1 = last sample with r <= r1.
        minpositive = (r - r0) * 1.0
        minpositive[minpositive < 0] = np.inf
        maxnegative = (r - r1) * 1.0
        maxnegative[maxnegative > 0] = -np.inf
        i0 = np.argmin(minpositive)
        i1 = np.argmax(maxnegative)
        # Sv and range domain for this variable; the range is re-referenced
        # to its first sample so every variable's domain starts at zero.
        y = Sv[i0:i1 + 1, :, k]
        x = r[i0:i1 + 1] - r[i0]
        xmin = x[0]
        xmax = x[-1]
        # Project this variable onto a B-spline basis, then merge it into a
        # unique functional data object.
        bspline = basis.BSpline(domain_range=(xmin, xmax), n_basis=nb, order=order)
        fdo_i = FDataGrid(y.T, sample_points=x, domain_range=(xmin, xmax))
        fdo_i = fdo_i.to_basis(bspline)
        if i == 0:
            # First variable initialises the output object.
            fdo = copy.deepcopy(fdo_i)
            fdo.dim_names = [key]
            fdo.domain_depth = [(r[i0], r[i1])]
        else:
            # Subsequent variables are stacked along a 3rd coefficients axis.
            fdo.domain_depth.append((r[i0], r[i1]))
            fdo.dim_names.append(key)
            fdo.coefficients = np.dstack((fdo.coefficients, fdo_i.coefficients))
    return fdo
def get_fpca(fdo):
"""
Performs functional PCA on funtional data object and returns results.
Args:
fdo (object): Functional data object.
Returns:
dict: Functional PCA results, includes the following keys:
- 'C' : Centered coefficients matrix 'C'
- 'Cm' : Mean coefficients 'Cm'
- 'inertia' : Inertia
- 'W' : Function-to-discrete metric equivalence matrix 'W'
- 'M' : Weighting matrix 'M'
- 'values' : PCs values
- 'pval' : Percentage of variance of PCs.
- 'vecnotWM': PCs vectors
- 'vectors' : PCs weighted vectors
- 'axes' : Axes
- 'pc' : PCs projected on the modes
"""
# get number of samples, basis, and dimensions
nsam = fdo.n_samples
nbas = fdo.n_basis
ndim = len(fdo.dim_names)
# if 3d-array coefficients, convert to a 2d-array (n_samples, n_basis)
if ndim>1:
C = np.zeros((nsam, nbas*ndim))
for k in range(ndim):
j0 = nbas * k
j1 = nbas * (k+1)
C[:, j0:j1] = fdo.coefficients[:, :, k]
else:
C = fdo.coefficients
# compute centered coefficients matrix by subtractig the mean
Cm = np.mean(C, axis=0)
Cc = C - Cm[np.newaxis,:]
# get basis penalty matrix
regularization = L2Regularization()
penalty = regularization.penalty_matrix(fdo.basis)
# compute crossed-covariance matrix of C and Inertia
inertia = np.zeros(ndim)
for k in range(ndim):
j0 = nbas * k
j1 = nbas * (k+1)
V = Cc[:, j0:j1].T @ Cc[:, j0:j1] @ penalty / nsam
inertia[k] = np.trace( V )
# compute weighting matrix 'M' to balance variables of different units
M = np.zeros((ndim*nbas,ndim*nbas))
Mdeminv = M.copy()
W = M.copy()
aux = np.diag(np.ones(nbas))
for k in range(ndim):
i0, j0 = nbas * k , nbas * k
i1, j1 | |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A user-friendly wrapper for a Google Cloud Bigtable Backup."""
import re
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable_admin_v2.gapic.bigtable_table_admin_client import (
BigtableTableAdminClient,
)
from google.cloud.bigtable_admin_v2.types import table_pb2
from google.cloud.bigtable.policy import Policy
from google.cloud.exceptions import NotFound
from google.protobuf import field_mask_pb2
# Fully-qualified backup resource name, e.g.
# "projects/<project>/instances/<instance_id>/clusters/<cluster_id>/backups/<backup_id>".
# Named groups are used by Backup.from_pb() to extract and validate each part.
_BACKUP_NAME_RE = re.compile(
    r"^projects/(?P<project>[^/]+)/"
    r"instances/(?P<instance_id>[a-z][-a-z0-9]*)/"
    r"clusters/(?P<cluster_id>[a-z][-a-z0-9]*)/"
    r"backups/(?P<backup_id>[a-z][a-z0-9_\-]*[a-z0-9])$"
)
# Fully-qualified table resource name, e.g.
# "projects/<project>/instances/<instance_id>/tables/<table_id>".
_TABLE_NAME_RE = re.compile(
    r"^projects/(?P<project>[^/]+)/"
    r"instances/(?P<instance_id>[a-z][-a-z0-9]*)/"
    r"tables/(?P<table_id>[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$"
)
class Backup(object):
"""Representation of a Google Cloud Bigtable Backup.
A :class: `Backup` can be used to:
* :meth:`create` the backup
* :meth:`update` the backup
* :meth:`delete` the backup
:type backup_id: str
:param backup_id: The ID of the backup.
:type instance: :class:`~google.cloud.bigtable.instance.Instance`
:param instance: The Instance that owns this Backup.
:type cluster_id: str
:param cluster_id: (Optional) The ID of the Cluster that contains this Backup.
Required for calling 'delete', 'exists' etc. methods.
:type table_id: str
:param table_id: (Optional) The ID of the Table that the Backup is for.
Required if the 'create' method will be called.
:type expire_time: :class:`datetime.datetime`
:param expire_time: (Optional) The expiration time after which the Backup
will be automatically deleted. Required if the `create`
method will be called.
"""
    def __init__(
        self, backup_id, instance, cluster_id=None, table_id=None, expire_time=None
    ):
        self.backup_id = backup_id
        self._instance = instance
        self._cluster = cluster_id
        self.table_id = table_id
        self._expire_time = expire_time
        # Server-populated metadata; left as None until filled in by
        # ``from_pb()`` or ``reload()``.
        self._parent = None
        self._source_table = None
        self._start_time = None
        self._end_time = None
        self._size_bytes = None
        self._state = None
    @property
    def name(self):
        """Backup name used in requests.
        The Backup name is of the form
        ``"projects/../instances/../clusters/../backups/{backup_id}"``
        :rtype: str
        :returns: The Backup name.
        :raises: ValueError: If the 'cluster' has not been set.
        """
        # The cluster ID is optional at construction time, so the full name
        # can only be built once it has been provided.
        if not self._cluster:
            raise ValueError('"cluster" parameter must be set')
        return BigtableTableAdminClient.backup_path(
            project=self._instance._client.project,
            instance=self._instance.instance_id,
            cluster=self._cluster,
            backup=self.backup_id,
        )
    @property
    def cluster(self):
        """The ID of the [parent] cluster used in requests.
        :rtype: str
        :returns: The ID of the cluster containing the Backup.
        """
        return self._cluster
    @cluster.setter
    def cluster(self, cluster_id):
        # NOTE: does not invalidate the cached ``_parent`` path; set the
        # cluster before first accessing ``parent``.
        self._cluster = cluster_id
    @property
    def parent(self):
        """Name of the parent cluster used in requests.
        .. note::
            This property will return None if ``cluster`` is not set.
        The parent name is of the form
        ``"projects/{project}/instances/{instance_id}/clusters/{cluster}"``
        :rtype: str
        :returns: A full path to the parent cluster.
        """
        # Lazily built and cached on first access.
        if not self._parent and self._cluster:
            self._parent = BigtableTableAdminClient.cluster_path(
                project=self._instance._client.project,
                instance=self._instance.instance_id,
                cluster=self._cluster,
            )
        return self._parent
    @property
    def source_table(self):
        """The full name of the Table from which this Backup is created.
        .. note::
            This property will return None if ``table_id`` is not set.
        The table name is of the form
        ``"projects/../instances/../tables/{source_table}"``
        :rtype: str
        :returns: The Table name.
        """
        # Lazily built and cached on first access.
        if not self._source_table and self.table_id:
            self._source_table = BigtableTableAdminClient.table_path(
                project=self._instance._client.project,
                instance=self._instance.instance_id,
                table=self.table_id,
            )
        return self._source_table
    @property
    def expire_time(self):
        """Expiration time used in the creation requests.
        :rtype: :class:`datetime.datetime`
        :returns: A 'datetime' object representing the expiration time of
                  this Backup.
        """
        return self._expire_time
    @expire_time.setter
    def expire_time(self, new_expire_time):
        # Only updates the local value; use ``update_expire_time()`` to push
        # the change to the service.
        self._expire_time = new_expire_time
    @property
    def start_time(self):
        """The time this Backup was started.
        :rtype: :class:`datetime.datetime`
        :returns: A 'datetime' object representing the time when the creation
                  of this Backup had started.
        """
        return self._start_time
    @property
    def end_time(self):
        """The time this Backup was finished.
        :rtype: :class:`datetime.datetime`
        :returns: A 'datetime' object representing the time when the creation
                  of this Backup was finished.
        """
        return self._end_time
    @property
    def size_bytes(self):
        """The size of this Backup, in bytes.
        :rtype: int
        :returns: The size of this Backup, in bytes.
        """
        return self._size_bytes
    @property
    def state(self):
        """The current state of this Backup.
        :rtype: :class:`~google.cloud.bigtable_admin_v2.gapic.enums.Backup.State`
        :returns: The current state of this Backup.
        """
        return self._state
    @classmethod
    def from_pb(cls, backup_pb, instance):
        """Creates a Backup instance from a protobuf message.
        :type backup_pb: :class:`table_pb2.Backup`
        :param backup_pb: A Backup protobuf object.
        :type instance: :class:`Instance <google.cloud.bigtable.instance.Instance>`
        :param instance: The Instance that owns the Backup.
        :rtype: :class:`~google.cloud.bigtable.backup.Backup`
        :returns: The backup parsed from the protobuf response.
        :raises: ValueError: If the backup name does not match the expected
                 format or the parsed project ID does not match the
                 project ID on the Instance's client, or if the
                 parsed instance ID does not match the Instance ID.
        """
        # Parse and validate the fully-qualified backup name first.
        match = _BACKUP_NAME_RE.match(backup_pb.name)
        if match is None:
            raise ValueError(
                "Backup protobuf name was not in the expected format.", backup_pb.name
            )
        if match.group("project") != instance._client.project:
            raise ValueError(
                "Project ID of the Backup does not match the Project ID "
                "of the instance's client"
            )
        instance_id = match.group("instance_id")
        if instance_id != instance.instance_id:
            raise ValueError(
                "Instance ID of the Backup does not match the Instance ID "
                "of the instance"
            )
        backup_id = match.group("backup_id")
        cluster_id = match.group("cluster_id")
        # The source table is optional in the response; a non-matching name
        # simply leaves table_id unset rather than raising.
        match = _TABLE_NAME_RE.match(backup_pb.source_table)
        table_id = match.group("table_id") if match else None
        # NOTE(review): expire_time is taken directly from the protobuf here
        # (presumably a Timestamp message, not a datetime) -- confirm callers
        # of ``expire_time`` handle both forms.
        expire_time = backup_pb.expire_time
        backup = cls(
            backup_id,
            instance,
            cluster_id=cluster_id,
            table_id=table_id,
            expire_time=expire_time,
        )
        # Server-populated, read-only fields are copied onto the new object.
        backup._start_time = backup_pb.start_time
        backup._end_time = backup_pb.end_time
        backup._size_bytes = backup_pb.size_bytes
        backup._state = backup_pb.state
        return backup
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return other.backup_id == self.backup_id and other._instance == self._instance
def __ne__(self, other):
return not self == other
    def create(self, cluster_id=None):
        """Creates this backup within its instance.
        :type cluster_id: str
        :param cluster_id: (Optional) The ID of the Cluster for the newly
                           created Backup.
        :rtype: :class:`~google.api_core.operation.Operation`
        :returns: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`
                  instance, to be used to poll the status of the 'create' request
        :raises Conflict: if the Backup already exists
        :raises NotFound: if the Instance owning the Backup does not exist
        :raises BadRequest: if the `table` or `expire_time` values are invalid,
                            or `expire_time` is not set
        :raises ValueError: if `expire_time`, `table` or `cluster` are unset
        """
        # Fail fast on the client side for parameters the API requires.
        if not self._expire_time:
            raise ValueError('"expire_time" parameter must be set')
        # TODO: Consider implementing a method that sets a default value of
        # `expire_time`, e.g. 1 week from the creation of the Backup.
        if not self.table_id:
            raise ValueError('"table" parameter must be set')
        if cluster_id:
            self._cluster = cluster_id
        if not self._cluster:
            raise ValueError('"cluster" parameter must be set')
        backup = table_pb2.Backup(
            source_table=self.source_table,
            expire_time=_datetime_to_pb_timestamp(self.expire_time),
        )
        api = self._instance._client.table_admin_client
        # Long-running operation: the caller polls the returned future.
        return api.create_backup(self.parent, self.backup_id, backup)
def get(self):
"""Retrieves metadata of a pending or completed Backup.
:returns: An instance of
:class:`~google.cloud.bigtable_admin_v2.types.Backup`
:raises google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
:raises google.api_core.exceptions.RetryError: If the request failed
due to a retryable error and retry attempts failed.
:raises ValueError: If the parameters are invalid.
"""
api = self._instance._client.table_admin_client
try:
return api.get_backup(self.name)
except NotFound:
return None
def reload(self):
"""Refreshes the stored backup properties."""
backup = self.get()
self._source_table = backup.source_table
self._expire_time = backup.expire_time
self._start_time = backup.start_time
self._end_time = backup.end_time
self._size_bytes = backup.size_bytes
self._state = backup.state
def exists(self):
"""Tests whether this Backup exists.
:rtype: bool
:returns: True if the Backup exists, else False.
"""
return self.get() is not None
    def update_expire_time(self, new_expire_time):
        """Update the expire time of this Backup.
        :type new_expire_time: :class:`datetime.datetime`
        :param new_expire_time: the new expiration time timestamp
        """
        backup_update = table_pb2.Backup(
            name=self.name,
            expire_time=_datetime_to_pb_timestamp(new_expire_time),
        )
        # Only the expire_time field is included in the update mask, so no
        # other backup property can be modified by this request.
        update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
        api = self._instance._client.table_admin_client
        api.update_backup(backup_update, update_mask)
        # Keep the locally cached value in sync with what was sent.
        self._expire_time = new_expire_time
    def delete(self):
        """Delete this Backup on the service (the local object is unchanged)."""
        self._instance._client.table_admin_client.delete_backup(self.name)
    def restore(self, table_id):
        """Creates a new Table by restoring from this Backup. The new Table
        must be in the same Instance as the Instance containing the Backup.
        The returned Table ``long-running operation`` can be used to track the
        progress of the operation and to cancel it. The ``response`` type is
        ``Table``, if successful.
        :param table_id: The ID of the Table to create and restore to.
                         This Table must not already exist.
        :returns: An instance of
                  :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`.
        :raises: google.api_core.exceptions.AlreadyExists: If the table
                 already exists.
        :raises: google.api_core.exceptions.GoogleAPICallError: If the request
                 failed for any reason.
        :raises: google.api_core.exceptions.RetryError: If the request failed
                 due to a retryable error and retry attempts failed.
        :raises: ValueError: If the parameters are invalid.
        """
        api = self._instance._client.table_admin_client
        return api.restore_table(self._instance.name, table_id, self.name)
def get_iam_policy(self):
"""Gets the IAM access control policy for this backup.
:rtype: :class:`google.cloud.bigtable.policy.Policy`
:returns: The current IAM policy of this backup.
"""
table_api = self._instance._client.table_admin_client
args = {"resource": self.name}
response = table_api.get_iam_policy(**args)
| |
<reponame>Laogeodritt/KazTron
import asyncio
import logging
from asyncio import Event
from collections.abc import Hashable
from datetime import datetime, timedelta
from typing import Callable, Union, Dict, Awaitable, Any, List, Sequence, Mapping, Optional
import discord
from discord.ext import commands
from kaztron.utils.asyncio import datetime2loop
logger = logging.getLogger(__name__)
TaskFunction = Union[
Callable[[], Awaitable[None]],
Callable[[Any], Awaitable[None]]] #: async def name() -> None
class Task(Hashable):
    """
    Class that implements scheduled tasks. This should not be instantiated directly, but using the
    :func:`~.task` decorator.
    :param callback: A callback coroutine.
    :param is_unique: If True, this task can only be scheduled once at a time (includes recurring).
        If False, this task can be scheduled to run multiple times in the future.
    :raise discord.ClientException: ``callback`` is not a coroutine function.
    """
    def __init__(self, callback: TaskFunction, is_unique=True):
        if not asyncio.iscoroutinefunction(callback):
            raise discord.ClientException("Task callback must be a coroutine.")
        self.callback = callback
        self.is_unique = is_unique
        self.instance = None  # instance the last time this Task was accessed as a descriptor
        self.on_error = None  # type: Callable[[Exception, TaskInstance], Awaitable[None]]
        self.on_cancel = None  # type: Callable[[TaskInstance], Awaitable[None]]
    def __get__(self, instance, owner):
        # Descriptor protocol: remember which object (e.g. cog) this Task was
        # accessed through so the scheduler can pass it as ``self`` later.
        # NOTE(review): only the *last* accessed instance is kept on this
        # shared Task object -- confirm each task is only ever bound from a
        # single instance.
        if instance:
            self.instance = instance
        return self
    def run(self, instance=None, *args, **kwargs):
        # coroutine - returns the un-awaited coroutine object
        return self.callback(instance, *args, **kwargs) if instance else \
            self.callback(*args, **kwargs)
    def error(self, coro: Callable[[Exception, 'TaskInstance'], Awaitable[None]]):
        """
        Decorator. Sets a coroutine as a local error handler for this task. This handler will be
        called for any exception raised by the task.
        :param coro: Coroutine to handle errors, signature func(exception, task_inst) -> None.
        :raise discord.ClientException: Argument is not a coroutine
        """
        if not asyncio.iscoroutinefunction(coro):
            raise discord.ClientException("Error handler must be a coroutine.")
        self.on_error = coro
        return coro
    def cancel(self, coro: Callable[['TaskInstance'], Awaitable[None]]):
        """
        Decorator. Sets a coroutine as a cancellation handler. This handler is called whenever the
        task is cancelled prior to or while running.
        :param coro: Coroutine to handle cancellation. Takes no parameters.
        :raise discord.ClientException: Argument is not a coroutine
        """
        if not asyncio.iscoroutinefunction(coro):
            # Bugfix: this previously raised the copy-pasted "Error handler"
            # message, which misidentified the failing argument.
            raise discord.ClientException("Cancellation handler must be a coroutine.")
        self.on_cancel = coro
        return coro
    def __str__(self):
        return repr(self)
    def __repr__(self):
        return self.callback.__qualname__
    def __hash__(self):
        # Consistent with __eq__: identity is (callback, is_unique).
        return hash((self.callback, self.is_unique))
    def __eq__(self, other):
        # NOTE(review): assumes ``other`` is a Task; comparing against an
        # unrelated type raises AttributeError instead of returning
        # NotImplemented -- confirm no such comparisons occur.
        return self.callback == other.callback and self.is_unique == other.is_unique
# noinspection PyShadowingNames
class TaskInstance:
    """
    A single scheduled occurrence of a :class:`Task`: couples the task with
    its scheduler, its scheduled event-loop timestamp, the bound instance
    and the call arguments, and wraps the underlying asyncio task.
    """
    def __init__(self,
            scheduler: 'Scheduler', task: Task, timestamp: float,
            instance: Any, args: Sequence[Any], kwargs: Mapping[str, Any]):
        self.scheduler = scheduler
        self.task = task
        self.instance = instance  # object to pass as ``self``, or None
        self.timestamp = timestamp  # event-loop time this instance is keyed by
        self.async_task = None  # asyncio task; set by the scheduler after creation
        self.stopped_event = Event()  # set once the task stops for any reason
        # Defensive copies so later mutation by the caller has no effect.
        self.args = tuple(args) if args else ()
        self.kwargs = dict(kwargs.items()) if kwargs else {}
    def cancel(self):
        # Delegate to the scheduler so it can run cancellation handlers and
        # clean up its registry.
        self.scheduler.cancel_task(self)
    def __await__(self):
        # Allows ``await task_instance`` to await the wrapped asyncio task.
        return self.async_task.__await__()
    def is_current(self):
        """ Return True if called from within this task. """
        # NOTE(review): asyncio.Task.current_task() is deprecated and removed
        # in Python 3.12 -- on Python 3.7+ this should be
        # asyncio.current_task(); confirm the supported Python versions.
        return self.async_task is asyncio.Task.current_task()
    def is_active(self):
        """
        Returns True if this task instance is currently active or running. Returns False is the
        task completed or stopped.
        """
        return not self.stopped_event.is_set()
    async def wait(self):
        """ Wait for the task to complete (or stop for any reason, e.g. cancel or error). """
        if self.is_current():
            raise RuntimeError("Cannot wait on task from within the same task: deadlock.")
        await self.stopped_event.wait()
    # noinspection PyBroadException
    async def run(self):
        # task_id is only used for log messages in the error path below.
        task_id = '{!s}@{:.2f}'.format(self.task, self.timestamp)
        # noinspection PyBroadException
        try:
            if self.instance:
                return await self.task.run(self.instance, *self.args, **self.kwargs)
            else:
                return await self.task.run(*self.args, **self.kwargs)
        except (asyncio.CancelledError, KeyboardInterrupt, SystemExit):
            # Never swallow cancellation or interpreter shutdown.
            raise
        except Exception as e:
            try:
                await self.on_error(e)
            except Exception:
                logger.exception("Error in Task {!s} while handling error.".format(task_id))
            # The bot-level error handler is always notified, even when a
            # local error handler exists.
            await self.scheduler.bot.on_error('scheduled_task', self.task, self.timestamp)
    async def on_error(self, e: Exception):
        # Dispatch to the task's local error handler, passing the bound
        # instance as ``self`` when one exists.
        if self.task.on_error:
            if self.instance:
                await self.task.on_error(self.instance, e, self)
            else:
                await self.task.on_error(e, self)
        else:
            logger.debug("Task {!s} has no error handler".format(self.task))
    async def on_cancel(self):
        # Dispatch to the task's local cancellation handler, if any.
        if self.task.on_cancel:
            if self.instance:
                await self.task.on_cancel(self.instance, self)
            else:
                await self.task.on_cancel(self)
        else:
            logger.debug("Task {!s} has no cancellation handler".format(self.task))
    def __str__(self):
        return str(self.task) + "@{:.2f}".format(self.timestamp)
def task(is_unique=True):
    """
    Decorator marking an async function as a schedulable task, analogous to
    how ``discord.Command`` wraps command callbacks.
    :param is_unique: If True, this task can only be scheduled once at a time (includes
        recurring). If False, this task can be scheduled to run multiple times in the future.
    :raise TypeError: The decorated callback is already a :class:`Task`.
    """
    def wrap(callback):
        if isinstance(callback, Task):
            raise TypeError("Callback is already a schedulable task.")
        if isinstance(callback, commands.Command):
            # Unwrap a discord Command down to its underlying coroutine.
            callback = callback.callback
        return Task(callback=callback, is_unique=is_unique)
    return wrap
# noinspection PyShadowingNames
class Scheduler:
"""
Allows scheduling coroutines for execution at a future point in time, either once or on a
recurring basis. Use the :func:`~.task` decorator on functions or cog methods to mark it as a
task for scheduling, and then use this class's methods to schedule it for future execution.
Tasks allow defining error and cancellation handlers, similar to discord Commands: for instance,
.. code-block:: py
class MyCog(KazCog):
# ... other commands etc. here ...
@scheduler.task(is_unique=False)
async def my_task(self):
pass # task code here
@my_task.error
async def my_task_error_handler(self, exc: Exception, t: TaskInstance):
pass # handle exception here
@my_task.cancel
async def my_task_cancellation_handler(self, t: TaskInstance):
pass # handle cancellation here: cleanup, etc.
Both handlers take a TaskInstance parameter, from which you can obtain the task object,
timestamp, and any args and kwargs passed to the task function on execution of the task.
Furthermore, any errors will call the discord Client's `on_error` event, with
event_type = ``'scheduled_task'`` and two arguments corresponding to the TaskInstance tuple
(i.e. Task object, timestamp/id as a float). This is called even if a local error handler is
defined as per above.
A task can be called via a scheduler instance (normally available via ``KazCog.scheduler``),
e.g.:
.. code-block:: py
self.scheduler.schedule_task_in(self.my_task, 300) # scheduled in 5 minutes (300s)
"""
    def __init__(self, bot: commands.Bot):
        self.bot = bot
        # Registry of live task instances, keyed first by Task, then by the
        # event-loop timestamp the instance is scheduled for.
        self.tasks = {}  # type: Dict[Task, Dict[float, TaskInstance]]
    @property
    def loop(self):
        # Convenience accessor for the bot's asyncio event loop.
        return self.bot.loop
    def _add_task(self,
            task: Task, at_loop_time: float,
            args: Sequence[Any], kwargs: Mapping[str, Any],
            every: float=None, times: float=None) -> TaskInstance:
        """
        Create, register, and start a TaskInstance for ``task`` at the given
        event-loop time, optionally recurring ``every`` seconds up to
        ``times`` repetitions.
        :raise ValueError: ``task`` was not created via the ``task`` decorator.
        :raise asyncio.InvalidStateError: a unique task is already scheduled.
        """
        # validate
        if not isinstance(task, Task):
            raise ValueError("Scheduled tasks must be decorated with scheduler.task")
        if task.is_unique:
            # if already have task, and not rescheduling from within the same task
            existing_tasks = self.get_instances(task)
            is_resched = len(existing_tasks) == 1 and existing_tasks[0].is_current()
            if existing_tasks and not is_resched:
                raise asyncio.InvalidStateError(
                    'Task {} is set unique and already exists'.format(task)
                )
        # set up task; task.instance is whatever object the Task descriptor
        # was last accessed through (the owning cog), passed as ``self``.
        task_inst = TaskInstance(self, task, at_loop_time, task.instance, args, kwargs)
        task_inst.async_task = self.loop.create_task(
            self._runner(task_inst, at_loop_time, every, times)
        )
        # EAFP: create the per-Task dict on first use.
        try:
            self.tasks[task][at_loop_time] = task_inst
        except KeyError:
            self.tasks[task] = {at_loop_time: task_inst}
        logger.debug("Task added: {!s}, {:.2f} (now={:.2f})"
            .format(task, at_loop_time, self.loop.time()))
        return task_inst
def _del_task(self, task_inst: TaskInstance):
try:
del self.tasks[task_inst.task][task_inst.timestamp]
if not self.tasks[task_inst.task]:
del self.tasks[task_inst.task] # avoids leaking memory on a transient task object
except KeyError:
logger.warning("Could not delete task - race condition? {} {}".format(
task_inst.task.__name__, task_inst.timestamp
))
    def schedule_task_at(self, task: Task, dt: datetime,
            *, args: Sequence[Any]=(), kwargs: Mapping[str, Any]=None,
            every: Union[float, timedelta]=None, times: int=None) -> TaskInstance:
        """
        Schedule a task to run at a given time.
        :param task: The task to run (a coroutine decorated with :meth:`scheduler.task`)
        :param dt: When to run the task.
        :param args: Positional args to pass to the task, as a sequence (list/tuple) of values. If
            the task is a method/descriptor, do NOT include the ``self`` argument's value here.
        :param kwargs: Keyword args to pass to the task, as a mapping (dict or similar).
        :param every: How often to repeat the task, in seconds or as a timedelta. Optional.
        :param times: How many times to repeat the command. If ``every`` is set but ``times`` is
            not, the task is repeated forever.
        :return: A TaskInstance, which can be used to later cancel this task.
        """
        # Avoid a mutable default; an empty mapping becomes a fresh dict.
        if not kwargs:
            kwargs = {}
        if every:
            # EAFP: accept either a timedelta (has total_seconds) or a number.
            try:
                every = every.total_seconds()
            except AttributeError:
                every = float(every)
            logger.info("Scheduling task {!s} at {}, recurring every {:.2f}s for {} times"
                .format(task, dt.isoformat(' '), every, str(times) if times else 'infinite'))
        else:
            logger.info("Scheduling task {!s} at {}".format(task, dt.isoformat(' ')))
        # The datetime is converted to the event loop's clock before scheduling.
        return self._add_task(task, datetime2loop(dt, self.loop), args, kwargs, every, times)
def schedule_task_in(self, task: Task, in_time: Union[float, timedelta],
*, args: Sequence[Any]=(), kwargs: Mapping[str, Any]=None,
every: Union[float, timedelta]=None, times: int=None) -> TaskInstance:
"""
Schedule a task to run in a certain amount of time. By default, will run the task only once;
if ``every`` is specified, runs the task recurrently up to ``times`` times.
:param task: The task to run (a coroutine decorated with :meth:`scheduler.task`).
:param in_time: In how | |
<gh_stars>0
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright <NAME> 2014 <EMAIL> |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Painters used in list of services views
# Each entry is a painter spec tuple: (painter name, linked view or None).
service_view_painters = [
    ('service_state', None),
    # ('service_type_icon', None),
    ('service_description', 'service'),
    ('service_icons', None),
    ('svc_plugin_output', None),
    ('svc_state_age', None),
    ('svc_check_age', None),
    ('perfometer', None),
]
# Same as list of services, but extended by the hostname
# (shallow copy so the insert below does not mutate service_view_painters).
host_service_view_painters = service_view_painters[:]
host_service_view_painters.insert(1, ('host', 'host'))
# Painters used in list-of-hosts views.
host_view_painters = [
    ('host_state', None),
    # ('host_type_icon', None),
    # NOTE(review): 3-element entry while all others have 2 -- presumably the
    # third element is a tooltip painter; confirm against the view framework.
    ('host', 'host', 'host_addresses'),
    ('host_icons', None),
    ('num_services_ok', 'host_ok'),
    ('num_services_warn', 'host_warn'),
    ('num_services_unknown', 'host_unknown'),
    ('num_services_crit', 'host_crit'),
    ('num_services_pending', 'host_pending'),
]
multisite_builtin_views.update({
'allhosts': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'hosts',
'description': _('Overall state of all hosts, with counts of services in the various states.'),
'group_painters': [('sitealias', None)],
'hard_filters': [],
'hard_filtervars': [],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': False,
'name': 'allhosts',
'num_columns': 3,
'owner': '',
'painters': host_view_painters,
'play_sounds': False,
'public': True,
'show_filters': ['host_scheduled_downtime_depth',
'host_in_notification_period',
'host_in_service_period',
'hoststate',
'siteopt',
'host_acknowledged',
'hostregex',
'host_notifications_enabled',
'hostgroups',
'opthostgroup',
'host_check_command',
'opthost_contactgroup',
'hostalias',
'host_tags',
],
'sorters': [('site', False), ('site_host', False)],
'title': _('All hosts'),
'topic': _('Hosts')},
'starred_hosts': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'hosts',
'description': _('Overall state of your favorite hosts'),
'group_painters': [('sitealias', None)],
'hard_filters': [],
'hard_filtervars': [('is_host_favorites', '1')],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': False,
'name': 'allhosts',
'num_columns': 3,
'owner': '',
'painters': host_view_painters,
'play_sounds': False,
'public': True,
'show_filters': ['host_scheduled_downtime_depth',
'host_in_notification_period',
'hoststate',
'siteopt',
'host_acknowledged',
'hostregex',
'host_notifications_enabled',
'hostgroups',
'opthostgroup',
'host_check_command',
'opthost_contactgroup',
'hostalias',
'host_favorites'],
'sorters': [('site', False), ('site_host', False)],
'title': _('Favorite hosts'),
'topic': _('Hosts')},
'allhosts_mini': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'hosts',
'description': _('Showing all hosts in a compact layout.'),
'group_painters': [('sitealias', None)],
'hard_filters': [],
'hard_filtervars': [('site', ''),
('host', ''),
('opthostgroup', '')],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': False,
'name': 'allhosts_mini',
'num_columns': 6,
'owner': '',
'painters': [('host_state', None),
('host', 'host'),
('num_problems', 'problemsofhost')],
'play_sounds': False,
'public': True,
'show_filters': ['host_scheduled_downtime_depth',
'host_in_notification_period',
'hoststate',
'siteopt',
'host_acknowledged',
'hostregex',
'host_notifications_enabled',
'opthostgroup',
'host_check_command',
'opthost_contactgroup'],
'sorters': [('site', False), ('site_host', False)],
'title': _('All hosts (Mini)'),
'topic': _('Hosts')},
'allservices': {'browser_reload': 90,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('All services grouped by hosts.'),
'group_painters': [('sitealias', 'sitehosts'),
('host_with_state', 'host')],
'hard_filters': [],
'hard_filtervars': [('is_service_in_notification_period', '-1'),
('optservicegroup', ''),
('is_service_notifications_enabled', '-1'),
('is_host_in_notification_period', '-1'),
('is_in_downtime', '-1'),
('is_service_scheduled_downtime_depth', '-1'),
('is_service_acknowledged', '-1'),
('host', ''),
('is_service_active_checks_enabled', '-1'),
('service', ''),
('check_command', ''),
('opthostgroup', ''),
('service_output', ''),
('is_service_is_flapping', '-1')],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': False,
'name': 'allservices',
'num_columns': 1,
'owner': '',
'painters': service_view_painters,
'play_sounds': False,
'public': True,
'show_filters': ['service_in_notification_period',
'service_in_service_period',
'optservicegroup',
'service_notifications_enabled',
'host_in_notification_period',
'in_downtime',
'service_scheduled_downtime_depth',
'service_acknowledged',
'hostregex',
'service_active_checks_enabled',
'serviceregex',
'check_command',
'svcstate',
'opthostgroup',
'output',
'service_is_flapping',
'siteopt'],
'sorters': [('site', False),
('site_host', False),
('svcdescr', False)],
'title': _('All services'),
'topic': _('Services')},
'starred_services': {'browser_reload': 90,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('All of your favorites services by hosts.'),
'group_painters': [('sitealias', 'sitehosts'),
('host_with_state', 'host')],
'hard_filters': [],
'hard_filtervars': [('is_service_in_notification_period',
'-1'),
('optservicegroup', ''),
('is_service_notifications_enabled',
'-1'),
('is_host_in_notification_period',
'-1'),
('is_in_downtime', '-1'),
('is_service_scheduled_downtime_depth',
'-1'),
('is_service_acknowledged', '-1'),
('host', ''),
('is_service_active_checks_enabled',
'-1'),
('service', ''),
('check_command', ''),
('opthostgroup', ''),
('service_output', ''),
('is_service_is_flapping', '-1'),
('is_service_favorites', '1')],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': False,
'name': 'starred_services',
'num_columns': 1,
'owner': '',
'painters': service_view_painters,
'play_sounds': False,
'public': True,
'show_filters': ['service_in_notification_period',
'optservicegroup',
'service_notifications_enabled',
'host_in_notification_period',
'in_downtime',
'service_scheduled_downtime_depth',
'service_acknowledged',
'hostregex',
'service_active_checks_enabled',
'serviceregex',
'check_command',
'svcstate',
'opthostgroup',
'output',
'service_is_flapping',
'siteopt',
'host_favorites',
'service_favorites'],
'sorters': [('site', False),
('site_host', False),
('svcdescr', False)],
'title': _('Favorite services'),
'topic': _('Services')},
'comments': {'column_headers': 'pergroup',
'datasource': 'comments',
'description': _('All host- and service comments'),
'group_painters': [('comment_what', None)],
'hard_filters': [],
'hard_filtervars': [('host', ''), ('service', '')],
'hidden': False,
'hide_filters': [],
'icon' : 'comment',
'layout': 'table',
'mustsearch': False,
'name': 'comments',
'num_columns': 1,
'owner': '',
'painters': [('comment_author', None),
('comment_time', None),
('comment_expires', None),
('comment_entry_type', None),
('comment_comment', None),
('host', None),
('service_description', 'service'),
('comment_id', None)],
'public': True,
'show_filters': ['hostregex', 'comment_entry_time', 'serviceregex'],
'sorters': [('comment_type', False), ('comment_author', False)],
'title': _('Comments')
},
'comments_of_host': {'column_headers': 'pergroup',
'datasource': 'comments',
'description': _('Linkable view showing all comments of a specific host'),
'group_painters': [],
'hard_filters': ['service'],
'hard_filtervars': [('service', '')],
'hidden': True,
'hide_filters': ['siteopt', 'host'],
'icon' : 'comment',
'layout': 'table',
'mustsearch': False,
'name': 'comments_of_host',
'num_columns': 1,
'owner': '',
'painters': [('comment_author', None),
('comment_comment', None),
('comment_time', None),
('comment_expires', None),
('comment_entry_type', None)],
'public': True,
'show_filters': [],
'sorters': [],
'linktitle': _('Host comments'),
'title': _('Comments of host'),
},
'comments_of_service': {'column_headers': 'pergroup',
'datasource': 'comments',
'description': _('Linkable view showing all comments of a specific service'),
'group_painters': [],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hide_filters': ['siteopt', 'host', 'service'],
'icon' : 'comment',
'layout': 'table',
'mustsearch': False,
'name': 'comments_of_service',
'num_columns': 1,
'owner': '',
'painters': [('comment_author', None),
('comment_comment', None),
('comment_time', None),
('comment_expires', None),
('comment_entry_type', None)],
'public': True,
'show_filters': [],
'sorters': [],
'linktitle': _('Comments'),
'title': _('Comments of service'),
},
'downtimes': {'column_headers': 'pergroup',
'datasource': 'downtimes',
'description': _('All host- and service-downtimes'),
'group_painters': [('downtime_what', None)],
'hard_filters': [],
'hard_filtervars': [('is_service_scheduled_downtime_depth',
'-1'),
('host', ''),
('service', '')],
'hidden': False,
'hide_filters': [],
'icon' : 'downtime',
'layout': 'table',
'mustsearch': False,
'name': 'downtimes',
'num_columns': 1,
'owner': '',
'painters': [('host', 'host'),
('service_description', 'service'),
('downtime_origin', None),
('downtime_author', None),
('downtime_entry_time', None),
('downtime_start_time', None),
('downtime_end_time', None),
('downtime_fixed', None),
('downtime_duration', None),
('downtime_recurring', None),
('downtime_comment', None)],
'public': True,
'show_filters': ['service_scheduled_downtime_depth',
'hostregex',
'downtime_entry_time',
'serviceregex'],
'sorters': [('downtime_what', False),
('downtime_start_time', False)],
'title': _('Downtimes')},
'downtime_history': {'browser_reload': 0,
'column_headers': 'pergroup',
'datasource': 'log_events',
'description': _('All historic scheduled downtimes of hosts and services'),
'group_painters': [('log_what', None)],
'hard_filters': [ 'log_type' ],
'hard_filtervars': [('logtime_from_range', '86400'),
('logtime_from', '60'),
('log_type', 'DOWNTIME ALERT'),
],
'hidden': False,
'hide_filters': [],
'icon' : 'downtime',
'layout': 'table',
'linktitle': _('Host Dt-History'),
'mustsearch': False,
'num_columns': 1,
'painters': [('log_icon', None),
('log_time', None),
('host', 'host_dt_hist'),
('service_description', 'svc_dt_hist'),
('log_state_type', None),
('log_plugin_output', None),
],
'play_sounds': False,
'public': True,
'show_filters': ['logtime', 'hostregex', 'serviceregex', 'log_state_type' ],
'sorters': [('log_what', True), ('log_time', True), ('log_lineno', True), ],
'title': _('History of scheduled downtimes'),
'topic': _('Other'),
},
'api_downtimes': {'column_headers': 'pergroup',
'datasource': 'downtimes',
'description': _('All host- and service-downtimes (including ids)'),
'group_painters': [('downtime_what', None)],
'hard_filters': [],
'hard_filtervars': [('is_service_scheduled_downtime_depth',
'-1'),
('host', ''),
('service', '')],
'hidden': True,
'hide_filters': [],
'icon' : 'downtime',
'layout': 'table',
'mustsearch': False,
'name': 'downtimes',
'num_columns': 1,
'owner': '',
'painters': [
('host', 'host'),
('service_description', 'service'),
('downtime_origin', None),
('downtime_author', None),
('downtime_entry_time', None),
('downtime_start_time', None),
('downtime_end_time', None),
('downtime_fixed', None),
('downtime_duration', None),
('downtime_recurring', None),
('downtime_comment', None),
('downtime_id', None),
],
'public': True,
'show_filters': [
'service_scheduled_downtime_depth',
'hostregex',
'serviceregex',
'downtime_id',
],
'sorters': [('downtime_what', False),
('downtime_start_time', False)],
'title': _('Downtimes')},
'downtimes_of_host': {'column_headers': 'pergroup',
'datasource': 'downtimes',
'description': _('Lists all host downtimes.'),
'group_painters': [],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hide_filters': ['siteopt', 'host'],
'icon' : 'downtime',
'layout': 'table',
'mustsearch': False,
'name': 'downtimes_of_host',
'num_columns': 1,
'owner': '',
'painters': [
('downtime_origin', None),
('downtime_author', None),
('downtime_entry_time', None),
('downtime_start_time', None),
('downtime_end_time', None),
('downtime_fixed', None),
('downtime_duration', None),
('downtime_recurring', None),
('downtime_comment', None)],
'public': True,
'show_filters': [],
'sorters': [],
'linktitle': _('Host downtimes'),
'title': _('Downtimes of host')},
'downtimes_of_service': {'column_headers': 'pergroup',
'datasource': 'downtimes',
'description': _('Lists all downtimes for services.'),
'group_painters': [],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hide_filters': ['siteopt', 'service', 'host'],
'icon' : 'downtime',
'layout': 'table',
'mustsearch': False,
'name': 'downtimes_of_service',
'num_columns': 1,
'owner': '',
'painters': [
('downtime_origin', None),
('downtime_author', None),
('downtime_entry_time', None),
('downtime_start_time', None),
('downtime_end_time', None),
('downtime_fixed', None),
('downtime_duration', None),
('downtime_recurring', None),
('downtime_comment', None)],
'public': True,
'show_filters': [],
'sorters': [],
'linktitle': _('Downtimes'),
'title': _('Downtimes of service')},
'host': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('All services of a given host. The host and site must be set via HTML variables.'),
'group_painters': [('host_with_state', | |
`tensor` with shape `shape`.
If one component of 1-D tensor `shape` is the special value -1, the size of that
dimension is computed so that the total size remains constant. In particular, a
`shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be
unknown.
The `shape` must be 1-D and the operation returns a tensor with shape
`shape` filled with the values of `tensor`. In this case, the number of elements
implied by `shape` must be the same as the number of elements in `tensor`.
It is an error if `shape` is not 1-D.
For example:
```
# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
# tensor 't' is [[[1, 1], [2, 2]],
# [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
[3, 3, 4, 4]]
# tensor 't' is [[[1, 1, 1],
# [2, 2, 2]],
# [[3, 3, 3],
# [4, 4, 4]],
# [[5, 5, 5],
# [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
# -1 can also be used to infer the shape
# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
[2, 2, 2],
[3, 3, 3]],
[[4, 4, 4],
[5, 5, 5],
[6, 6, 6]]]
# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
Args:
tensor: A `Tensor`.
shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Defines the shape of the output tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "Reshape", name,
tld.op_callbacks, tensor, shape)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return reshape_eager_fallback(
tensor, shape, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Reshape", tensor=tensor, shape=shape, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "Tshape",
_op._get_attr_type("Tshape"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Reshape", _inputs_flat, _attrs, _result)
_result, = _result
return _result
# Expose the generated wrapper as the raw-op endpoint `tf.raw_ops.Reshape`.
Reshape = tf_export("raw_ops.Reshape")(_ops.to_raw_op(reshape))
def reshape_eager_fallback(tensor, shape, name, ctx):
  """Eager slow path for Reshape.

  Resolves the input dtypes, executes the op through the generic eager
  executor, records a gradient if taping is active, and returns the single
  output tensor.
  """
  # `tensor` may be any dtype; `shape` must match int32/int64 (default int32).
  attr_t, (tensor,) = _execute.args_to_matching_eager([tensor], ctx)
  attr_tshape, (shape,) = _execute.args_to_matching_eager([shape], ctx, _dtypes.int32)
  flat_inputs = [tensor, shape]
  op_attrs = ("T", attr_t, "Tshape", attr_tshape)
  outputs = _execute.execute(b"Reshape", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Reshape", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
def resource_strided_slice_assign(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):
  r"""Assign `value` to the sliced l-value reference of `ref`.

  The values of `value` are assigned to the positions in the variable
  `ref` that are selected by the slice parameters. The slice parameters
  `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.

  NOTE this op currently does not support broadcasting and so `value`'s
  shape must be exactly the shape produced by the slice of `ref`.

  Args:
    ref: A `Tensor` of type `resource`.
    begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    end: A `Tensor`. Must have the same type as `begin`.
    strides: A `Tensor`. Must have the same type as `begin`.
    value: A `Tensor`.
    begin_mask: An optional `int`. Defaults to `0`.
    end_mask: An optional `int`. Defaults to `0`.
    ellipsis_mask: An optional `int`. Defaults to `0`.
    new_axis_mask: An optional `int`. Defaults to `0`.
    shrink_axis_mask: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: hand inputs and attrs straight to the C++ eager runtime.
    # The argument order (op name, inputs, then alternating attr name/value)
    # is part of the TFE_Py_FastPathExecute calling protocol.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "ResourceStridedSliceAssign",
        name, tld.op_callbacks, ref, begin, end, strides, value, "begin_mask",
        begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask,
        "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_strided_slice_assign_eager_fallback(
          ref, begin, end, strides, value, begin_mask=begin_mask,
          end_mask=end_mask, ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize attrs and add an op node to the current graph.
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceStridedSliceAssign", ref=ref, begin=begin, end=end,
                                      strides=strides, value=value,
                                      begin_mask=begin_mask,
                                      end_mask=end_mask,
                                      ellipsis_mask=ellipsis_mask,
                                      new_axis_mask=new_axis_mask,
                                      shrink_axis_mask=shrink_axis_mask,
                                      name=name)
  # The op has no tensor outputs; callers get the Operation itself.
  return _op
# Expose the generated wrapper as `tf.raw_ops.ResourceStridedSliceAssign`.
ResourceStridedSliceAssign = tf_export("raw_ops.ResourceStridedSliceAssign")(_ops.to_raw_op(resource_strided_slice_assign))
def resource_strided_slice_assign_eager_fallback(ref, begin, end, strides, value, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, name, ctx):
  """Eager slow path for ResourceStridedSliceAssign.

  Normalizes the mask attributes, resolves input dtypes, and runs the op
  through the generic eager executor. The op has no outputs, so this always
  returns None.
  """
  # Treat a None mask as 0, then validate/coerce each one to a Python int.
  # Insertion order matches the original attr-validation order.
  masks = {
      "begin_mask": begin_mask,
      "end_mask": end_mask,
      "ellipsis_mask": ellipsis_mask,
      "new_axis_mask": new_axis_mask,
      "shrink_axis_mask": shrink_axis_mask,
  }
  for mask_name in masks:
    raw = masks[mask_name]
    masks[mask_name] = _execute.make_int(0 if raw is None else raw, mask_name)
  attr_t, (value,) = _execute.args_to_matching_eager([value], ctx)
  attr_index, index_inputs = _execute.args_to_matching_eager([begin, end, strides], ctx)
  (begin, end, strides) = index_inputs
  ref = _ops.convert_to_tensor(ref, _dtypes.resource)
  flat_inputs = [ref, begin, end, strides, value]
  op_attrs = ("T", attr_t, "Index", attr_index,
              "begin_mask", masks["begin_mask"],
              "end_mask", masks["end_mask"],
              "ellipsis_mask", masks["ellipsis_mask"],
              "new_axis_mask", masks["new_axis_mask"],
              "shrink_axis_mask", masks["shrink_axis_mask"])
  _execute.execute(b"ResourceStridedSliceAssign", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None
def reverse(tensor, dims, name=None):
r"""Reverses specific dimensions of a tensor.
Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
of `tensor`, this operation reverses each dimension i of `tensor` where
`dims[i]` is `True`.
`tensor` can have up to 8 dimensions. The number of dimensions
of `tensor` must equal the number of elements in `dims`. In other words:
`rank(tensor) = size(dims)`
For example:
```
# tensor 't' is [[[[ 0, 1, 2, 3],
# [ 4, 5, 6, 7],
# [ 8, 9, 10, 11]],
# [[12, 13, 14, 15],
# [16, 17, 18, 19],
# [20, 21, 22, 23]]]]
# tensor 't' shape is [1, 2, 3, 4]
# 'dims' is [False, False, False, True]
reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
[ 7, 6, 5, 4],
[ 11, 10, 9, 8]],
[[15, 14, 13, 12],
[19, 18, 17, 16],
[23, 22, 21, 20]]]]
# 'dims' is [False, True, False, False]
reverse(t, dims) ==> [[[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]]]]
# 'dims' is [False, False, True, False]
reverse(t, dims) ==> [[[[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]]
[[20, 21, 22, 23],
[16, 17, 18, 19],
[12, 13, 14, 15]]]]
```
Args:
tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.
Up to 8-D.
dims: A `Tensor` of type `bool`. 1-D. The dimensions to reverse.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "Reverse", name,
tld.op_callbacks, tensor, dims)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return reverse_eager_fallback(
tensor, dims, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add | |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.privatedns.v20201028 import models
class PrivatednsClient(AbstractClient):
    """Client for the Tencent Cloud Private DNS (privatedns) service, API version 2020-10-28."""
    # API version sent with every request issued by this client.
    _apiVersion = '2020-10-28'
    # Default service endpoint the requests are sent to.
    _endpoint = 'privatedns.tencentcloudapi.com'
    # Service identifier (used e.g. for request signing by AbstractClient).
    _service = 'privatedns'
def CreatePrivateDNSAccount(self, request):
"""创建私有域解析账号
:param request: Request instance for CreatePrivateDNSAccount.
:type request: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateDNSAccountRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateDNSAccountResponse`
"""
try:
params = request._serialize()
body = self.call("CreatePrivateDNSAccount", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreatePrivateDNSAccountResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreatePrivateZone(self, request):
"""创建私有域
:param request: Request instance for CreatePrivateZone.
:type request: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateZoneRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateZoneResponse`
"""
try:
params = request._serialize()
body = self.call("CreatePrivateZone", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreatePrivateZoneResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreatePrivateZoneRecord(self, request):
"""添加私有域解析记录
:param request: Request instance for CreatePrivateZoneRecord.
:type request: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateZoneRecordRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.CreatePrivateZoneRecordResponse`
"""
try:
params = request._serialize()
body = self.call("CreatePrivateZoneRecord", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreatePrivateZoneRecordResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeletePrivateDNSAccount(self, request):
"""删除私有域解析账号
:param request: Request instance for DeletePrivateDNSAccount.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateDNSAccountRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateDNSAccountResponse`
"""
try:
params = request._serialize()
body = self.call("DeletePrivateDNSAccount", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeletePrivateDNSAccountResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeletePrivateZone(self, request):
"""删除私有域并停止解析
:param request: Request instance for DeletePrivateZone.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateZoneRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateZoneResponse`
"""
try:
params = request._serialize()
body = self.call("DeletePrivateZone", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeletePrivateZoneResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeletePrivateZoneRecord(self, request):
"""删除私有域解析记录
:param request: Request instance for DeletePrivateZoneRecord.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateZoneRecordRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DeletePrivateZoneRecordResponse`
"""
try:
params = request._serialize()
body = self.call("DeletePrivateZoneRecord", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeletePrivateZoneRecordResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeAccountVpcList(self, request):
"""获取私有域解析账号的VPC列表
:param request: Request instance for DescribeAccountVpcList.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeAccountVpcListRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeAccountVpcListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeAccountVpcList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeAccountVpcListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeAuditLog(self, request):
"""获取操作日志列表
:param request: Request instance for DescribeAuditLog.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeAuditLogRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeAuditLogResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeAuditLog", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeAuditLogResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeDashboard(self, request):
"""获取私有域解析概览
:param request: Request instance for DescribeDashboard.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeDashboardRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeDashboardResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDashboard", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDashboardResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribePrivateDNSAccountList(self, request):
"""获取私有域解析账号列表
:param request: Request instance for DescribePrivateDNSAccountList.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateDNSAccountListRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateDNSAccountListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribePrivateDNSAccountList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePrivateDNSAccountListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribePrivateZone(self, request):
"""获取私有域信息
:param request: Request instance for DescribePrivateZone.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneResponse`
"""
try:
params = request._serialize()
body = self.call("DescribePrivateZone", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePrivateZoneResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribePrivateZoneList(self, request):
"""获取私有域列表
:param request: Request instance for DescribePrivateZoneList.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneListRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribePrivateZoneList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePrivateZoneListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribePrivateZoneRecordList(self, request):
"""获取私有域记录列表
:param request: Request instance for DescribePrivateZoneRecordList.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneRecordListRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneRecordListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribePrivateZoneRecordList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePrivateZoneRecordListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribePrivateZoneService(self, request):
"""查询私有域解析开通状态
:param request: Request instance for DescribePrivateZoneService.
:type request: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneServiceRequest`
:rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribePrivateZoneServiceResponse`
"""
try:
params = request._serialize()
body = self.call("DescribePrivateZoneService", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePrivateZoneServiceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeQuotaUsage(self, request):
    """Query quota usage.

    :param request: Request instance for DescribeQuotaUsage.
    :type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeQuotaUsageRequest`
    :rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeQuotaUsageResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeQuotaUsage", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeQuotaUsageResponse()
            model._deserialize(response["Response"])
            return model
        else:
            # Service-side error: surface code/message/request-id to the caller.
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # BUG FIX: Python 3 exceptions have no `.message`; the original
            # re-raise crashed with AttributeError and hid the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeRequestData(self, request):
    """Get the DNS request volume of a private zone.

    :param request: Request instance for DescribeRequestData.
    :type request: :class:`tencentcloud.privatedns.v20201028.models.DescribeRequestDataRequest`
    :rtype: :class:`tencentcloud.privatedns.v20201028.models.DescribeRequestDataResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeRequestData", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeRequestDataResponse()
            model._deserialize(response["Response"])
            return model
        else:
            # Service-side error: surface code/message/request-id to the caller.
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # BUG FIX: Python 3 exceptions have no `.message`; the original
            # re-raise crashed with AttributeError and hid the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyPrivateZone(self, request):
    """Modify private zone information.

    :param request: Request instance for ModifyPrivateZone.
    :type request: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneRequest`
    :rtype: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyPrivateZone", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyPrivateZoneResponse()
            model._deserialize(response["Response"])
            return model
        else:
            # Service-side error: surface code/message/request-id to the caller.
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # BUG FIX: Python 3 exceptions have no `.message`; the original
            # re-raise crashed with AttributeError and hid the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyPrivateZoneRecord(self, request):
    """Modify a DNS record of a private zone.

    :param request: Request instance for ModifyPrivateZoneRecord.
    :type request: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneRecordRequest`
    :rtype: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneRecordResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyPrivateZoneRecord", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyPrivateZoneRecordResponse()
            model._deserialize(response["Response"])
            return model
        else:
            # Service-side error: surface code/message/request-id to the caller.
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # BUG FIX: Python 3 exceptions have no `.message`; the original
            # re-raise crashed with AttributeError and hid the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyPrivateZoneVpc(self, request):
"""修改私有域关联的VPC
:param request: Request instance for ModifyPrivateZoneVpc.
:type request: :class:`tencentcloud.privatedns.v20201028.models.ModifyPrivateZoneVpcRequest`
| |
<gh_stars>0
import os
import random
from random import seed
import numpy
import pandas as pd
from keras_preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
from sklearn.model_selection import KFold, cross_val_score, StratifiedKFold
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import SVR
import numpy as np
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.models import load_model, Sequential
import portfolio_model
SEED =1
def get_results(model, X, Y, test):
    """Print a confusion matrix and accuracy/precision/recall/F1 for one
    cross-validation fold, and return the confusion matrix.

    :param model: trained Keras-style classifier exposing .predict()
    :param X: feature matrix (samples x features)
    :param Y: one-hot encoded labels (samples x classes)
    :param test: index array selecting the evaluation fold
    :return: confusion matrix (ndarray) for the fold
    """
    print('Confusion Matrix')
    y_pred = model.predict(X[test], verbose=0)
    # BUG FIX: `labels` was previously derived from the predictions only, so a
    # class that was never predicted dropped out of the matrix, and the 2x2
    # accumulation in the callers ("models += cm") became misaligned. Use the
    # union of true and predicted classes instead.
    labels = np.unique(np.concatenate((Y[test].argmax(axis=1),
                                       y_pred.argmax(axis=1))))
    cm = confusion_matrix(Y[test].argmax(axis=1), y_pred.argmax(axis=1), labels=labels)
    print(pd.DataFrame(cm, index=labels, columns=labels))
    Y_test = np.argmax(Y[test], axis=1)
    y_pred = np.argmax(y_pred, axis=1)
    from sklearn.metrics import precision_score, accuracy_score, recall_score, f1_score
    print(f"Accuracy: {round(accuracy_score(Y_test, y_pred), 2)}")
    print(f"Precision: {round(precision_score(Y_test, y_pred), 2)}")
    print(f"Recall: {round(recall_score(Y_test, y_pred), 2)}")
    print(f"F1_score: {round(f1_score(Y_test, y_pred), 2)}\n\n")
    return cm
def NN(df):
    """Run 5-fold stratified cross-validation of the dense NN on engineered
    features, printing per-fold scores and aggregate metrics.

    :param df: dataframe of samples; features and one-hot labels are produced
        by portfolio_model.nn_preprocess_step.
    """
    X, Y = portfolio_model.nn_preprocess_step(df, "test_features")
    ss = StandardScaler()
    X = ss.fit_transform(X)
    # model_path: str = "NN_end"
    # model = load_model(model_path, compile=True, custom_objects=None)
    # define 5-fold cross validation test harness
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
    cvscores = []
    # Running element-wise sum of the per-fold 2x2 confusion matrices.
    models = [[0,0],[0,0]]
    for train, test in kfold.split(X, np.argmax(Y, axis=1)):
        model, _ = portfolio_model.NN_model()
        model.fit(X[train], Y[train], epochs=1000, batch_size=40,verbose=0)
        # evaluate the model
        scores = model.evaluate(X[test], Y[test], verbose=0)
        print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
        cvscores.append(scores[1] * 100)
        models += get_results(model,X,Y,test)
    print(models)
    # Aggregate metrics from the summed confusion matrix.
    # NOTE(review): this indexing assumes the layout [[TP, FN], [FP, TN]];
    # sklearn's confusion_matrix with labels [0, 1] is [[TN, FP], [FN, TP]] —
    # confirm which class is treated as positive here.
    TP = models[0][0]
    FP=models[1][0]
    FN=models[0][1]
    TN = models[1][1]
    print(f"Accuracy: {(TP+TN)/(TP+TN+FP+FN)}")
    print(f"Precision: {(TP)/(TP+FP)}")
    print(f"Recall: {TP/(TP+FN)}")
    print(f"F1_score: {(2*TP/(TP+FN)*(TP)/(TP+FP))/(TP/(TP+FN)+(TP)/(TP+FP))}\n\n")
    print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
def NN_2_lan(df_train , df_test):
    """Two-language experiment: fit the dense NN on folds of the second
    dataset and report metrics.

    NOTE(review): the folds are split over X2/Y2 (df_test) but evaluation
    below indexes X/Y (df_train) with those fold indices. That only works if
    both frames have the same number of rows and looks like a copy-paste slip
    from NN() — confirm whether X2[test]/Y2[test] was intended.

    :param df_train: dataframe preprocessed with "train_features"
    :param df_test: dataframe preprocessed with "test_features"
    """
    X, Y = portfolio_model.nn_preprocess_step(df_train, "train_features")
    ss = StandardScaler()
    X = ss.fit_transform(X)
    X2, Y2 = portfolio_model.nn_preprocess_step(df_test, "test_features")
    ss = StandardScaler()
    X2 = ss.fit_transform(X2)
    # model_path: str = "NN_end"
    # model = load_model(model_path, compile=True, custom_objects=None)
    # define 5-fold cross validation test harness
    kfold = StratifiedKFold(n_splits=5,random_state=SEED,shuffle=True)
    cvscores = []
    # Running element-wise sum of the per-fold 2x2 confusion matrices.
    models = [[0,0],[0,0]]
    for train, test in kfold.split(X2,np.argmax(Y2, axis=1)):
        print(test.size)
        model, _ = portfolio_model.NN_model()
        model.fit(X2[train], Y2[train], epochs=1000, batch_size=400,verbose=0)
        # evaluate the model
        # NOTE(review): X/Y here vs X2/Y2 above — see docstring.
        scores = model.evaluate(X[test], Y[test], verbose=0)
        print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
        cvscores.append(scores[1] * 100)
        models += get_results(model,X,Y,test)
    print(models)
    # Aggregate metrics from the summed confusion matrix.
    # NOTE(review): assumes layout [[TP, FN], [FP, TN]] — verify against
    # sklearn's convention ([[TN, FP], [FN, TP]] for labels [0, 1]).
    TP = models[0][0]
    FP=models[1][0]
    FN=models[0][1]
    TN = models[1][1]
    print(f"Accuracy: {(TP+TN)/(TP+TN+FP+FN)}")
    print(f"Precision: {(TP)/(TP+FP)}")
    print(f"Recall: {TP/(TP+FN)}")
    print(f"F1_score: {(2*TP/(TP+FN)*(TP)/(TP+FP))/(TP/(TP+FN)+(TP)/(TP+FP))}\n\n")
    print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
def concatenate_per_row(A, B):
    """Pair two 1-D sequences column-wise into an (n, 2) array.

    :param A: 1-D numpy array providing the first column; its dtype is used
        for the output, so B's values are cast to it.
    :param B: sequence of the same length providing the second column.
    :return: ndarray of shape (len(A), 2).
    """
    rows = len(A)
    # Removed the unused `m2 = len(B)` local; a length mismatch still fails
    # loudly in the reshape below.
    out = np.zeros((rows, 2), dtype=A.dtype)
    out[:, 0] = A
    out[:, 1] = np.asarray(B).reshape(rows)
    return out
def CNN(df):
    """5-fold stratified cross-validation of the CNN on spectrogram images.

    Maps each sample's audio file name to a .jpg spectrogram path, builds
    train/test image generators per fold, trains portfolio_model.CNN_model,
    and prints per-fold and aggregate metrics.

    :param df: dataframe with 'file' and 'label' columns.
    """
    # df.apply(portfolio_model.images, axis=1);
    df['file'] = df["file"].apply(portfolio_model.make_jpg)
    # Rescaling the images as usual to feed into the CNN
    datagen = ImageDataGenerator(rescale=1. / 255.)
    # df_generator = datagen.flow_from_dataframe(
    #     dataframe=df,
    #     directory="voice_images_test_new",
    #     x_col="file",
    #     y_col="label",
    #     shuffle=False,
    #     class_mode="categorical",
    #     target_size=(64, 64))
    # define 5-fold cross validation test harness
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
    #kfold.get_n_splits(df_generator)
    X= df.file
    Y= df.label
    # X = np.array(df_generator.filepaths)
    # Y = np.array(df_generator.labels)
    cvscores = []
    # Running element-wise sum of the per-fold 2x2 confusion matrices.
    models = [[0,0],[0,0]]
    # print(df_generator.filepaths)
    # X = df_generator.filepaths
    # Y = df_generator.labels
    for train, test in kfold.split(X,Y):
        # trainData = X[train]
        # testData = X[test]
        # trainLabels = Y[train]
        # testLabels = Y[test]
        #trainGenerator = Generator(trainData, trainLabels, batchSize=24, imageSize=(64, 64), augment=True)
        # valGenerator = ImageDataGenerator(testData, testLabels, batchSize=5, imageSize=(64, 64), augment=False)
        #train = concatenate_per_row(trainData, trainLabels)
        #test_df = concatenate_per_row(testData,testLabels)
        # shuffle=False keeps generator order aligned with testData.labels
        # for the confusion matrix below.
        testData = datagen.flow_from_dataframe(
            dataframe=df.iloc[test],
            directory="voice_images_test_new",
            x_col="file",
            y_col="label",
            shuffle=False,
            class_mode="categorical",
            target_size=(64, 64))
        trainData = datagen.flow_from_dataframe(
            dataframe=df.iloc[train],
            directory="voice_images_test_new",
            x_col="file",
            y_col="label",
            shuffle=False,
            class_mode="categorical",
            target_size=(64, 64))
        model, _ = portfolio_model.CNN_model()
        # NOTE(review): fit_generator/predict_generator are deprecated in
        # TF >= 2.1 in favour of fit/predict — confirm the pinned TF version.
        model.fit_generator(
            trainData,
            steps_per_epoch=len(trainData),
            epochs=1000,
            validation_data=testData,
            validation_steps=len(testData))
        # scores =model.fit_generator(generator=X[train],
        #                            steps_per_epoch=24,
        #                            validation_data=X[test],
        #                            validation_steps=5,
        #                            epochs=100,
        #                            )
        # evaluate the model
        scores = model.evaluate(testData, verbose=2)
        print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
        cvscores.append(scores[1] * 100)
        print('Restored model, accuracy: {:5.2f}%'.format(100 * scores[1]))
        y_pred = model.predict_generator(testData)
        y_pred = np.argmax(y_pred, axis=1)
        y_test = testData.labels
        print('Confusion Matrix')
        labels = [0,1]
        cm = confusion_matrix(testData.labels, y_pred, labels=labels)
        models += cm
        print(pd.DataFrame(cm, index=labels, columns=labels))
        from sklearn.metrics import precision_score, accuracy_score, recall_score, f1_score
        print(f"Accuracy: {round(accuracy_score(y_test, y_pred), 2)}")
        print(f"Precision: {round(precision_score(y_test, y_pred), 2)}")
        print(f"Recall: {round(recall_score(y_test, y_pred), 2)}")
        print(f"F1_score: {round(f1_score(y_test, y_pred), 2)}")
        print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
    print(models)
    # Aggregate metrics from the summed confusion matrix.
    # NOTE(review): assumes layout [[TP, FN], [FP, TN]] — verify against
    # sklearn's convention ([[TN, FP], [FN, TP]] for labels [0, 1]).
    TP = models[0][0]
    FP = models[1][0]
    FN = models[0][1]
    TN = models[1][1]
    print(f"Accuracy: {(TP + TN) / (TP + TN + FP + FN)}")
    print(f"Precision: {(TP) / (TP + FP)}")
    print(f"Recall: {TP / (TP + FN)}")
    print(f"F1_score: {(2 * TP / (TP + FN) * (TP) / (TP + FP)) / (TP / (TP + FN) + (TP) / (TP + FP))}\n\n")
    print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
def CRNN(df):
    """5-fold stratified cross-validation of the recurrent model on
    spectrogram images; mirrors CNN() but uses portfolio_model.RNN_model.

    :param df: dataframe with 'file' and 'label' columns.
    """
    # df.apply(portfolio_model.images, axis=1);
    df['file'] = df["file"].apply(portfolio_model.make_jpg)
    # Rescaling the images as usual to feed into the CNN
    datagen = ImageDataGenerator(rescale=1. / 255.)
    # NOTE(review): random_state=42 here while the sibling NN/CNN use SEED
    # (=1) — confirm whether the different fold split is intentional.
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    X= df.file
    Y= df.label
    cvscores = []
    # Running element-wise sum of the per-fold 2x2 confusion matrices.
    models = [[0,0],[0,0]]
    for train, test in kfold.split(X,Y):
        # shuffle=False keeps generator order aligned with testData.labels
        # for the confusion matrix below.
        testData = datagen.flow_from_dataframe(
            dataframe=df.iloc[test],
            directory="voice_images_test_new",
            x_col="file",
            y_col="label",
            shuffle=False,
            class_mode="categorical",
            target_size=(64, 64))
        trainData = datagen.flow_from_dataframe(
            dataframe=df.iloc[train],
            directory="voice_images_test_new",
            x_col="file",
            y_col="label",
            shuffle=False,
            class_mode="categorical",
            target_size=(64, 64))
        model, _ = portfolio_model.RNN_model()
        # NOTE(review): fit_generator/predict_generator are deprecated in
        # TF >= 2.1 in favour of fit/predict — confirm the pinned TF version.
        model.fit_generator(
            trainData,
            steps_per_epoch=len(trainData),
            epochs=1000,
            validation_data=testData,
            validation_steps=len(testData))
        scores = model.evaluate(testData, verbose=2)
        print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
        cvscores.append(scores[1] * 100)
        print('Restored model, accuracy: {:5.2f}%'.format(100 * scores[1]))
        y_pred = model.predict_generator(testData)
        y_pred = np.argmax(y_pred, axis=1)
        y_test = testData.labels
        print('Confusion Matrix')
        labels = [0, 1]
        cm = confusion_matrix(testData.labels, y_pred, labels=labels)
        models += cm
        print(pd.DataFrame(cm, index=labels, columns=labels))
        from sklearn.metrics import precision_score, accuracy_score, recall_score, f1_score
        print(f"Accuracy: {round(accuracy_score(y_test, y_pred), 2)}")
        print(f"Precision: {round(precision_score(y_test, y_pred), 2)}")
        print(f"Recall: {round(recall_score(y_test, y_pred), 2)}")
        print(f"F1_score: {round(f1_score(y_test, y_pred), 2)}")
        print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
    print(models)
    # Aggregate metrics from the summed confusion matrix.
    # NOTE(review): assumes layout [[TP, FN], [FP, TN]] — verify against
    # sklearn's convention ([[TN, FP], [FN, TP]] for labels [0, 1]).
    TP = models[0][0]
    FP = models[1][0]
    FN = models[0][1]
    TN = models[1][1]
    print(f"Accuracy: {(TP + TN) / (TP + TN + FP + FN)}")
    print(f"Precision: {(TP) / (TP + FP)}")
    print(f"Recall: {TP / (TP + FN)}")
    print(f"F1_score: {(2 * TP / (TP + FN) * (TP) / (TP + FP)) / (TP / (TP + FN) + (TP) / (TP + FP))}\n\n")
    print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
def read_data():
    """Load true/false claim file lists into one shuffled labelled dataframe.

    :return: dataframe with columns 'file' and 'label' ('1' = true claim,
        '0' = false claim), shuffled with a fixed seed.
    """
    # list the files
    filelist = os.listdir('data/true_claims')
    df_true = pd.DataFrame(filelist)
    # '1' labels the true-claim class
    df_true['label'] = '1'
    df_true = df_true.rename(columns={0: 'file'})
    # BUG FIX: the original built the .DS_Store mask but discarded the result,
    # so the macOS auto-generated file stayed in the dataset; actually drop it.
    df_true = df_true[df_true['file'] != '.DS_Store']
    filelist = os.listdir('data/false_claims')
    df_false = pd.DataFrame(filelist)
    df_false['label'] = '0'
    df_false = df_false.rename(columns={0: 'file'})
    df_false = df_false[df_false['file'] != '.DS_Store']
    df = pd.concat([df_false, df_true], ignore_index=True)
    # Deterministic shuffle so downstream splits are reproducible.
    df = df.sample(frac=1, random_state=42).reset_index(drop=True)
    return df
def read_data_out_of_game():
    """Load claimed true/false claim file lists into one shuffled labelled
    dataframe (the "out of game" recordings).

    :return: dataframe with columns 'file' and 'label' ('1' = true claim,
        '0' = false claim), shuffled with a fixed seed.
    """
    # list the files
    filelist = os.listdir('data/claimed_true_claims')
    df_true = pd.DataFrame(filelist)
    # '1' labels the true-claim class
    df_true['label'] = '1'
    df_true = df_true.rename(columns={0: 'file'})
    # BUG FIX: the original built the .DS_Store mask but discarded the result,
    # so the macOS auto-generated file stayed in the dataset; actually drop it.
    df_true = df_true[df_true['file'] != '.DS_Store']
    filelist = os.listdir('data/claimed_false_claims')
    df_false = pd.DataFrame(filelist)
    df_false['label'] = '0'
    df_false = df_false.rename(columns={0: 'file'})
    df_false = df_false[df_false['file'] != '.DS_Store']
    df = pd.concat([df_false, df_true], ignore_index=True)
    # Randomizing our files to be able to split into train, validation and test
    df = df.sample(frac=1, random_state=42).reset_index(drop=True)
    # (removed a discarded `df['label'].value_counts(normalize=True)` no-op)
    return df
def read_data_heb_heb():
    """Load the Hebrew TRUE/FALSE claim file lists into one shuffled
    labelled dataframe.

    :return: dataframe with columns 'file' and 'label' ('1' = true claim,
        '0' = false claim), shuffled with the module SEED.
    """
    # list the files
    filelist = os.listdir('data/heb_claims/TRUE/')
    df_true = pd.DataFrame(filelist)
    # '1' labels the true-claim class
    df_true['label'] = '1'
    df_true = df_true.rename(columns={0: 'file'})
    # BUG FIX: the original built the .DS_Store mask but discarded the result,
    # so the macOS auto-generated file stayed in the dataset; actually drop it.
    df_true = df_true[df_true['file'] != '.DS_Store']
    filelist = os.listdir('data/heb_claims/FALSE/')
    df_false = pd.DataFrame(filelist)
    df_false['label'] = '0'
    df_false = df_false.rename(columns={0: 'file'})
    df_false = df_false[df_false['file'] != '.DS_Store']
    df = pd.concat([df_false, df_true], ignore_index=True)
    # Randomizing our files to be able to split into train, validation and test
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    # (removed a discarded `df['label'].value_counts(normalize=True)` no-op)
    return df
def read_data_eng_eng():
# list the files
filelist = os.listdir('data/eng_claims/TRUE/')
# read them into pandas
df_male = pd.DataFrame(filelist)
# Adding the 1 label to the dataframe representing male
df_male['label'] = '1'
# Renaming the column name to file
df_male = df_male.rename(columns={0: 'file'})
# Checking for a file that gets automatically generated and we need to drop
df_male[df_male['file'] == '.DS_Store']
filelist = os.listdir('data/eng_claims/FALSE/')
# read them into pandas
df_female = pd.DataFrame(filelist)
df_female['label'] = '0'
df_female = df_female.rename(columns={0: 'file'})
df_female[df_female['file'] == '.DS_Store']
# Dropping the system | |
<gh_stars>0
from __future__ import print_function
from itertools import product
import os
import pickle
import bdpy
from bdpy.dataform import Features
from bdpy.util import dump_info, makedir_ifnot
import numpy as np
from stability_selection import stability_selection
from sklearn.decomposition import PCA
from scipy import io
# Settings ###################################################################
seed = 42  # global RNG seed (also passed to the PCA steps for reproducibility)
TINY = 1e-8  # epsilon added to std-dev denominators to avoid division by zero
# Python RNG
np.random.seed(seed)
# fMRI subjects to process; each maps to a training/test HDF5 pair below.
subject_set=['subject1','subject2','subject3']
for subject in subject_set:
if subject == 'subject1':
subjects_list = {
'sub-01': 'sub-01_perceptionNaturalImageTraining_VC_v2.h5',
}
subjects_list_test = {
'sub-01': 'sub-01_perceptionNaturalImageTest_VC_v2.h5',
}
elif subject == 'subject2':
subjects_list = {
'sub-02': 'sub-02_perceptionNaturalImageTraining_VC_v2.h5',
}
subjects_list_test = {
'sub-02': 'sub-02_perceptionNaturalImageTest_VC_v2.h5',
}
elif subject == 'subject3':
subjects_list = {
'sub-03': 'sub-03_perceptionNaturalImageTraining_VC_v2.h5',
}
subjects_list_test = {
'sub-03': 'sub-03_perceptionNaturalImageTest_VC_v2.h5',
}
text_model_list = [
'GPTNeo',
'ALBERT',
# 'GPTNeo_phrases',
# 'ALBERT_phrases'
]
rois_list = {
# 'VC': 'ROI_VC = 1',
'LVC': 'ROI_LVC = 1',
'HVC': 'ROI_HVC = 1',
# 'V1': 'ROI_V1 = 1',
# 'V2': 'ROI_V2 = 1',
# 'V3': 'ROI_V3 = 1',
# 'V4': 'ROI_V4 = 1',
# 'LOC': 'ROI_LOC = 1',
# 'FFA': 'ROI_FFA = 1',
# 'PPA': 'ROI_PPA = 1',
'IT': 'ROI_IT = 1',
}
network = 'pytorch/repvgg_b3g4'
features_list = [#'Conv_0',
# 'Conv_1',
'Conv_2',
'Conv_3',
'Conv_4',
'linear',
'final']
features_list = features_list[::-1] # Start training from deep layers
# Brain data
brain_dir = './data/DeepImageReconstruction/data/fmri'
# Image features
timm_extracted_visual_features = './data/DIR-Wiki/visual_feature/ImageNetTraining/'+network
timm_extracted_visual_features_test = './data/DIR-Wiki/visual_feature/ImageNetTest/'+network
timm_extracted_visual_features_aug = './data/DIR-Wiki/visual_feature/Aug_1000/'+network
print('DNN feature')
print(timm_extracted_visual_features)
# Text features
model_extracted_textual_features = './data/Wiki_articles_features'
# Results directory
results_dir_root = './data/DIR-Wiki/visual_feature/ImageNetTraining/'+network+'-PCA'
results_dir_root_test = './data/DIR-Wiki/visual_feature/ImageNetTest/'+network+'-PCA'
results_dir_root_aug = './data/DIR-Wiki/visual_feature/Aug_1000/'+network+'-PCA'
results_fmri_root = './data/DIR-Wiki/brain_feature/LVC_HVC_IT'
results_text_root = './data/DIR-Wiki/textual_feature/ImageNetTraining/text'
results_text_root_test = './data/DIR-Wiki/textual_feature/ImageNetTest/text'
results_text_root_aug = './data/DIR-Wiki/textual_feature/Aug_1000/text'
# Main #######################################################################
analysis_basename = os.path.splitext(os.path.basename(__file__))[0]
# Print info -----------------------------------------------------------------
print('Subjects: %s' % subjects_list.keys())
print('ROIs: %s' % rois_list.keys())
print('Target features: %s' % network.split('/')[-1])
print('Layers: %s' % features_list)
print('')
# Load data ------------------------------------------------------------------
print('----------------------------------------')
print('Loading data')
data_brain = {sbj: bdpy.BData(os.path.join(brain_dir, dat_file))
for sbj, dat_file in subjects_list.items()}
data_features = Features(os.path.join(timm_extracted_visual_features, network))
data_brain_test = {sbj: bdpy.BData(os.path.join(brain_dir, dat_file))
for sbj, dat_file in subjects_list_test.items()}
data_features_test = Features(os.path.join(timm_extracted_visual_features_test, network))
data_features_aug = Features(os.path.join(timm_extracted_visual_features_aug, network))
# Initialize directories -----------------------------------------------------
makedir_ifnot(results_dir_root)
makedir_ifnot(results_dir_root_test)
makedir_ifnot(results_dir_root_aug)
makedir_ifnot(results_text_root)
makedir_ifnot(results_text_root_test)
makedir_ifnot(results_text_root_aug)
makedir_ifnot('tmp')
# Save runtime information ---------------------------------------------------
info_dir = results_dir_root
runtime_params = {
'fMRI data': [os.path.abspath(os.path.join(brain_dir, v)) for v in subjects_list.values()],
'ROIs': rois_list.keys(),
'target DNN': network.split('/')[-1],
'target DNN features': os.path.abspath(timm_extracted_visual_features),
'target DNN layers': features_list,
}
dump_info(info_dir, script=__file__, parameters=runtime_params)
#######################################
# Stability selection
#######################################
select_ratio = 0.15
totalnum = 0
first = 1
best_roi_sel = []
num_voxel = dict()
for sbj, roi in product(subjects_list ,rois_list):
print('--------------------')
print('VC ROI: %s' % roi)
trial1 = []
l1 = []
trial2 = []
l2 = []
trial3 = []
l3 = []
trial4 = []
l4 = []
trial5 = []
l5 = []
# Brain data
x = data_brain[sbj].select(rois_list[roi]) # Brain data
x_labels = data_brain[sbj].select('image_index').flatten() # Label (image index)
x_test = data_brain_test[sbj].select(rois_list[roi]) # Brain data
x_labels_test = data_brain_test[sbj].select('image_index').flatten() # Label (image index)
for l in range(1,int(len(x_labels)/5)+1):
n = np.where(x_labels==l)
#trial1
l1.append(l)
trial1.append(x[n[0][0]])
#trial2
l2.append(l)
trial2.append(x[n[0][1]])
#trial3
l3.append(l)
trial3.append(x[n[0][2]])
#trial4
l4.append(l)
trial4.append(x[n[0][3]])
#trial5
l5.append(l)
trial5.append(x[n[0][4]])
#reshape to select
sel_input = np.array([trial1])
sel_input = np.append(sel_input, np.array([trial2]), axis=0)
sel_input = np.append(sel_input, np.array([trial3]), axis=0)
sel_input = np.append(sel_input, np.array([trial4]), axis=0)
sel_input = np.append(sel_input, np.array([trial5]), axis=0)
select_num = int(select_ratio * (x.shape)[1])
num_voxel.update({roi:select_num})
print('roi_shape=',x.shape)
sel_idx = stability_selection(sel_input, select_num)
#save as best_roi_sel mat
if first:
best_roi_sel = np.array(x[:,sel_idx])
best_roi_sel_test = np.array(x_test[:, sel_idx])
first = 0
else:
best_roi_sel = np.append(best_roi_sel, x[:,sel_idx], axis=1)
best_roi_sel_test = np.append(best_roi_sel_test, x_test[:,sel_idx], axis=1)
totalnum_voxel = (best_roi_sel.shape)[1]
print('total_selected_voxel=', totalnum_voxel)
print(num_voxel)
print('best_roi_sel_shape=',best_roi_sel.shape)
print('x_labels_shape=',x_labels.shape)
print('best_roi_sel_test_shape=',best_roi_sel_test.shape)
print('x_labels_test_shape=',x_labels_test.shape)
#######################################
# Save brain and image feature data
#######################################
# Analysis loop --------------------------------------------------------------
print('----------------------------------------')
print('Analysis loop')
first = 1
for feat, sbj in product(features_list, subjects_list):
print('--------------------')
print('Feature: %s' % feat)
print('Subject: %s' % sbj)
results_dir_alllayer_pca = os.path.join(results_dir_root, sbj)
results_dir_alllayer_pca_test = os.path.join(results_dir_root_test, sbj)
results_dir_alllayer_pca_aug = os.path.join(results_dir_root_aug, sbj)
results_fmri_dir = os.path.join(results_fmri_root, sbj)
# Preparing data
# --------------
print('Preparing data')
# Brain data
x = best_roi_sel # Brain data
x_labels = x_labels # Label (image index)
x_class = data_brain[sbj].select('Label') # Label (class index)
WordNetID = x_class[:, 2]
if sbj == 'sub-03':
class_idx = data_brain[sbj].select('image_index').flatten()
else:
class_idx = x_class[:, 1]
x_test = best_roi_sel_test # Brain data
x_labels_test = x_labels_test # Label (image index)
x_class_test = data_brain_test[sbj].select('Label') # Label (class index)
WordNetID_test = x_class_test[:, 2]
if sbj == 'sub-03':
class_idx_test = data_brain_test[sbj].select('image_index').flatten()
else:
class_idx_test = x_class_test[:, 1]
# Averaging test brain data
x_labels_test_unique, indices = np.unique(x_labels_test, return_index=True)
x_test_unique = np.vstack([np.mean(x_test[(np.array(x_labels_test) == lb).flatten(), :], axis=0) for lb in x_labels_test_unique])
WordNetID_test_unique = WordNetID_test[indices]
class_idx_test_unique = class_idx_test[indices]
# Target features and image labels (file names)
y = data_features.get_features(feat) # Target DNN features
y_labels = data_features.index # Label (image index)
y = np.reshape(y,(y.shape[0],-1))
y_test = data_features_test.get_features(feat) # Target DNN features
y_labels_test = data_features_test.index # Label (image index)
y_test = np.reshape(y_test,(y_test.shape[0],-1))
y_aug = data_features_aug.get_features(feat) # Target DNN features
y_labels_aug_temp = data_features_aug.labels # Label (image index)
y_labels_aug = []
for it in y_labels_aug_temp:
y_labels_aug.append(int(it.split('_')[0][1:]))
y_labels_aug = np.array(y_labels_aug)
y_aug = np.reshape(y_aug,(y_aug.shape[0],-1))
# Calculate normalization parameters
# Normalize X (fMRI data)
x_mean = np.mean(x, axis=0)[np.newaxis, :] # np.newaxis was added to match Matlab outputs
x_norm = np.std(x, axis=0, ddof=1)[np.newaxis, :]
# Normalize Y (DNN features)
y_mean = np.mean(y, axis=0)[np.newaxis, :]
y_norm = np.std(y, axis=0, ddof=1)[np.newaxis, :]
# Y index to sort Y by X (matching samples)
y_index = np.array([np.where(np.array(y_labels) == xl) for xl in x_labels]).flatten()
y_index_test = np.array([np.where(np.array(y_labels_test) == xl) for xl in x_labels_test]).flatten()
y_index_test_unique = np.array([np.where(np.array(y_labels_test) == xl) for xl in x_labels_test_unique]).flatten()
# X preprocessing
print('Normalizing X')
x = (x - x_mean) / (x_norm+TINY)
x[np.isinf(x)] = 0
x_test = (x_test - x_mean) / (x_norm+TINY)
x_test[np.isinf(x_test)] = 0
x_test_unique = (x_test_unique - x_mean) / (x_norm+TINY)
x_test_unique[np.isinf(x_test_unique)] = 0
print('Doing PCA')
ipca = PCA(n_components=0.99, random_state=seed)
ipca.fit(x)
x = ipca.transform(x)
x_test = ipca.transform(x_test)
x_test_unique = ipca.transform(x_test_unique)
print(x.shape)
# Y preprocessing
print('Normalizing Y')
y = (y - y_mean) / (y_norm+TINY)
y[np.isinf(y)] = 0
y_test = (y_test - y_mean) / (y_norm+TINY)
y_test[np.isinf(y_test)] = 0
y_aug = (y_aug - y_mean) / (y_norm+TINY)
y_aug[np.isinf(y_aug)] = 0
print('Doing PCA')
ipca = PCA(n_components=0.99, random_state=seed)
ipca.fit(y)
# ipca.fit(y_aug)
y = ipca.transform(y)
y_test = ipca.transform(y_test)
y_aug = ipca.transform(y_aug)
print(y.shape)
print('Sorting Y')
y = y[y_index, :]
y_test = y_test[y_index_test, :]
y_test_unique = y_test[y_index_test_unique, :]
if first:
feat_pca_train = y
feat_pca_test = y_test
feat_pca_aug = y_aug
feat_pca_test_unique = y_test_unique
first = 0
else:
feat_pca_train = np.concatenate((feat_pca_train, y), axis=1)
feat_pca_test = np.concatenate((feat_pca_test, y_test), axis=1)
feat_pca_aug = np.concatenate((feat_pca_aug, y_aug), axis=1)
feat_pca_test_unique = np.concatenate((feat_pca_test_unique, y_test_unique), axis=1)
print(feat_pca_test_unique.shape)
makedir_ifnot(results_dir_alllayer_pca)
makedir_ifnot(results_dir_alllayer_pca_test)
makedir_ifnot(results_dir_alllayer_pca_aug)
results_dir_alllayer_pca_path = os.path.join(results_dir_alllayer_pca, "feat_pca_train.mat")
io.savemat(results_dir_alllayer_pca_path, {"data":feat_pca_train})
results_dir_alllayer_pca_test_path = os.path.join(results_dir_alllayer_pca_test, "feat_pca_test.mat")
io.savemat(results_dir_alllayer_pca_test_path, {"data":feat_pca_test})
results_dir_alllayer_pca_aug_path = os.path.join(results_dir_alllayer_pca_aug, "feat_pca_aug.mat")
io.savemat(results_dir_alllayer_pca_aug_path, {"data":feat_pca_aug})
results_dir_alllayer_pca_test_path = os.path.join(results_dir_alllayer_pca_test, "feat_pca_test_unique.mat")
io.savemat(results_dir_alllayer_pca_test_path, {"data":feat_pca_test_unique})
makedir_ifnot(results_fmri_dir)
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_train_data.mat")
io.savemat(results_fmri_dir_path, {"data":x, "image_idx":x_labels, "WordNetID":WordNetID, "class_idx":class_idx})
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_test_data.mat")
io.savemat(results_fmri_dir_path, {"data":x_test, "image_idx":x_labels_test, "WordNetID":WordNetID_test, "class_idx":class_idx_test})
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_test_data_unique.mat")
io.savemat(results_fmri_dir_path, {"data":x_test_unique, "image_idx":x_labels_test_unique, "WordNetID":WordNetID_test_unique, "class_idx":class_idx_test_unique})
#######################################
# Save text feature data
#######################################
for feat, sbj in product(text_model_list, subjects_list):
print('--------------------')
print('Feature: %s' % feat)
print('Subject: %s' % sbj)
results_dir_text_fea = os.path.join(results_text_root, feat, sbj)
results_dir_text_fea_test = os.path.join(results_text_root_test, feat, sbj)
results_dir_text_fea_aug = os.path.join(results_text_root_aug, feat, sbj)
# Preparing data
# --------------
print('Preparing data')
# Brain data
x_class = data_brain[sbj].select('Label') # Label (class index)
WordNetID = x_class[:, 2]
class_idx = x_class[:, 1]
x_labels_test = x_labels_test # Label (image index)
x_class_test = data_brain_test[sbj].select('Label') # Label (class index)
WordNetID_test = x_class_test[:, 2]
class_idx_test = x_class_test[:, 1]
# Averaging test brain data
x_labels_test_unique, indices = np.unique(x_labels_test, return_index=True)
WordNetID_test_unique = WordNetID_test[indices]
class_idx_test_unique = class_idx_test[indices]
# Target text features and wnid
name = 'ImageNet_class200_' + feat + '.pkl'
full = os.path.join(model_extracted_textual_features, name)
dictionary = pickle.load(open(full, 'rb'))
firstfeat = 1
firstlabel = 1
for key, value in dictionary.items():
for k, v in value.items():
# print(k, v)
if k == 'wnid':
# print(v)
v = int(v[1:])
if firstlabel:
text_label = np.array([v])
firstlabel = 0
else:
text_label = np.concatenate((text_label, np.array([v])), axis=0)
elif k == 'feats':
v = np.expand_dims(v, axis=0)
if firstfeat:
text_feat = v
firstfeat = 0
else:
text_feat = np.concatenate((text_feat, v), axis=0)
# Extra text features and wnid
name = 'ImageNet_trainval_classes_' + feat + '.pkl'
full = os.path.join(model_extracted_textual_features, name)
dictionary = pickle.load(open(full, 'rb'))
firstfeat = 1
firstlabel = 1
for key, value in dictionary.items():
for k, v in value.items():
# print(k, v)
if k | |
import random, os, httplib2
from biothings.tests.test_helper import BiothingTestHelperMixin, _d, TornadoRequestHelper
from nose.tools import ok_, eq_
from tornado.testing import AsyncHTTPTestCase
import www.index as index
from biothings.settings import BiothingSettings
import config
class MyGeneTest(BiothingTestHelperMixin):
__test__ = True # explicitly set this to be a test class
#############################################################
# Test functions #
#############################################################
host = os.getenv(config.HOST_ENVAR_NAME,"")
#if not host:
# raise ValueError("Missing HOST_ENVAR_NAME")
host = host.rstrip('/')
api = host + '/' + config.API_VERSION
h = httplib2.Http()
def _filter_hits(self, res, field=None):
for hit in res.get("hits"):
if field:
del hit[field]
else:
# because can't remove elem from dict while iterate
# need to keep track on what should be popped...
topop = []
for k in hit.keys():
if k.startswith("_"):
topop.append(k)
[hit.pop(i) for i in topop]
def json_ok(self, url, filter=False, **kwargs):
    """Fetch *url* as JSON; when ``filter`` is true, strip underscore
    fields from the hits before returning the decoded result.

    NOTE(review): the parameter name ``filter`` shadows the builtin, but
    it is part of the public keyword interface and must stay.
    """
    result = super(MyGeneTest, self).json_ok(url, **kwargs)
    if filter:
        self._filter_hits(result)
    return result
def test_main(self):
    """Smoke test: the bare host landing page should load via get_ok."""
    self.get_ok(self.host)
def test_gene_object(self):
    """Verify that expected annotation fields are present on gene objects."""
    # Human gene 1017 must carry the full set of core annotation fields.
    res = self.json_ok(self.get_ok(self.api + '/gene/1017'))
    attr_li = ['HGNC', 'MIM', 'Vega', '_id', 'accession', 'alias',
               'ec', 'ensembl', 'entrezgene', 'genomic_pos', 'go',
               'homologene', 'interpro', 'ipi', 'map_location', 'name',
               'pdb', 'pharmgkb', 'pir', 'prosite', 'reagent', 'refseq',
               'reporter', 'summary', 'symbol', 'taxid', 'type_of_gene',
               'unigene', 'uniprot', 'exons', 'generif']
    for attr in attr_li:
        assert res.get(attr, None) is not None, 'Missing field "{}" in gene "1017" {}'.format(attr, res)
    # One representative gene per organism-specific database field.
    for gene_id, attr in [('12566', 'MGI'),
                          ('245962', 'RGD'),
                          ('493498', 'Xenbase'),
                          ('406715', 'ZFIN'),
                          ('824036', 'TAIR'),
                          ('42453', 'FLYBASE')]:
        res = self.json_ok(self.get_ok(self.api + '/gene/' + gene_id))
        assert res.get(attr, None) is not None, 'Missing field "{}" in gene "{}"'.format(attr, gene_id)
    # pig
    res = self.json_ok(self.get_ok(self.api + '/gene/397593'))
    assert 'snowball' in res.get('reporter', {}), 'Missing field "reporter.snowball" in gene "397593"'
    # nematode
    res = self.json_ok(self.get_ok(self.api + '/gene/172677'))
    # this is not nematode, "taxid": 31234
    res = self.json_ok(self.get_ok(self.api + '/gene/9821293'))
    attr = 'WormBase'
    assert res.get(attr, None) is not None, 'Missing field "{}" in gene "9821293"'.format(attr)
    # fission yeast
    res = self.json_ok(self.get_ok(self.api + '/gene/2539869'))
    # e coli.
    # res = self.json_ok(self.get_ok(self.api + '/gene/12931566'))
    # mirna
    res = self.json_ok(self.get_ok(self.api + '/gene/406881'))
    attr = 'miRBase'
    assert res.get(attr, None) is not None, 'Missing field "{}" in gene "406881"'.format(attr)
def test_query(self):
    """Exercise the public GET /query endpoint."""
    # Free-text and id-style queries that must return at least one hit.
    self.query_has_hits('cdk2')
    self.query_has_hits('GO:0004693')
    self.query_has_hits('reporter:211803_at')
    self.query_has_hits('IPR008351')
    self.query_has_hits('hsa-mir-503')
    self.query_has_hits('hsa-miR-503')
    # test fielded query
    self.query_has_hits('symbol:cdk2')
    # test interval query
    self.query_has_hits('chr1:151,073,054-151,383,976&species=human')
    # JSONP: the raw body must be wrapped in the requested callback.
    con = self.get_ok(self.api + '/query?q=cdk2&callback=mycallback')
    ok_(con.startswith(b'mycallback('))
    # testing non-ascii character
    res = self.json_ok(self.get_ok(self.api +
                                   '/query?q=54097\xef\xbf\xbd\xef\xbf\xbdmouse'))
    eq_(res['hits'], [])
    # A missing q parameter must produce an error payload.
    res = self.json_ok(self.get_ok(self.api + '/query'), checkerror=False)
    assert 'error' in res
    # So must a malformed query string.
    res = self.json_ok(self.get_ok(self.api + '/query?q=tRNA:Y1:85Ae'),
                       checkerror=False)
    assert 'error' in res
    # ensure returned fields by default
    res = self.json_ok(self.get_ok(self.api + '/query?q=cdk'))
    # pick one
    idx = random.randrange(0, 10)
    deffields = res["hits"][idx].keys()  # pick one...
    expected = ["_id", "_score", "taxid", "entrezgene", "name", "symbol"]
    assert sorted(list(deffields)) == sorted(expected), \
        "%s != %s" % (sorted(list(deffields)), sorted(expected))
def test_query_post(self):
    """Exercise the POST /query endpoint (batch queries)."""
    # /query via post
    #self.json_ok(self.post_ok(self.api + '/query', {'q': '1017'}))
    # Single term scoped to entrezgene.
    res = self.json_ok(self.post_ok(self.api + '/query',
                                    {'q': '1017', 'scopes': 'entrezgene'}))
    eq_(len(res), 1)
    eq_(set(res[0].keys()), set(['query', 'taxid', '_score', 'entrezgene', 'symbol', '_id', 'name']))
    eq_(res[0]['_id'], '1017')
    # Two terms with two scopes (reporter, entrezgene).
    res = self.json_ok(self.post_ok(self.api + '/query',
                                    {'q': '211803_at,1018',
                                     'scopes': 'reporter,entrezgene'}))
    eq_(len(res), 2)
    eq_(res[0]['_id'], '1017')
    eq_(res[1]['_id'], '1018')
    # Multi-species symbol query with a restricted field list.
    res = self.json_ok(self.post_ok(self.api + '/query',
                                    {'q': 'CDK2',
                                     'species': 'human,10090,frog,pig',
                                     'scopes': 'symbol',
                                     'fields': 'name,symbol'}))
    assert len(res) >= 4, (res, len(res))
    # An empty POST body must produce an error payload.
    res = self.json_ok(self.post_ok(self.api + '/query', {}),
                       checkerror=False)
    assert 'error' in res, res
    # jsoninput=true lets q be a JSON-encoded list of terms.
    res = self.json_ok(self.post_ok(self.api + '/query',
                                    {'q': '[1017, "1018"]',
                                     'scopes': 'entrezgene',
                                     'jsoninput': 'true'}))
    eq_(len(res), 2)
    eq_(res[0]['_id'], '1017')
    eq_(res[1]['_id'], '1018')
def test_query_interval(self):
    """A genomic-interval query should return multiple hits with _id set."""
    url = self.api + '/query?q=chr1:1000-100000&species=human'
    res = self.json_ok(self.get_ok(url))
    ok_(len(res['hits']) > 1)
    ok_('_id' in res['hits'][0])
def test_query_size(self):
    """Check paging parameters (size/from, legacy limit/skip) and the cap."""
    res = self.json_ok(self.get_ok(self.api + '/query?q=cdk?'))
    eq_(len(res['hits']), 10)  # default is 10
    ok_(res['total'] > 10)
    res = self.json_ok(self.get_ok(self.api + '/query?q=cdk?&size=0'))
    eq_(len(res['hits']), 0)
    # "limit" is accepted in place of "size".
    res = self.json_ok(self.get_ok(self.api + '/query?q=cdk?&limit=20'))
    eq_(len(res['hits']), 20)
    res1 = self.json_ok(self.get_ok(self.api +
                                    '/query?q=cdk?&from=0&size=20'))
    # "skip" offsets the window like "from".
    res = self.json_ok(self.get_ok(self.api +
                                   '/query?q=cdk?&skip=10&size=20'))
    eq_(len(res['hits']), 20)
    # print res1['hits'].index(res['hits'][0])
    # print [x['_id'] for x in res1['hits']]
    # eq_(res['hits'][0], res1['hits'][10])
    # The skipped window should overlap the first page (exact ordering of
    # equally-scored hits can vary, so only membership is asserted).
    assert res['hits'][0] in res1['hits']
    # API doc says cap 1000
    res = self.json_ok(self.get_ok(self.api + '/query?q=*&size=1000'))
    eq_(len(res['hits']), 1000)
    res = self.json_ok(self.get_ok(self.api + '/query?q=*&size=1001'))
    eq_(len(res['hits']), 1000)
    res = self.json_ok(self.get_ok(self.api + '/query?q=*&size=2000'))
    eq_(len(res['hits']), 1000)
    # assert 1==0
    res = self.json_ok(self.get_ok(self.api + '/query?q=cdk?&size=1a'),
                       checkerror=False)  # invalid size parameter
    assert 'error' in res
def test_gene(self):
    """Exercise GET /gene/<id>, including field filtering and 404 routes."""
    res = self.json_ok(self.get_ok(self.api + '/gene/1017'))
    eq_(res['entrezgene'], 1017)
    # testing non-ascii character
    self.get_404(self.api + '/gene/' +
                 '54097\xef\xbf\xbd\xef\xbf\xbdmouse')
    # commented out this test, as no more
    # allow dot in the geneid
    # res = self.json_ok(self.get_ok(self.api + '/gene/Y105C5B.255'))
    # testing filtering parameters
    res = self.json_ok(self.get_ok(self.api +
                                   '/gene/1017?fields=symbol,name,entrezgene'))
    eq_(set(res), set(['_id', '_score', 'symbol', 'name', 'entrezgene']))
    # "filter" is accepted as well, including dotted sub-fields (go.MF).
    res = self.json_ok(self.get_ok(self.api +
                                   '/gene/1017?filter=symbol,go.MF'))
    eq_(set(res), set(['_id', '_score', 'symbol', 'go']))
    assert "MF" in res["go"]
    # A bare /gene route without an id must 404.
    self.get_404(self.api + '/gene')
    self.get_404(self.api + '/gene/')
def test_gene_post(self):
    """Exercise POST /gene batch annotation retrieval."""
    res = self.json_ok(self.post_ok(self.api + '/gene', {'ids': '1017'}))
    eq_(len(res), 1)
    # check default fields returned
    eq_(set(res[0].keys()), set(['symbol', 'reporter', 'refseq', '_score', 'pdb', 'interpro', 'entrezgene',
                                 'summary', 'genomic_pos_hg19', 'unigene', 'ipi', 'taxid', 'pfam', 'homologene',
                                 'ensembl', 'ec', 'pir', 'type_of_gene', 'pathway', 'exons_hg19', 'MIM', 'generif',
                                 'HGNC', 'name', 'reagent', 'uniprot', 'pharmgkb', 'alias', 'genomic_pos',
                                 'accession', '_id', 'prosite', 'wikipedia', 'go', 'query', 'Vega', 'map_location',
                                 'exons', 'exac', 'other_names']))
    eq_(res[0]['entrezgene'], 1017)
    # Comma-separated ids (whitespace tolerated) give one entry per id.
    res = self.json_ok(self.post_ok(self.api + '/gene',
                                    {'ids': '1017, 1018'}))
    eq_(len(res), 2)
    eq_(res[0]['_id'], '1017')
    eq_(res[1]['_id'], '1018')
    # "fields" restricts what each entry carries.
    res = self.json_ok(self.post_ok(self.api + '/gene',
                                    {'ids': '1017,1018',
                                     'fields': 'symbol,name,entrezgene'}))
    eq_(len(res), 2)
    for _g in res:
        eq_(set(_g), set(['_id', '_score', 'query', 'symbol',
                          'name', 'entrezgene']))
    # "filter" works like "fields" on POST too, with dotted sub-fields.
    res = self.json_ok(self.post_ok(self.api + '/gene',
                                    {'ids': '1017,1018',
                                     'filter': 'symbol,go.MF'}))
    eq_(len(res), 2)
    for _g in res:
        eq_(set(_g), set(['_id', '_score', 'query', 'symbol', 'go']))
        assert "MF" in _g["go"]
    # get retired gene (make sure _search ES query is run)
    res = self.json_ok(self.post_ok(self.api + '/gene', {'ids': '791256'}))
    eq_(res[0]['_id'], '50846')  # this is the corresponding _id field
def test_status(self):
    """The /status endpoint should answer both GET and HEAD requests."""
    status_url = self.host + '/status'
    self.get_ok(status_url)
    self.head_ok(status_url)
def test_metadata(self):
    """Check /metadata at the root and versioned paths, plus field metadata
    and the dev-mode debug payload."""
    root = self.json_ok(self.get_ok(self.host + '/metadata'))
    v3 = self.json_ok(self.get_ok(self.api + '/metadata'))
    # Root and versioned metadata endpoints must agree.
    eq_(root, v3)
    eq_(set(root.keys()), set(['available_fields', 'src_version',
                               'app_revision', 'timestamp', 'taxonomy',
                               'stats', 'genome_assembly', 'source']))
    fields = self.json_ok(self.get_ok(self.api + '/metadata/fields'))
    # test random field
    assert "refseq" in fields
    assert "accession.rna" in fields
    assert "interpro.desc" in fields
    assert "homologene" in fields
    assert "reporter.snowball" in fields
    # debug info: "software" is exposed only when dev=1 is passed
    # (idiomatic membership tests; dropped the leftover debug print).
    debug = self.json_ok(self.get_ok(self.api + '/metadata?dev=1'))
    assert "software" in debug
    nodebug = self.json_ok(self.get_ok(self.api + '/metadata?dev=0'))
    assert "software" not in nodebug
def test_query_facets(self):
    """Taxid facet counts should be complete and respect species filters."""
    res = self.json_ok(self.get_ok(self.api +
                                   '/query?q=cdk?&facets=taxid'))
    ok_('facets' in res)
    ok_('taxid' in res['facets'])
    # Every hit is accounted for: no "other" or "missing" buckets.
    eq_(res['facets']['taxid']['total'], res['total'])
    eq_(res['facets']['taxid']['other'], 0)
    eq_(res['facets']['taxid']['missing'], 0)
    # species_facet_filter narrows the hits but leaves the facet counts
    # themselves unchanged.
    u = '/query?q=cdk?&facets=taxid&species_facet_filter=human'
    res2 = self.json_ok(self.get_ok(self.api + u))
    eq_(res2['facets']['taxid']['total'], res['total'])
    eq_(res2['facets']['taxid'], res['facets']['taxid'])
    # With the human filter applied, the taxid-9606 facet count equals
    # the filtered total.
    eq_([x["count"] for x in res2['facets']['taxid']['terms']
         if x["term"] == 9606][0], res2['total'])
def test_query_userfilter(self):
    """A known userfilter narrows results; an unknown one is ignored."""
    unfiltered = self.json_ok(self.get_ok(self.api + '/query?q=cdk'))
    filtered = self.json_ok(self.get_ok(
        self.api + '/query?q=cdk&userfilter=bgood_cure_griffith'))
    ok_(unfiltered['total'] > filtered['total'])
    # nonexisting user filter gets ignored.
    filtered = self.json_ok(self.get_ok(
        self.api + '/query?q=cdk&userfilter=aaaa'))
    eq_(unfiltered['total'], filtered['total'])
def test_existsfilter(self):
    """Each additional exists= field should further narrow the result set."""
    res1 = self.json_ok(self.get_ok(self.api + '/query?q=cdk'))
    res2 = self.json_ok(self.get_ok(self.api +
                                    '/query?q=cdk&exists=pharmgkb'))
    ok_(res1['total'] > res2['total'])
    res3 = self.json_ok(self.get_ok(self.api +
                                    '/query?q=cdk&exists=pharmgkb,pdb'))
    ok_(res2['total'] > res3['total'])
def test_missingfilter(self):
    """Each additional missing= field should further narrow the result set."""
    baseline = self.json_ok(self.get_ok(self.api + '/query?q=cdk'))
    missing_pdb = self.json_ok(self.get_ok(
        self.api + '/query?q=cdk&missing=pdb'))
    ok_(baseline['total'] > missing_pdb['total'])
    missing_both = self.json_ok(self.get_ok(
        self.api + '/query?q=cdk&missing=pdb,MIM'))
    ok_(missing_pdb['total'] > missing_both['total'])
def test_unicode(self):
    """Non-ASCII ids/queries should yield notfound entries, not server errors."""
    s = u'基因'
    self.get_404(self.api + '/gene/' + s)
    # Batch gene lookup: the unicode id is reported as notfound.
    res = self.json_ok(self.post_ok(self.api + '/gene', {'ids': s}))
    eq_(res[0]['notfound'], True)
    eq_(len(res), 1)
    # Mixed batch: only the unicode entry is notfound.
    res = self.json_ok(self.post_ok(self.api + '/gene',
                                    {'ids': '1017, ' + s}))
    eq_(res[1]['notfound'], True)
    eq_(len(res), 2)
    # GET query: empty hit list rather than an error.
    res = self.json_ok(self.get_ok(self.api + '/query?q=' + s))
    eq_(res['hits'], [])
    res = self.json_ok(self.post_ok(self.api + '/query',
                                    {"q": s, "scopes": 'symbol'}))
    eq_(res[0]['notfound'], True)
    eq_(len(res), 1)
    res = self.json_ok(self.post_ok(self.api + '/query',
                                    {"q": 'cdk2+' + s}))
    eq_(res[1]['notfound'], True)
    eq_(len(res), 2)
def test_hg19(self):
    """hg19-prefixed interval queries should differ from the default assembly."""
    u = '/query?q=hg19.chr12:57,795,963-57,815,592&species=human'
    res = self.json_ok(self.get_ok(self.api + u))
    ok_(len(res['hits']) == 2)
    ok_('_id' in res['hits'][0])
    # Same interval without the hg19 prefix must return a different total.
    u = '/query?q=chr12:57,795,963-57,815,592&species=human'
    res2 = self.json_ok(self.get_ok(self.api + u))
    ok_(res['total'] != res2['total'])
    # hg19-specific fields are retrievable on the gene endpoint.
    u = '/gene/10017?fields=genomic_pos_hg19,exons_hg19'
    res = self.json_ok(self.get_ok(self.api + u))
    ok_('genomic_pos_hg19' in res)
    ok_('exons_hg19' in res)
def test_mm9(self):
u = '/query?q=mm9.chr12:57,795,963-57,815,592&species=mouse'
res = self.json_ok(self.get_ok(self.api + u))
ok_(len(res['hits']) == 2)
ok_('_id' in res['hits'][0])
u = '/query?q=chr12:57,795,963-57,815,592&species=mouse'
res2 = self.json_ok(self.get_ok(self.api + u))
ok_(res['total'] != res2['total'])
u = | |
thread = api.view_distinctionsv3_with_http_info(orcid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str orcid: (required)
:return: DistinctionsSummaryV30
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['orcid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method view_distinctionsv3" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'orcid' is set
if ('orcid' not in params or
params['orcid'] is None):
raise ValueError("Missing the required parameter `orcid` when calling `view_distinctionsv3`") # noqa: E501
collection_formats = {}
path_params = {}
if 'orcid' in params:
path_params['orcid'] = params['orcid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.orcid+xml; qs=5', 'application/orcid+xml; qs=3', 'application/xml', 'application/vnd.orcid+json; qs=4', 'application/orcid+json; qs=2', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['orcid_auth'] # noqa: E501
return self.api_client.call_api(
'/v3.0/{orcid}/distinctions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DistinctionsSummaryV30', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def view_distinctionv3(self, orcid, put_code, **kwargs):  # noqa: E501
    """Fetch a Distinction  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_distinctionv3(orcid, put_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :param str put_code: (required)
    :return: DistinctionV30
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The async path returns the request thread and the sync path returns
    # the unwrapped data; in both cases the delegate's result is returned
    # as-is, so no branching on async_req is needed.
    return self.view_distinctionv3_with_http_info(orcid, put_code, **kwargs)  # noqa: E501
def view_distinctionv3_with_http_info(self, orcid, put_code, **kwargs):  # noqa: E501
    """Fetch a Distinction  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_distinctionv3_with_http_info(orcid, put_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :param str put_code: (required)
    :return: DistinctionV30
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): appears to be swagger-codegen generated code; manual
    # changes would likely be overwritten on regeneration.
    # Accepted kwargs: the method parameters plus client-control options.
    all_params = ['orcid', 'put_code']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unexpected keyword arguments, then flatten kwargs into params.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method view_distinctionv3" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'orcid' is set
    if ('orcid' not in params or
            params['orcid'] is None):
        raise ValueError("Missing the required parameter `orcid` when calling `view_distinctionv3`")  # noqa: E501
    # verify the required parameter 'put_code' is set
    if ('put_code' not in params or
            params['put_code'] is None):
        raise ValueError("Missing the required parameter `put_code` when calling `view_distinctionv3`")  # noqa: E501

    collection_formats = {}

    # URL template substitutions: {orcid} and {putCode}.
    path_params = {}
    if 'orcid' in params:
        path_params['orcid'] = params['orcid']  # noqa: E501
    if 'put_code' in params:
        path_params['putCode'] = params['put_code']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/vnd.orcid+xml; qs=5', 'application/orcid+xml; qs=3', 'application/xml', 'application/vnd.orcid+json; qs=4', 'application/orcid+json; qs=2', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['orcid_auth']  # noqa: E501

    # Delegate the actual HTTP round trip to the shared api_client.
    return self.api_client.call_api(
        '/v3.0/{orcid}/distinction/{putCode}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DistinctionV30',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def view_education_summaryv3(self, orcid, put_code, **kwargs):  # noqa: E501
    """Fetch an Education summary  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_education_summaryv3(orcid, put_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :param str put_code: (required)
    :return: EducationSummaryV30
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async path (request thread) and the sync path (unwrapped
    # data) return the delegate's result directly, so a single return
    # suffices.
    return self.view_education_summaryv3_with_http_info(orcid, put_code, **kwargs)  # noqa: E501
def view_education_summaryv3_with_http_info(self, orcid, put_code, **kwargs):  # noqa: E501
    """Fetch an Education summary  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_education_summaryv3_with_http_info(orcid, put_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :param str put_code: (required)
    :return: EducationSummaryV30
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): appears to be swagger-codegen generated code; manual
    # changes would likely be overwritten on regeneration.
    # Accepted kwargs: the method parameters plus client-control options.
    all_params = ['orcid', 'put_code']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unexpected keyword arguments, then flatten kwargs into params.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method view_education_summaryv3" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'orcid' is set
    if ('orcid' not in params or
            params['orcid'] is None):
        raise ValueError("Missing the required parameter `orcid` when calling `view_education_summaryv3`")  # noqa: E501
    # verify the required parameter 'put_code' is set
    if ('put_code' not in params or
            params['put_code'] is None):
        raise ValueError("Missing the required parameter `put_code` when calling `view_education_summaryv3`")  # noqa: E501

    collection_formats = {}

    # URL template substitutions: {orcid} and {putCode}.
    path_params = {}
    if 'orcid' in params:
        path_params['orcid'] = params['orcid']  # noqa: E501
    if 'put_code' in params:
        path_params['putCode'] = params['put_code']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/vnd.orcid+xml; qs=5', 'application/orcid+xml; qs=3', 'application/xml', 'application/vnd.orcid+json; qs=4', 'application/orcid+json; qs=2', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['orcid_auth']  # noqa: E501

    # Delegate the actual HTTP round trip to the shared api_client.
    return self.api_client.call_api(
        '/v3.0/{orcid}/education/summary/{putCode}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EducationSummaryV30',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def view_educationsv3(self, orcid, **kwargs):  # noqa: E501
    """Fetch all educations  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_educationsv3(orcid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :return: EducationsSummaryV30
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async path (request thread) and the sync path (unwrapped
    # data) return the delegate's result directly, so a single return
    # suffices.
    return self.view_educationsv3_with_http_info(orcid, **kwargs)  # noqa: E501
def view_educationsv3_with_http_info(self, orcid, **kwargs):  # noqa: E501
    """Fetch all educations  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_educationsv3_with_http_info(orcid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :return: EducationsSummaryV30
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): appears to be swagger-codegen generated code; manual
    # changes would likely be overwritten on regeneration.
    # Accepted kwargs: the method parameter plus client-control options.
    all_params = ['orcid']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unexpected keyword arguments, then flatten kwargs into params.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method view_educationsv3" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'orcid' is set
    if ('orcid' not in params or
            params['orcid'] is None):
        raise ValueError("Missing the required parameter `orcid` when calling `view_educationsv3`")  # noqa: E501

    collection_formats = {}

    # URL template substitution: {orcid}.
    path_params = {}
    if 'orcid' in params:
        path_params['orcid'] = params['orcid']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/vnd.orcid+xml; qs=5', 'application/orcid+xml; qs=3', 'application/xml', 'application/vnd.orcid+json; qs=4', 'application/orcid+json; qs=2', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['orcid_auth']  # noqa: E501

    # Delegate the actual HTTP round trip to the shared api_client.
    return self.api_client.call_api(
        '/v3.0/{orcid}/educations', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EducationsSummaryV30',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def view_educationv3(self, orcid, put_code, **kwargs):  # noqa: E501
    """Fetch an Education  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_educationv3(orcid, put_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :param str put_code: (required)
    :return: EducationV30
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async path (request thread) and the sync path (unwrapped
    # data) return the delegate's result directly, so a single return
    # suffices.
    return self.view_educationv3_with_http_info(orcid, put_code, **kwargs)  # noqa: E501
def view_educationv3_with_http_info(self, orcid, put_code, **kwargs): # noqa: E501
"""Fetch an Education # noqa: E501
This method makes a |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.