body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
42da1721307502f95764ebe1d4a9ee1d9b48601f8d8845680a592ef909d679eb
def empty(self): 'Disconnect all vehicles currently enqueued' self.queue = []
Disconnect all vehicles currently enqueued
elvis/waiting_queue.py
empty
dailab/elvis
5
python
def empty(self): self.queue = []
def empty(self): self.queue = []<|docstring|>Disconnect all vehicles currently enqueued<|endoftext|>
ac7cb921b231a8618cff7318935823a276a3348b53bb4ea55fd638e54663f4f4
def attr_is_not_inherited(type_, attr): "\n returns True if type_'s attr is not inherited from any of its base classes\n " bases = type_.__mro__[1:] return (getattr(type_, attr) not in (getattr(base, attr, None) for base in bases))
returns True if type_'s attr is not inherited from any of its base classes
extra_tests/not_impl_gen.py
attr_is_not_inherited
Leonardofreua/RustPython
11,058
python
def attr_is_not_inherited(type_, attr): "\n \n " bases = type_.__mro__[1:] return (getattr(type_, attr) not in (getattr(base, attr, None) for base in bases))
def attr_is_not_inherited(type_, attr): "\n \n " bases = type_.__mro__[1:] return (getattr(type_, attr) not in (getattr(base, attr, None) for base in bases))<|docstring|>returns True if type_'s attr is not inherited from any of its base classes<|endoftext|>
57e4dec2ece3062ad49af31defefa7c5bb2a2df4cb32a6f638ae97bde63f5462
def scan_modules(): "taken from the source code of help('modules')\n\n https://github.com/python/cpython/blob/63298930fb531ba2bb4f23bc3b915dbf1e17e9e1/Lib/pydoc.py#L2178" modules = {} def callback(path, modname, desc, modules=modules): if (modname and (modname[(- 9):] == '.__init__')): modname = (modname[:(- 9)] + ' (package)') if (modname.find('.') < 0): modules[modname] = 1 def onerror(modname): callback(None, modname, None) with warnings.catch_warnings(): warnings.simplefilter('ignore') ModuleScanner().run(callback, onerror=onerror) return list(modules.keys())
taken from the source code of help('modules') https://github.com/python/cpython/blob/63298930fb531ba2bb4f23bc3b915dbf1e17e9e1/Lib/pydoc.py#L2178
extra_tests/not_impl_gen.py
scan_modules
Leonardofreua/RustPython
11,058
python
def scan_modules(): "taken from the source code of help('modules')\n\n https://github.com/python/cpython/blob/63298930fb531ba2bb4f23bc3b915dbf1e17e9e1/Lib/pydoc.py#L2178" modules = {} def callback(path, modname, desc, modules=modules): if (modname and (modname[(- 9):] == '.__init__')): modname = (modname[:(- 9)] + ' (package)') if (modname.find('.') < 0): modules[modname] = 1 def onerror(modname): callback(None, modname, None) with warnings.catch_warnings(): warnings.simplefilter('ignore') ModuleScanner().run(callback, onerror=onerror) return list(modules.keys())
def scan_modules(): "taken from the source code of help('modules')\n\n https://github.com/python/cpython/blob/63298930fb531ba2bb4f23bc3b915dbf1e17e9e1/Lib/pydoc.py#L2178" modules = {} def callback(path, modname, desc, modules=modules): if (modname and (modname[(- 9):] == '.__init__')): modname = (modname[:(- 9)] + ' (package)') if (modname.find('.') < 0): modules[modname] = 1 def onerror(modname): callback(None, modname, None) with warnings.catch_warnings(): warnings.simplefilter('ignore') ModuleScanner().run(callback, onerror=onerror) return list(modules.keys())<|docstring|>taken from the source code of help('modules') https://github.com/python/cpython/blob/63298930fb531ba2bb4f23bc3b915dbf1e17e9e1/Lib/pydoc.py#L2178<|endoftext|>
fab594e75b8c0b69a63ca9f09f5824b16dd17f05a8ff06b38f840623ff5b6b72
def get_wfdisc_rows(session, wfdisc, sta=None, chan=None, t1=None, t2=None, wfids=None, daylong=False, asquery=False, verbose=False): '\n Returns a list of wfdisc records from provided SQLAlchemy ORM mapped\n wfdisc table, for given station, channel, and time window combination.\n\n Parameters\n ----------\n session: bound session instance\n wfdisc: SQLAlchemy mapped wfdisc table\n sta, chan, : str, optional\n station, channel strings,\n t1, t2 : int, optional\n Epoch time window of interest (seconds)\n Actually searches for wfdisc.time between t1-86400 and t2 and\n wfdisc.endtime > t1\n wfids : list of integers, optional\n wfid integers. Obviates other arguments.\n daylong : bool, optional\n If True, uses a slightly different time query for best results.\n Not yet implemented (is currently the default behavior).\n asquery : bool, optional\n Return the query object instead of the results. Default, False.\n Useful if additional you desire additional sorting of filtering.\n verbose : bool, optional\n Print request to the stdout. Not used with asquery=True.\n\n Returns\n -------\n list of wfdisc row objects, or sqlalchemy.orm.Query instance\n\n ' CHUNKSIZE = ((24 * 60) * 60) q = session.query(wfdisc) if (wfids is not None): q = q.filter(wfdisc.wfid.in_(wfids)) else: if (sta is not None): q = q.filter((wfdisc.sta == sta)) if (chan is not None): q = q.filter((wfdisc.chan == chan)) if ([t1, t2].count(None) == 0): q = q.filter(wfdisc.time.between((t1 - CHUNKSIZE), t2)) q = q.filter((wfdisc.endtime > t1)) else: if (t1 is not None): q = q.filter((wfdisc.time >= (t1 - CHUNKSIZE))) q = q.filter((wfdisc.endtime > t1)) if (t2 is not None): q = q.filter((wfdisc.time <= t2)) if asquery: res = q else: if verbose: msg = 'Requesting sta={}, chan={}, time=[{}, {}], wfids={}' print(msg.format(sta, chan, UTCDateTime(t1), UTCDateTime(t2), wfids)) res = q.all() return res
Returns a list of wfdisc records from provided SQLAlchemy ORM mapped wfdisc table, for given station, channel, and time window combination. Parameters ---------- session: bound session instance wfdisc: SQLAlchemy mapped wfdisc table sta, chan, : str, optional station, channel strings, t1, t2 : int, optional Epoch time window of interest (seconds) Actually searches for wfdisc.time between t1-86400 and t2 and wfdisc.endtime > t1 wfids : list of integers, optional wfid integers. Obviates other arguments. daylong : bool, optional If True, uses a slightly different time query for best results. Not yet implemented (is currently the default behavior). asquery : bool, optional Return the query object instead of the results. Default, False. Useful if additional you desire additional sorting of filtering. verbose : bool, optional Print request to the stdout. Not used with asquery=True. Returns ------- list of wfdisc row objects, or sqlalchemy.orm.Query instance
pisces/request.py
get_wfdisc_rows
samuelchodur/pisces
12
python
def get_wfdisc_rows(session, wfdisc, sta=None, chan=None, t1=None, t2=None, wfids=None, daylong=False, asquery=False, verbose=False): '\n Returns a list of wfdisc records from provided SQLAlchemy ORM mapped\n wfdisc table, for given station, channel, and time window combination.\n\n Parameters\n ----------\n session: bound session instance\n wfdisc: SQLAlchemy mapped wfdisc table\n sta, chan, : str, optional\n station, channel strings,\n t1, t2 : int, optional\n Epoch time window of interest (seconds)\n Actually searches for wfdisc.time between t1-86400 and t2 and\n wfdisc.endtime > t1\n wfids : list of integers, optional\n wfid integers. Obviates other arguments.\n daylong : bool, optional\n If True, uses a slightly different time query for best results.\n Not yet implemented (is currently the default behavior).\n asquery : bool, optional\n Return the query object instead of the results. Default, False.\n Useful if additional you desire additional sorting of filtering.\n verbose : bool, optional\n Print request to the stdout. Not used with asquery=True.\n\n Returns\n -------\n list of wfdisc row objects, or sqlalchemy.orm.Query instance\n\n ' CHUNKSIZE = ((24 * 60) * 60) q = session.query(wfdisc) if (wfids is not None): q = q.filter(wfdisc.wfid.in_(wfids)) else: if (sta is not None): q = q.filter((wfdisc.sta == sta)) if (chan is not None): q = q.filter((wfdisc.chan == chan)) if ([t1, t2].count(None) == 0): q = q.filter(wfdisc.time.between((t1 - CHUNKSIZE), t2)) q = q.filter((wfdisc.endtime > t1)) else: if (t1 is not None): q = q.filter((wfdisc.time >= (t1 - CHUNKSIZE))) q = q.filter((wfdisc.endtime > t1)) if (t2 is not None): q = q.filter((wfdisc.time <= t2)) if asquery: res = q else: if verbose: msg = 'Requesting sta={}, chan={}, time=[{}, {}], wfids={}' print(msg.format(sta, chan, UTCDateTime(t1), UTCDateTime(t2), wfids)) res = q.all() return res
def get_wfdisc_rows(session, wfdisc, sta=None, chan=None, t1=None, t2=None, wfids=None, daylong=False, asquery=False, verbose=False): '\n Returns a list of wfdisc records from provided SQLAlchemy ORM mapped\n wfdisc table, for given station, channel, and time window combination.\n\n Parameters\n ----------\n session: bound session instance\n wfdisc: SQLAlchemy mapped wfdisc table\n sta, chan, : str, optional\n station, channel strings,\n t1, t2 : int, optional\n Epoch time window of interest (seconds)\n Actually searches for wfdisc.time between t1-86400 and t2 and\n wfdisc.endtime > t1\n wfids : list of integers, optional\n wfid integers. Obviates other arguments.\n daylong : bool, optional\n If True, uses a slightly different time query for best results.\n Not yet implemented (is currently the default behavior).\n asquery : bool, optional\n Return the query object instead of the results. Default, False.\n Useful if additional you desire additional sorting of filtering.\n verbose : bool, optional\n Print request to the stdout. 
Not used with asquery=True.\n\n Returns\n -------\n list of wfdisc row objects, or sqlalchemy.orm.Query instance\n\n ' CHUNKSIZE = ((24 * 60) * 60) q = session.query(wfdisc) if (wfids is not None): q = q.filter(wfdisc.wfid.in_(wfids)) else: if (sta is not None): q = q.filter((wfdisc.sta == sta)) if (chan is not None): q = q.filter((wfdisc.chan == chan)) if ([t1, t2].count(None) == 0): q = q.filter(wfdisc.time.between((t1 - CHUNKSIZE), t2)) q = q.filter((wfdisc.endtime > t1)) else: if (t1 is not None): q = q.filter((wfdisc.time >= (t1 - CHUNKSIZE))) q = q.filter((wfdisc.endtime > t1)) if (t2 is not None): q = q.filter((wfdisc.time <= t2)) if asquery: res = q else: if verbose: msg = 'Requesting sta={}, chan={}, time=[{}, {}], wfids={}' print(msg.format(sta, chan, UTCDateTime(t1), UTCDateTime(t2), wfids)) res = q.all() return res<|docstring|>Returns a list of wfdisc records from provided SQLAlchemy ORM mapped wfdisc table, for given station, channel, and time window combination. Parameters ---------- session: bound session instance wfdisc: SQLAlchemy mapped wfdisc table sta, chan, : str, optional station, channel strings, t1, t2 : int, optional Epoch time window of interest (seconds) Actually searches for wfdisc.time between t1-86400 and t2 and wfdisc.endtime > t1 wfids : list of integers, optional wfid integers. Obviates other arguments. daylong : bool, optional If True, uses a slightly different time query for best results. Not yet implemented (is currently the default behavior). asquery : bool, optional Return the query object instead of the results. Default, False. Useful if additional you desire additional sorting of filtering. verbose : bool, optional Print request to the stdout. Not used with asquery=True. Returns ------- list of wfdisc row objects, or sqlalchemy.orm.Query instance<|endoftext|>
6bc5304669e166f18e5c4199ef06e3a3124e51b4ab50a565c54923856e64dd33
def distaz_query(records, deg=None, km=None, swath=None): '\n Out-of-database subset based on distances and/or azimuths.\n\n Parameters\n ----------\n records : iterable of objects with lat, lon attribute floats\n Target of the subset.\n deg : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in degrees or None for unconstrained.\n km : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in km or None for unconstrained.\n swath : list or tuple of numbers, optional\n (lat, lon, azimuth, tolerance)\n Azimuth (from North) +/-tolerance from lat,lon point in degrees.\n\n Returns\n -------\n list\n Subset of supplied records.\n\n ' mask0 = np.ones(len(records), dtype=np.bool) if deg: dgen = (geod.locations2degrees(irec.lat, irec.lon, deg[0], deg[1]) for irec in records) degrees = np.fromiter(dgen, dtype=float) if (deg[2] is not None): mask0 = np.logical_and(mask0, (deg[2] <= degrees)) if (deg[3] is not None): mask0 = np.logical_and(mask0, (deg[3] >= degrees)) if km: mgen = (geod.gps2DistAzimuth(irec.lat, irec.lon, km[0], km[1])[0] for irec in records) kilometers = (np.fromiter(mgen, dtype=float) / 1000.0) if (km[2] is not None): mask0 = np.logical_and(mask0, (km[2] <= kilometers)) if (km[3] is not None): mask0 = np.logical_and(mask0, (km[3] >= kilometers)) if (swath is not None): minaz = (swath[2] - swath[3]) maxaz = (swath[2] + swath[3]) azgen = (geod.gps2DistAzimuth(irec.lat, irec.lon, km[0], km[1])[1] for irec in records) azimuths = np.fromiter(azgen, dtype=float) mask0 = np.logical_and(mask0, (azimuths >= minaz)) mask0 = np.logical_and(mask0, (azimuths <= maxaz)) idx = np.nonzero(mask0)[0] recs = [records[i] for i in idx] return recs
Out-of-database subset based on distances and/or azimuths. Parameters ---------- records : iterable of objects with lat, lon attribute floats Target of the subset. deg : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) minr, maxr in degrees or None for unconstrained. km : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) minr, maxr in km or None for unconstrained. swath : list or tuple of numbers, optional (lat, lon, azimuth, tolerance) Azimuth (from North) +/-tolerance from lat,lon point in degrees. Returns ------- list Subset of supplied records.
pisces/request.py
distaz_query
samuelchodur/pisces
12
python
def distaz_query(records, deg=None, km=None, swath=None): '\n Out-of-database subset based on distances and/or azimuths.\n\n Parameters\n ----------\n records : iterable of objects with lat, lon attribute floats\n Target of the subset.\n deg : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in degrees or None for unconstrained.\n km : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in km or None for unconstrained.\n swath : list or tuple of numbers, optional\n (lat, lon, azimuth, tolerance)\n Azimuth (from North) +/-tolerance from lat,lon point in degrees.\n\n Returns\n -------\n list\n Subset of supplied records.\n\n ' mask0 = np.ones(len(records), dtype=np.bool) if deg: dgen = (geod.locations2degrees(irec.lat, irec.lon, deg[0], deg[1]) for irec in records) degrees = np.fromiter(dgen, dtype=float) if (deg[2] is not None): mask0 = np.logical_and(mask0, (deg[2] <= degrees)) if (deg[3] is not None): mask0 = np.logical_and(mask0, (deg[3] >= degrees)) if km: mgen = (geod.gps2DistAzimuth(irec.lat, irec.lon, km[0], km[1])[0] for irec in records) kilometers = (np.fromiter(mgen, dtype=float) / 1000.0) if (km[2] is not None): mask0 = np.logical_and(mask0, (km[2] <= kilometers)) if (km[3] is not None): mask0 = np.logical_and(mask0, (km[3] >= kilometers)) if (swath is not None): minaz = (swath[2] - swath[3]) maxaz = (swath[2] + swath[3]) azgen = (geod.gps2DistAzimuth(irec.lat, irec.lon, km[0], km[1])[1] for irec in records) azimuths = np.fromiter(azgen, dtype=float) mask0 = np.logical_and(mask0, (azimuths >= minaz)) mask0 = np.logical_and(mask0, (azimuths <= maxaz)) idx = np.nonzero(mask0)[0] recs = [records[i] for i in idx] return recs
def distaz_query(records, deg=None, km=None, swath=None): '\n Out-of-database subset based on distances and/or azimuths.\n\n Parameters\n ----------\n records : iterable of objects with lat, lon attribute floats\n Target of the subset.\n deg : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in degrees or None for unconstrained.\n km : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in km or None for unconstrained.\n swath : list or tuple of numbers, optional\n (lat, lon, azimuth, tolerance)\n Azimuth (from North) +/-tolerance from lat,lon point in degrees.\n\n Returns\n -------\n list\n Subset of supplied records.\n\n ' mask0 = np.ones(len(records), dtype=np.bool) if deg: dgen = (geod.locations2degrees(irec.lat, irec.lon, deg[0], deg[1]) for irec in records) degrees = np.fromiter(dgen, dtype=float) if (deg[2] is not None): mask0 = np.logical_and(mask0, (deg[2] <= degrees)) if (deg[3] is not None): mask0 = np.logical_and(mask0, (deg[3] >= degrees)) if km: mgen = (geod.gps2DistAzimuth(irec.lat, irec.lon, km[0], km[1])[0] for irec in records) kilometers = (np.fromiter(mgen, dtype=float) / 1000.0) if (km[2] is not None): mask0 = np.logical_and(mask0, (km[2] <= kilometers)) if (km[3] is not None): mask0 = np.logical_and(mask0, (km[3] >= kilometers)) if (swath is not None): minaz = (swath[2] - swath[3]) maxaz = (swath[2] + swath[3]) azgen = (geod.gps2DistAzimuth(irec.lat, irec.lon, km[0], km[1])[1] for irec in records) azimuths = np.fromiter(azgen, dtype=float) mask0 = np.logical_and(mask0, (azimuths >= minaz)) mask0 = np.logical_and(mask0, (azimuths <= maxaz)) idx = np.nonzero(mask0)[0] recs = [records[i] for i in idx] return recs<|docstring|>Out-of-database subset based on distances and/or azimuths. Parameters ---------- records : iterable of objects with lat, lon attribute floats Target of the subset. 
deg : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) minr, maxr in degrees or None for unconstrained. km : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) minr, maxr in km or None for unconstrained. swath : list or tuple of numbers, optional (lat, lon, azimuth, tolerance) Azimuth (from North) +/-tolerance from lat,lon point in degrees. Returns ------- list Subset of supplied records.<|endoftext|>
5f54bd2aa5eb1928c9c0360f05370dc1dd508bc30db5973244cec4ff0241b0dd
def geographic_query(q, table, region=None, depth=None, asquery=False): '\n Filter by region (W, E, S, N) [deg] and/or depth range (min, max) [km].\n\n ' if region: if (region.count(None) == 0): q = q.filter(table.lon.between(region[0], region[1])) q = q.filter(table.lat.between(region[2], region[3])) else: if (region[0] is not None): q = q.filter((table.lon > region[0])) if (region[1] is not None): q = q.filter((table.lon < region[1])) if (region[2] is not None): q = q.filter((table.lat > region[2])) if (region[3] is not None): q = q.filter((table.lat < region[3])) if depth: if (depth.count(None) == 0): q = q.filter(table.depth.between(depth[0], depth[1])) else: if depth[0]: q = q.filter((table.depth >= depth[0])) if depth[1]: q = q.filter((table.depth <= depth[1])) if asquery: res = q else: res = q.all() return res
Filter by region (W, E, S, N) [deg] and/or depth range (min, max) [km].
pisces/request.py
geographic_query
samuelchodur/pisces
12
python
def geographic_query(q, table, region=None, depth=None, asquery=False): '\n \n\n ' if region: if (region.count(None) == 0): q = q.filter(table.lon.between(region[0], region[1])) q = q.filter(table.lat.between(region[2], region[3])) else: if (region[0] is not None): q = q.filter((table.lon > region[0])) if (region[1] is not None): q = q.filter((table.lon < region[1])) if (region[2] is not None): q = q.filter((table.lat > region[2])) if (region[3] is not None): q = q.filter((table.lat < region[3])) if depth: if (depth.count(None) == 0): q = q.filter(table.depth.between(depth[0], depth[1])) else: if depth[0]: q = q.filter((table.depth >= depth[0])) if depth[1]: q = q.filter((table.depth <= depth[1])) if asquery: res = q else: res = q.all() return res
def geographic_query(q, table, region=None, depth=None, asquery=False): '\n \n\n ' if region: if (region.count(None) == 0): q = q.filter(table.lon.between(region[0], region[1])) q = q.filter(table.lat.between(region[2], region[3])) else: if (region[0] is not None): q = q.filter((table.lon > region[0])) if (region[1] is not None): q = q.filter((table.lon < region[1])) if (region[2] is not None): q = q.filter((table.lat > region[2])) if (region[3] is not None): q = q.filter((table.lat < region[3])) if depth: if (depth.count(None) == 0): q = q.filter(table.depth.between(depth[0], depth[1])) else: if depth[0]: q = q.filter((table.depth >= depth[0])) if depth[1]: q = q.filter((table.depth <= depth[1])) if asquery: res = q else: res = q.all() return res<|docstring|>Filter by region (W, E, S, N) [deg] and/or depth range (min, max) [km].<|endoftext|>
6575fd70312e28fd5f992020c3cf0d1230883e313c52a939a80f196560db20da
def get_events(session, origin, event=None, region=None, deg=None, km=None, swath=None, mag=None, depth=None, etime=None, orids=None, evids=None, prefor=False, asquery=False): "\n Build common queries for events.\n\n Parameters\n ----------\n session : sqlalchemy.orm.Session instance\n Must be bound.\n origin : mapped Origin table class\n event : mapped Event table class, optional\n region : list or tuple of numbers, optional\n (W, E, S, N) in degrees. Default, None.\n deg : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr) . Default, None.\n minr, maxr in degrees or None for unconstrained.\n km : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr) Default, None.\n minr, maxr in km or None for unconstrained.\n swath : list or tuple of numbers, optional\n (lat, lon, azimuth, tolerance)\n Azimuth (from North) +/-tolerance from lat,lon point in degrees.\n Not yet implemented.\n mag : dict, optional\n {'type1': [min1, max1], 'type2': [min2, max2], ...}\n 'type' can be 'mb', 'ms', or 'ml'. Produces OR clauses.\n depth : tuple or list, optional\n Depth interval [mindep, maxdep] in km.\n Use None for an unconstrained limit.\n etime : tuple or list, optional\n (tstart, tend) epoch event time window\n Use None for an unconstrained limit.\n orids, evids : list or tuple of int, optional\n orid, evid numbers < 1000 in length\n Evids requires event table.\n prefor : bool, optional\n Return preferred origins only. Default False. Requires event table\n be provided.\n asquery : bool, optional\n Return the query object instead of the results. Default, False.\n Useful if additional you desire additional sorting of filtering, or\n if you have your own in-database geographic query function(s). If \n supplied, deg, km, and/or swath are ignored in the returned query.\n\n Returns\n -------\n sqlalchemy.orm.Query instance\n\n Notes\n -----\n Each keyword argument corresponds to an AND clause, except 'mag' which\n returns OR clauses. 
Don't submit a request containing both 'evids' and\n 'orids' unless you want them joined by an AND clause. Otherwise process\n them individually, then collate and unique them afterwards.\n\n " Origin = origin Event = event t = etime q = session.query(Origin) if orids: q = q.filter(Origin.orid.in_(orids)) if t: if (t.count(None) == 0): q = q.filter(Origin.time.between(t[0], t[1])) else: if t[0]: q = q.filter((Origin.time > t[0])) if t[1]: q = q.filter((Origin.time < t[1])) if mag: magclause = [] for (magtype, vals) in mag.iteritems(): magclause.append(getattr(Origin, magtype).between(vals[0], vals[1])) q = q.filter(or_(*magclause)) if evids: q = q.filter((Origin.evid == Event.evid)) q = q.filter(Event.evid.in_(evids)) if prefor: q = q.filter((Origin.orid == Event.prefor)) q = geographic_query(q, Origin, region=region, depth=depth, asquery=True) if asquery: res = q else: res = distaz_query(q.all(), deg=deg, km=km, swath=swath) return res
Build common queries for events. Parameters ---------- session : sqlalchemy.orm.Session instance Must be bound. origin : mapped Origin table class event : mapped Event table class, optional region : list or tuple of numbers, optional (W, E, S, N) in degrees. Default, None. deg : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) . Default, None. minr, maxr in degrees or None for unconstrained. km : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) Default, None. minr, maxr in km or None for unconstrained. swath : list or tuple of numbers, optional (lat, lon, azimuth, tolerance) Azimuth (from North) +/-tolerance from lat,lon point in degrees. Not yet implemented. mag : dict, optional {'type1': [min1, max1], 'type2': [min2, max2], ...} 'type' can be 'mb', 'ms', or 'ml'. Produces OR clauses. depth : tuple or list, optional Depth interval [mindep, maxdep] in km. Use None for an unconstrained limit. etime : tuple or list, optional (tstart, tend) epoch event time window Use None for an unconstrained limit. orids, evids : list or tuple of int, optional orid, evid numbers < 1000 in length Evids requires event table. prefor : bool, optional Return preferred origins only. Default False. Requires event table be provided. asquery : bool, optional Return the query object instead of the results. Default, False. Useful if additional you desire additional sorting of filtering, or if you have your own in-database geographic query function(s). If supplied, deg, km, and/or swath are ignored in the returned query. Returns ------- sqlalchemy.orm.Query instance Notes ----- Each keyword argument corresponds to an AND clause, except 'mag' which returns OR clauses. Don't submit a request containing both 'evids' and 'orids' unless you want them joined by an AND clause. Otherwise process them individually, then collate and unique them afterwards.
pisces/request.py
get_events
samuelchodur/pisces
12
python
def get_events(session, origin, event=None, region=None, deg=None, km=None, swath=None, mag=None, depth=None, etime=None, orids=None, evids=None, prefor=False, asquery=False): "\n Build common queries for events.\n\n Parameters\n ----------\n session : sqlalchemy.orm.Session instance\n Must be bound.\n origin : mapped Origin table class\n event : mapped Event table class, optional\n region : list or tuple of numbers, optional\n (W, E, S, N) in degrees. Default, None.\n deg : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr) . Default, None.\n minr, maxr in degrees or None for unconstrained.\n km : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr) Default, None.\n minr, maxr in km or None for unconstrained.\n swath : list or tuple of numbers, optional\n (lat, lon, azimuth, tolerance)\n Azimuth (from North) +/-tolerance from lat,lon point in degrees.\n Not yet implemented.\n mag : dict, optional\n {'type1': [min1, max1], 'type2': [min2, max2], ...}\n 'type' can be 'mb', 'ms', or 'ml'. Produces OR clauses.\n depth : tuple or list, optional\n Depth interval [mindep, maxdep] in km.\n Use None for an unconstrained limit.\n etime : tuple or list, optional\n (tstart, tend) epoch event time window\n Use None for an unconstrained limit.\n orids, evids : list or tuple of int, optional\n orid, evid numbers < 1000 in length\n Evids requires event table.\n prefor : bool, optional\n Return preferred origins only. Default False. Requires event table\n be provided.\n asquery : bool, optional\n Return the query object instead of the results. Default, False.\n Useful if additional you desire additional sorting of filtering, or\n if you have your own in-database geographic query function(s). If \n supplied, deg, km, and/or swath are ignored in the returned query.\n\n Returns\n -------\n sqlalchemy.orm.Query instance\n\n Notes\n -----\n Each keyword argument corresponds to an AND clause, except 'mag' which\n returns OR clauses. 
Don't submit a request containing both 'evids' and\n 'orids' unless you want them joined by an AND clause. Otherwise process\n them individually, then collate and unique them afterwards.\n\n " Origin = origin Event = event t = etime q = session.query(Origin) if orids: q = q.filter(Origin.orid.in_(orids)) if t: if (t.count(None) == 0): q = q.filter(Origin.time.between(t[0], t[1])) else: if t[0]: q = q.filter((Origin.time > t[0])) if t[1]: q = q.filter((Origin.time < t[1])) if mag: magclause = [] for (magtype, vals) in mag.iteritems(): magclause.append(getattr(Origin, magtype).between(vals[0], vals[1])) q = q.filter(or_(*magclause)) if evids: q = q.filter((Origin.evid == Event.evid)) q = q.filter(Event.evid.in_(evids)) if prefor: q = q.filter((Origin.orid == Event.prefor)) q = geographic_query(q, Origin, region=region, depth=depth, asquery=True) if asquery: res = q else: res = distaz_query(q.all(), deg=deg, km=km, swath=swath) return res
def get_events(session, origin, event=None, region=None, deg=None, km=None, swath=None, mag=None, depth=None, etime=None, orids=None, evids=None, prefor=False, asquery=False): "\n Build common queries for events.\n\n Parameters\n ----------\n session : sqlalchemy.orm.Session instance\n Must be bound.\n origin : mapped Origin table class\n event : mapped Event table class, optional\n region : list or tuple of numbers, optional\n (W, E, S, N) in degrees. Default, None.\n deg : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr) . Default, None.\n minr, maxr in degrees or None for unconstrained.\n km : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr) Default, None.\n minr, maxr in km or None for unconstrained.\n swath : list or tuple of numbers, optional\n (lat, lon, azimuth, tolerance)\n Azimuth (from North) +/-tolerance from lat,lon point in degrees.\n Not yet implemented.\n mag : dict, optional\n {'type1': [min1, max1], 'type2': [min2, max2], ...}\n 'type' can be 'mb', 'ms', or 'ml'. Produces OR clauses.\n depth : tuple or list, optional\n Depth interval [mindep, maxdep] in km.\n Use None for an unconstrained limit.\n etime : tuple or list, optional\n (tstart, tend) epoch event time window\n Use None for an unconstrained limit.\n orids, evids : list or tuple of int, optional\n orid, evid numbers < 1000 in length\n Evids requires event table.\n prefor : bool, optional\n Return preferred origins only. Default False. Requires event table\n be provided.\n asquery : bool, optional\n Return the query object instead of the results. Default, False.\n Useful if additional you desire additional sorting of filtering, or\n if you have your own in-database geographic query function(s). If \n supplied, deg, km, and/or swath are ignored in the returned query.\n\n Returns\n -------\n sqlalchemy.orm.Query instance\n\n Notes\n -----\n Each keyword argument corresponds to an AND clause, except 'mag' which\n returns OR clauses. 
Don't submit a request containing both 'evids' and\n 'orids' unless you want them joined by an AND clause. Otherwise process\n them individually, then collate and unique them afterwards.\n\n " Origin = origin Event = event t = etime q = session.query(Origin) if orids: q = q.filter(Origin.orid.in_(orids)) if t: if (t.count(None) == 0): q = q.filter(Origin.time.between(t[0], t[1])) else: if t[0]: q = q.filter((Origin.time > t[0])) if t[1]: q = q.filter((Origin.time < t[1])) if mag: magclause = [] for (magtype, vals) in mag.iteritems(): magclause.append(getattr(Origin, magtype).between(vals[0], vals[1])) q = q.filter(or_(*magclause)) if evids: q = q.filter((Origin.evid == Event.evid)) q = q.filter(Event.evid.in_(evids)) if prefor: q = q.filter((Origin.orid == Event.prefor)) q = geographic_query(q, Origin, region=region, depth=depth, asquery=True) if asquery: res = q else: res = distaz_query(q.all(), deg=deg, km=km, swath=swath) return res<|docstring|>Build common queries for events. Parameters ---------- session : sqlalchemy.orm.Session instance Must be bound. origin : mapped Origin table class event : mapped Event table class, optional region : list or tuple of numbers, optional (W, E, S, N) in degrees. Default, None. deg : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) . Default, None. minr, maxr in degrees or None for unconstrained. km : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) Default, None. minr, maxr in km or None for unconstrained. swath : list or tuple of numbers, optional (lat, lon, azimuth, tolerance) Azimuth (from North) +/-tolerance from lat,lon point in degrees. Not yet implemented. mag : dict, optional {'type1': [min1, max1], 'type2': [min2, max2], ...} 'type' can be 'mb', 'ms', or 'ml'. Produces OR clauses. depth : tuple or list, optional Depth interval [mindep, maxdep] in km. Use None for an unconstrained limit. 
etime : tuple or list, optional (tstart, tend) epoch event time window Use None for an unconstrained limit. orids, evids : list or tuple of int, optional orid, evid numbers < 1000 in length Evids requires event table. prefor : bool, optional Return preferred origins only. Default False. Requires event table be provided. asquery : bool, optional Return the query object instead of the results. Default, False. Useful if additional you desire additional sorting of filtering, or if you have your own in-database geographic query function(s). If supplied, deg, km, and/or swath are ignored in the returned query. Returns ------- sqlalchemy.orm.Query instance Notes ----- Each keyword argument corresponds to an AND clause, except 'mag' which returns OR clauses. Don't submit a request containing both 'evids' and 'orids' unless you want them joined by an AND clause. Otherwise process them individually, then collate and unique them afterwards.<|endoftext|>
158b24cbdb58ad9c12dff4e4a17a1f97d0a258f4030c038162aadc613db7e832
def get_stations(session, site, sitechan=None, affiliation=None,
                 stations=None, channels=None, nets=None, loc=None,
                 region=None, deg=None, km=None, swath=None, stime=None,
                 asquery=False):
    """
    Build common queries for stations.

    Parameters
    ----------
    session : sqlalchemy.orm.Session instance
        Must be bound.
    site : mapped Site table class
    sitechan : mapped Sitechan table class, optional
    affiliation : mapped Affiliation table class, optional
    stations : list or tuple of strings
        Desired station code strings.
    channels, nets : list or tuple of strings, or single regex string, optional
        Desired channel, network code strings or regex.
    loc : list/tuple, optional
        Location code. Not yet implemented.
    region : tuple or list of numbers, optional
        Geographic (W, E, S, N) in degrees, None values for unconstrained.
    deg : list or tuple of numbers, optional
        (centerlat, centerlon, minr, maxr)
        minr, maxr in degrees or None for unconstrained.
    km : list or tuple of numbers, optional
        (centerlat, centerlon, minr, maxr)
        minr, maxr in km or None for unconstrained.
    swath : list or tuple of numbers, optional
        (lat, lon, azimuth, tolerance)
        Azimuth (from North) +/- tolerance from lat,lon point in degrees.
        Currently only works in gnem Oracle.
    stime : optional
        Currently unused.
    asquery : bool, optional
        Return the query object instead of the results. Default, False.
        Useful if you desire additional sorting or filtering, or if you
        have your own in-database geographic query function(s). If
        supplied, deg, km, and/or swath are ignored in the returned query.

    Notes
    -----
    Each parameter produces an AND clause, list parameters produce IN
    clauses, a regex produces a REGEXP_LIKE clause (Oracle-specific?).

    deg, km, and swath are evaluated out-of-database by evaluating all
    other flags first, then masking. This can be memory-intensive. See
    "Examples" for how to perform in-database distance filters.

    To include channels or networks with your results use asquery=True, and

    >>> q = q.add_columns(Sitechan.chan)
    >>> q = q.add_columns(Affiliation.net)

    with the returned query.

    Examples
    --------
    Use your own in-database distance query function "km_from_point":

    >>> from sqlalchemy import func
    >>> q = get_stations(session, site, channels=['BHZ'],
    ...                  region=(65, 75, 30, 40), asquery=True)
    >>> stations = q.filter(func.km_from_point(site.lat, site.lon,
    ...                                        40, -110) < 100).all()

    """
    Site = site
    Sitechan = sitechan
    Affiliation = affiliation

    q = session.query(Site)

    if stations:
        q = q.filter(Site.sta.in_(stations))

    if nets:
        q = q.join(Affiliation, Affiliation.sta == Site.sta)
        if isinstance(nets, list):
            q = q.filter(Affiliation.net.in_(nets))
        else:
            # A bare string is treated as an Oracle regular expression.
            q = q.filter(func.regexp_like(Affiliation.net, nets))

    if channels:
        q = q.join(Sitechan, Sitechan.sta == Site.sta)
        if isinstance(channels, str):
            # BUG FIX: previously referenced the misspelled name
            # 'channnels', raising NameError for any regex string input.
            q = q.filter(func.regexp_like(Sitechan.chan, channels))
        else:
            q = q.filter(Sitechan.chan.in_(channels))

    # Region filter is applied in-database; distance filters (deg, km,
    # swath) are applied out-of-database on the fetched results.
    q = geographic_query(q, Site, region=region, asquery=True)

    if asquery:
        res = q
    else:
        res = distaz_query(q.all(), deg=deg, km=km, swath=swath)

    return res
Build common queries for stations. Parameters ---------- session : sqlalchemy.orm.Session instance Must be bound. site : mapped Site table class sitechan : mapped Sitechan table class, optional affiliation : mapped Affiliation table class, optional stations : list or tuple of strings Desired station code strings. channels, nets : list or tuple of strings, or single regex string, optional Desired channel, network code strings or regex loc : list/tuple, optional Location code. Not yet implemented. region : tuple or list of numbers, optional Geographic (W,E,S,N) in degrees, None values for unconstrained. deg : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) minr, maxr in degrees or None for unconstrained. km : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) minr, maxr in km or None for unconstrained. swath : list or tuple of numbers, optional (lat, lon, azimuth, tolerance) Azimuth (from North) +/-tolerance from lat,lon point in degrees. Currently only works in gnem Oracle. asquery : bool, optional Return the query object instead of the results. Default, False. Useful if additional you desire additional sorting of filtering, or if you have your own in-database geographic query function(s). If supplied, deg, km, and/or swath are ignored in the returned query. Notes ----- Each parameter produces an AND clause, list parameters produce IN clauses, a regex produces a REGEXP_LIKE clause (Oracle-specific?). deg, km, and swath are evaluated out-of-database by evaluating all other flags first, then masking. This can be memory-intensive. See "Examples" for how to perform in-database distance filters. To include channels or networks with your results use asquery=True, and >>> q = q.add_columns(Sitechan.chan) >>> q = q.add_columns(Affiliation.net) with the returned query. 
Examples -------- Use your own in-database distance query function "km_from_point": >>> from sqlalchemy import func >>> q = get_stations(session, site, channels=['BHZ'], region=(65,75,30,40), asquery=True) >>> stations = q.filter(func.km_from_point(site.lat, site.lon, 40, -110) < 100).all()
pisces/request.py
get_stations
samuelchodur/pisces
12
python
def get_stations(session, site, sitechan=None, affiliation=None, stations=None, channels=None, nets=None, loc=None, region=None, deg=None, km=None, swath=None, stime=None, asquery=False): '\n Build common queries for stations.\n\n Parameters\n ----------\n session : sqlalchemy.orm.Session instance\n Must be bound.\n site : mapped Site table class\n sitechan : mapped Sitechan table class, optional\n affiliation : mapped Affiliation table class, optional\n stations : list or tuple of strings\n Desired station code strings.\n channels, nets : list or tuple of strings, or single regex string, optional\n Desired channel, network code strings or regex\n loc : list/tuple, optional\n Location code.\n Not yet implemented.\n region : tuple or list of numbers, optional\n Geographic (W,E,S,N) in degrees, None values for unconstrained.\n deg : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in degrees or None for unconstrained.\n km : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in km or None for unconstrained.\n swath : list or tuple of numbers, optional\n (lat, lon, azimuth, tolerance)\n Azimuth (from North) +/-tolerance from lat,lon point in degrees.\n Currently only works in gnem Oracle.\n asquery : bool, optional\n Return the query object instead of the results. Default, False.\n Useful if additional you desire additional sorting of filtering, or\n if you have your own in-database geographic query function(s). If \n supplied, deg, km, and/or swath are ignored in the returned query.\n\n Notes\n -----\n Each parameter produces an AND clause, list parameters produce IN \n clauses, a regex produces a REGEXP_LIKE clause (Oracle-specific?).\n\n deg, km, and swath are evaluated out-of-database by evaluating all other \n flags first, then masking. This can be memory-intensive. 
See "Examples"\n for how to perform in-database distance filters.\n \n To include channels or networks with your results use asquery=True, and\n\n >>> q = q.add_columns(Sitechan.chan)\n >>> q = q.add_columns(Affiliation.net)\n\n with the returned query.\n\n Examples\n --------\n Use your own in-database distance query function "km_from_point":\n\n >>> from sqlalchemy import func\n >>> q = get_stations(session, site, channels=[\'BHZ\'], region=(65,75,30,40), asquery=True)\n >>> stations = q.filter(func.km_from_point(site.lat, site.lon, 40, -110) < 100).all()\n\n ' Site = site Sitechan = sitechan Affiliation = affiliation d = deg t = stime q = session.query(Site) if stations: q = q.filter(Site.sta.in_(stations)) if nets: q = q.join(Affiliation, (Affiliation.sta == Site.sta)) if isinstance(nets, list): q = q.filter(Affiliation.net.in_(nets)) else: q = q.filter(func.regexp_like(Affiliation.net, nets)) if channels: q = q.join(Sitechan, (Sitechan.sta == Site.sta)) if isinstance(channels, str): q = q.filter(func.regexp_like(Sitechan.chan, channnels)) else: q = q.filter(Sitechan.chan.in_(channels)) q = geographic_query(q, Site, region=region, asquery=True) if asquery: res = q else: res = distaz_query(q.all(), deg=deg, km=km, swath=swath) return res
def get_stations(session, site, sitechan=None, affiliation=None, stations=None, channels=None, nets=None, loc=None, region=None, deg=None, km=None, swath=None, stime=None, asquery=False): '\n Build common queries for stations.\n\n Parameters\n ----------\n session : sqlalchemy.orm.Session instance\n Must be bound.\n site : mapped Site table class\n sitechan : mapped Sitechan table class, optional\n affiliation : mapped Affiliation table class, optional\n stations : list or tuple of strings\n Desired station code strings.\n channels, nets : list or tuple of strings, or single regex string, optional\n Desired channel, network code strings or regex\n loc : list/tuple, optional\n Location code.\n Not yet implemented.\n region : tuple or list of numbers, optional\n Geographic (W,E,S,N) in degrees, None values for unconstrained.\n deg : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in degrees or None for unconstrained.\n km : list or tuple of numbers, optional\n (centerlat, centerlon, minr, maxr)\n minr, maxr in km or None for unconstrained.\n swath : list or tuple of numbers, optional\n (lat, lon, azimuth, tolerance)\n Azimuth (from North) +/-tolerance from lat,lon point in degrees.\n Currently only works in gnem Oracle.\n asquery : bool, optional\n Return the query object instead of the results. Default, False.\n Useful if additional you desire additional sorting of filtering, or\n if you have your own in-database geographic query function(s). If \n supplied, deg, km, and/or swath are ignored in the returned query.\n\n Notes\n -----\n Each parameter produces an AND clause, list parameters produce IN \n clauses, a regex produces a REGEXP_LIKE clause (Oracle-specific?).\n\n deg, km, and swath are evaluated out-of-database by evaluating all other \n flags first, then masking. This can be memory-intensive. 
See "Examples"\n for how to perform in-database distance filters.\n \n To include channels or networks with your results use asquery=True, and\n\n >>> q = q.add_columns(Sitechan.chan)\n >>> q = q.add_columns(Affiliation.net)\n\n with the returned query.\n\n Examples\n --------\n Use your own in-database distance query function "km_from_point":\n\n >>> from sqlalchemy import func\n >>> q = get_stations(session, site, channels=[\'BHZ\'], region=(65,75,30,40), asquery=True)\n >>> stations = q.filter(func.km_from_point(site.lat, site.lon, 40, -110) < 100).all()\n\n ' Site = site Sitechan = sitechan Affiliation = affiliation d = deg t = stime q = session.query(Site) if stations: q = q.filter(Site.sta.in_(stations)) if nets: q = q.join(Affiliation, (Affiliation.sta == Site.sta)) if isinstance(nets, list): q = q.filter(Affiliation.net.in_(nets)) else: q = q.filter(func.regexp_like(Affiliation.net, nets)) if channels: q = q.join(Sitechan, (Sitechan.sta == Site.sta)) if isinstance(channels, str): q = q.filter(func.regexp_like(Sitechan.chan, channnels)) else: q = q.filter(Sitechan.chan.in_(channels)) q = geographic_query(q, Site, region=region, asquery=True) if asquery: res = q else: res = distaz_query(q.all(), deg=deg, km=km, swath=swath) return res<|docstring|>Build common queries for stations. Parameters ---------- session : sqlalchemy.orm.Session instance Must be bound. site : mapped Site table class sitechan : mapped Sitechan table class, optional affiliation : mapped Affiliation table class, optional stations : list or tuple of strings Desired station code strings. channels, nets : list or tuple of strings, or single regex string, optional Desired channel, network code strings or regex loc : list/tuple, optional Location code. Not yet implemented. region : tuple or list of numbers, optional Geographic (W,E,S,N) in degrees, None values for unconstrained. 
deg : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) minr, maxr in degrees or None for unconstrained. km : list or tuple of numbers, optional (centerlat, centerlon, minr, maxr) minr, maxr in km or None for unconstrained. swath : list or tuple of numbers, optional (lat, lon, azimuth, tolerance) Azimuth (from North) +/-tolerance from lat,lon point in degrees. Currently only works in gnem Oracle. asquery : bool, optional Return the query object instead of the results. Default, False. Useful if additional you desire additional sorting of filtering, or if you have your own in-database geographic query function(s). If supplied, deg, km, and/or swath are ignored in the returned query. Notes ----- Each parameter produces an AND clause, list parameters produce IN clauses, a regex produces a REGEXP_LIKE clause (Oracle-specific?). deg, km, and swath are evaluated out-of-database by evaluating all other flags first, then masking. This can be memory-intensive. See "Examples" for how to perform in-database distance filters. To include channels or networks with your results use asquery=True, and >>> q = q.add_columns(Sitechan.chan) >>> q = q.add_columns(Affiliation.net) with the returned query. Examples -------- Use your own in-database distance query function "km_from_point": >>> from sqlalchemy import func >>> q = get_stations(session, site, channels=['BHZ'], region=(65,75,30,40), asquery=True) >>> stations = q.filter(func.km_from_point(site.lat, site.lon, 40, -110) < 100).all()<|endoftext|>
d4a894f2bcc2c107819e4314a84f4743a963985f8735a0c28ec023f614c7402a
def get_arrivals(session, arrival, assoc=None, stations=None, channels=None,
                 atime=None, phases=None, arids=None, orids=None, auth=None,
                 asquery=False):
    """
    Build common queries for arrivals.

    Parameters
    ----------
    session : sqlalchemy.orm.Session instance
        Must be bound.
    arrival : mapped Arrival table class
    assoc : mapped Assoc table class, optional
        Required when orids is supplied.
    stations, channels : list or tuple of strings
        Desired station, channel strings.
    atime : tuple or list of float, optional
        (tstart, tend) epoch arrival time window. Either can be None.
    phases : list or tuple of strings
        Arrival 'iphase' values.
    arids : list of integers
        Desired arid numbers.
    orids : list of integers
        orids from which associated arrivals will be returned. Requires
        the Assoc table.
    auth : list/tuple of strings
        Arrival author list.
    asquery : bool, optional
        Return the query object instead of the results. Default, False.

    Returns
    -------
    list or sqlalchemy.orm.Query instance
        Arrival results.

    Notes
    -----
    Each argument adds an AND clause to the SQL query.
    Unspecified (keyword) arguments are treated as wildcards. That is, no
    arguments means, "give me all arrivals everywhere ever."

    """
    Arrival = arrival
    Assoc = assoc
    t = atime

    q = session.query(Arrival)

    if stations:
        q = q.filter(Arrival.sta.in_(stations))

    if channels:
        q = q.filter(Arrival.chan.in_(channels))

    if phases:
        # BUG FIX: previously referenced the undefined name 'phase',
        # raising NameError whenever phases was supplied.
        q = q.filter(Arrival.iphase.in_(phases))

    if t:
        if t.count(None) == 0:
            # Both endpoints supplied: a single BETWEEN clause.
            q = q.filter(Arrival.time.between(t[0], t[1]))
        else:
            # Open-ended window: constrain only the supplied endpoint(s).
            if t[0]:
                q = q.filter(Arrival.time > t[0])
            if t[1]:
                q = q.filter(Arrival.time < t[1])

    if arids:
        q = q.filter(Arrival.arid.in_(arids))

    if orids:
        # Join through Assoc to restrict to arrivals associated with the
        # requested origins.
        q = q.filter(Arrival.arid == Assoc.arid)
        q = q.filter(Assoc.orid.in_(orids))

    if auth:
        q = q.filter(Arrival.auth.in_(auth))

    if asquery:
        res = q
    else:
        res = q.all()

    return res
Build common queries for arrivals. Parameters ---------- stations, channels : list or tuple of strings Desired station, channel strings. arrival: mapped Arrival table class assoc: mapped Assoc table class, optional atime : tuple or list of float, optional (tstart, tend) epoch arrival time window. Either can be None. phases: list or tuple of strings Arrival 'iphase'. arids : list of integers Desired arid numbers. orids : list of integers orids from which associated arrivals will be returned. Requires Assoc table. auth : list/tuple of strings Arrival author list. Returns ------- list or sqlalchemy.orm.Query instance Arrival results. Notes ----- Each argument adds an AND clause to the SQL query. Unspecified (keyword) arguments are treated as wildcards. That is, no arguments means, "give me all arrivals everywhere ever."
pisces/request.py
get_arrivals
samuelchodur/pisces
12
python
def get_arrivals(session, arrival, assoc=None, stations=None, channels=None, atime=None, phases=None, arids=None, orids=None, auth=None, asquery=False): '\n Build common queries for arrivals.\n \n Parameters\n ----------\n stations, channels : list or tuple of strings\n Desired station, channel strings.\n arrival: mapped Arrival table class\n assoc: mapped Assoc table class, optional\n atime : tuple or list of float, optional\n (tstart, tend) epoch arrival time window. Either can be None.\n phases: list or tuple of strings\n Arrival \'iphase\'.\n arids : list of integers\n Desired arid numbers.\n orids : list of integers\n orids from which associated arrivals will be returned. Requires Assoc\n table.\n auth : list/tuple of strings\n Arrival author list.\n\n Returns\n -------\n list or sqlalchemy.orm.Query instance\n Arrival results.\n\n Notes\n -----\n Each argument adds an AND clause to the SQL query.\n Unspecified (keyword) arguments are treated as wildcards. That is, no\n arguments means, "give me all arrivals everywhere ever."\n\n ' Arrival = arrival Assoc = assoc t = atime q = session.query(Arrival) if stations: q = q.filter(Arrival.sta.in_(stations)) if channels: q = q.filter(Arrival.chan.in_(channels)) if phases: q = q.filter(Arrival.iphase.in_(phase)) if t: if (t.count(None) == 0): q = q.filter(Arrival.time.between(t[0], t[1])) else: if t[0]: q = q.filter((Arrival.time > t[0])) if t[1]: q = q.filter((Arrival.time < t[1])) if arids: q = q.filter(Arrival.arid.in_(arids)) if orids: q = q.filter((Arrival.arid == Assoc.arid)) q = q.filter(Assoc.orid.in_(orids)) if auth: q = q.filter(Arrival.auth.in_(auth)) if asquery: res = q else: res = q.all() return res
def get_arrivals(session, arrival, assoc=None, stations=None, channels=None, atime=None, phases=None, arids=None, orids=None, auth=None, asquery=False): '\n Build common queries for arrivals.\n \n Parameters\n ----------\n stations, channels : list or tuple of strings\n Desired station, channel strings.\n arrival: mapped Arrival table class\n assoc: mapped Assoc table class, optional\n atime : tuple or list of float, optional\n (tstart, tend) epoch arrival time window. Either can be None.\n phases: list or tuple of strings\n Arrival \'iphase\'.\n arids : list of integers\n Desired arid numbers.\n orids : list of integers\n orids from which associated arrivals will be returned. Requires Assoc\n table.\n auth : list/tuple of strings\n Arrival author list.\n\n Returns\n -------\n list or sqlalchemy.orm.Query instance\n Arrival results.\n\n Notes\n -----\n Each argument adds an AND clause to the SQL query.\n Unspecified (keyword) arguments are treated as wildcards. That is, no\n arguments means, "give me all arrivals everywhere ever."\n\n ' Arrival = arrival Assoc = assoc t = atime q = session.query(Arrival) if stations: q = q.filter(Arrival.sta.in_(stations)) if channels: q = q.filter(Arrival.chan.in_(channels)) if phases: q = q.filter(Arrival.iphase.in_(phase)) if t: if (t.count(None) == 0): q = q.filter(Arrival.time.between(t[0], t[1])) else: if t[0]: q = q.filter((Arrival.time > t[0])) if t[1]: q = q.filter((Arrival.time < t[1])) if arids: q = q.filter(Arrival.arid.in_(arids)) if orids: q = q.filter((Arrival.arid == Assoc.arid)) q = q.filter(Assoc.orid.in_(orids)) if auth: q = q.filter(Arrival.auth.in_(auth)) if asquery: res = q else: res = q.all() return res<|docstring|>Build common queries for arrivals. Parameters ---------- stations, channels : list or tuple of strings Desired station, channel strings. arrival: mapped Arrival table class assoc: mapped Assoc table class, optional atime : tuple or list of float, optional (tstart, tend) epoch arrival time window. 
Either can be None. phases: list or tuple of strings Arrival 'iphase'. arids : list of integers Desired arid numbers. orids : list of integers orids from which associated arrivals will be returned. Requires Assoc table. auth : list/tuple of strings Arrival author list. Returns ------- list or sqlalchemy.orm.Query instance Arrival results. Notes ----- Each argument adds an AND clause to the SQL query. Unspecified (keyword) arguments are treated as wildcards. That is, no arguments means, "give me all arrivals everywhere ever."<|endoftext|>
44ffbde1e6a5c47e3842d14065574566a51271d4d153089c3863aefc466cb378
def get_waveforms(session, wfdisc, station=None, channel=None, starttime=None,
                  endtime=None, wfids=None, tol=None):
    """
    Request waveforms.

    Parameters
    ----------
    session : sqlalchemy.orm.Session instance
        Must be bound.
    wfdisc : mapped Wfdisc table class
    station, channel : str, optional
        Desired station, channel code strings.
    starttime, endtime : float, optional
        Epoch start time, end time.
        Traces will be cut to these times.
    wfids : iterable of int, optional
        Wfdisc wfids. Obviates the above arguments and just returns full
        Wfdisc row waveforms.
    tol : float, optional
        If provided (together with starttime and endtime), a ValueError
        is raised when the returned Stream's extents are not within tol
        seconds of starttime and endtime.

    Returns
    -------
    obspy.Stream
        Traces are cut to requested times.

    Raises
    ------
    ValueError
        Returned Stream contains trace start/end times outside of the
        tolerance.

    """
    Wfdisc = wfdisc

    st = Stream()
    if wfids:
        # wfids take precedence: ignore all other row constraints.
        station = channel = starttime = endtime = None

    starttime = float(starttime) if starttime is not None else None
    endtime = float(endtime) if endtime is not None else None
    t1_utc = UTCDateTime(starttime) if starttime is not None else None
    t2_utc = UTCDateTime(endtime) if endtime is not None else None

    wfs = get_wfdisc_rows(session, Wfdisc, station, channel, starttime,
                          endtime, wfids=wfids)
    for wf in wfs:
        try:
            tr = wfdisc2trace(wf)
        except IOError:
            # Waveform file missing or unreadable: skip this row.
            tr = None
        if tr:
            tr.trim(t1_utc, t2_utc)
            st.append(tr)

    # BUG FIX: guard on a non-empty Stream; previously an empty result
    # with tol/starttime/endtime supplied crashed on zip(*[]) unpacking.
    if st and all([tol, starttime, endtime]):
        starttimes, endtimes = zip(*[(t.stats.starttime, t.stats.endtime)
                                     for t in st])
        min_t = float(min(starttimes))
        max_t = float(max(endtimes))
        if abs(min_t - starttime) > tol or abs(max_t - endtime) > tol:
            msg = 'Trace times are outside of tolerance: {}'.format(tol)
            raise ValueError(msg)

    return st
Request waveforms. Parameters ---------- session : sqlalchemy.orm.Session instance Must be bound. wfdisc : mapped Wfdisc table class station, channel : str, optional Desired station, channel code strings starttimes, endtimes : float, optional Epoch start times, end times. Traces will be cut to these times. wfids : iterable of int, optional Wfdisc wfids. Obviates the above arguments and just returns full Wfdisc row waveforms. tol : float If provided, a warning is fired if any Trace is not within tol seconds of starttime and endtime. Returns ------- obspy.Stream Traces are merged and cut to requested times. Raises ------ ValueError Returned Stream contains trace start/end times outside of the tolerance.
pisces/request.py
get_waveforms
samuelchodur/pisces
12
python
def get_waveforms(session, wfdisc, station=None, channel=None, starttime=None, endtime=None, wfids=None, tol=None): '\n Request waveforms.\n\n Parameters\n ----------\n session : sqlalchemy.orm.Session instance\n Must be bound.\n wfdisc : mapped Wfdisc table class\n station, channel : str, optional\n Desired station, channel code strings\n starttimes, endtimes : float, optional\n Epoch start times, end times.\n Traces will be cut to these times.\n wfids : iterable of int, optional\n Wfdisc wfids. Obviates the above arguments and just returns full Wfdisc\n row waveforms.\n tol : float\n If provided, a warning is fired if any Trace is not within tol seconds\n of starttime and endtime.\n\n Returns\n -------\n obspy.Stream\n Traces are merged and cut to requested times.\n\n Raises\n ------\n ValueError\n Returned Stream contains trace start/end times outside of the tolerance.\n\n ' Wfdisc = wfdisc st = Stream() if wfids: station = channel = starttime = endtime = None starttime = (float(starttime) if (starttime is not None) else None) endtime = (float(endtime) if (endtime is not None) else None) t1_utc = (UTCDateTime(starttime) if (starttime is not None) else None) t2_utc = (UTCDateTime(endtime) if (endtime is not None) else None) wfs = get_wfdisc_rows(session, Wfdisc, station, channel, starttime, endtime, wfids=wfids) for wf in wfs: try: tr = wfdisc2trace(wf) except IOError: tr = None if tr: tr.trim(t1_utc, t2_utc) st.append(tr) if all([tol, starttime, endtime]): (starttimes, endtimes) = zip(*[(t.stats.starttime, t.stats.endtime) for t in st]) min_t = float(min(starttimes)) max_t = float(max(endtimes)) if ((abs((min_t - starttime)) > tol) or (abs((max_t - endtime)) > tol)): msg = 'Trace times are outside of tolerance: {}'.format(tol) raise ValueError(msg) return st
def get_waveforms(session, wfdisc, station=None, channel=None, starttime=None, endtime=None, wfids=None, tol=None): '\n Request waveforms.\n\n Parameters\n ----------\n session : sqlalchemy.orm.Session instance\n Must be bound.\n wfdisc : mapped Wfdisc table class\n station, channel : str, optional\n Desired station, channel code strings\n starttimes, endtimes : float, optional\n Epoch start times, end times.\n Traces will be cut to these times.\n wfids : iterable of int, optional\n Wfdisc wfids. Obviates the above arguments and just returns full Wfdisc\n row waveforms.\n tol : float\n If provided, a warning is fired if any Trace is not within tol seconds\n of starttime and endtime.\n\n Returns\n -------\n obspy.Stream\n Traces are merged and cut to requested times.\n\n Raises\n ------\n ValueError\n Returned Stream contains trace start/end times outside of the tolerance.\n\n ' Wfdisc = wfdisc st = Stream() if wfids: station = channel = starttime = endtime = None starttime = (float(starttime) if (starttime is not None) else None) endtime = (float(endtime) if (endtime is not None) else None) t1_utc = (UTCDateTime(starttime) if (starttime is not None) else None) t2_utc = (UTCDateTime(endtime) if (endtime is not None) else None) wfs = get_wfdisc_rows(session, Wfdisc, station, channel, starttime, endtime, wfids=wfids) for wf in wfs: try: tr = wfdisc2trace(wf) except IOError: tr = None if tr: tr.trim(t1_utc, t2_utc) st.append(tr) if all([tol, starttime, endtime]): (starttimes, endtimes) = zip(*[(t.stats.starttime, t.stats.endtime) for t in st]) min_t = float(min(starttimes)) max_t = float(max(endtimes)) if ((abs((min_t - starttime)) > tol) or (abs((max_t - endtime)) > tol)): msg = 'Trace times are outside of tolerance: {}'.format(tol) raise ValueError(msg) return st<|docstring|>Request waveforms. Parameters ---------- session : sqlalchemy.orm.Session instance Must be bound. 
wfdisc : mapped Wfdisc table class station, channel : str, optional Desired station, channel code strings starttimes, endtimes : float, optional Epoch start times, end times. Traces will be cut to these times. wfids : iterable of int, optional Wfdisc wfids. Obviates the above arguments and just returns full Wfdisc row waveforms. tol : float If provided, a warning is fired if any Trace is not within tol seconds of starttime and endtime. Returns ------- obspy.Stream Traces are merged and cut to requested times. Raises ------ ValueError Returned Stream contains trace start/end times outside of the tolerance.<|endoftext|>
f0990ba8df46284b428b33221dfbdf4ddd6b0cdb241b524617b99e963ffb8f93
def get_ids(session, lastid, ids, detach=False): "\n Get or create lastid rows.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session instance, bound\n lastid : sqlalchemy orm mapped lastid table\n ids : list\n Desired lastid keyname strings.\n detach : bool, optional\n If True, expunge results from session before returning.\n Useful if you don't have permission on lastid, and don't want\n session commits to throw a permission error.\n\n\n Returns\n -------\n list\n Corresponding existing or new rows from lastid table.\n\n Notes\n -----\n Keyvalue is 0 if id name not present in lastid table.\n\n " out = [] for idname in ids: iid = session.query(lastid).filter((lastid.keyname == idname)).first() if (not iid): iid = lastid(keyname=idname, keyvalue=0) out.append(iid.keyvalue) if detach: session.expunge_all(out) return out
Get or create lastid rows. Parameters ---------- session : sqlalchemy.orm.session instance, bound lastid : sqlalchemy orm mapped lastid table ids : list Desired lastid keyname strings. detach : bool, optional If True, expunge results from session before returning. Useful if you don't have permission on lastid, and don't want session commits to throw a permission error. Returns ------- list Corresponding existing or new rows from lastid table. Notes ----- Keyvalue is 0 if id name not present in lastid table.
pisces/request.py
get_ids
samuelchodur/pisces
12
python
def get_ids(session, lastid, ids, detach=False): "\n Get or create lastid rows.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session instance, bound\n lastid : sqlalchemy orm mapped lastid table\n ids : list\n Desired lastid keyname strings.\n detach : bool, optional\n If True, expunge results from session before returning.\n Useful if you don't have permission on lastid, and don't want\n session commits to throw a permission error.\n\n\n Returns\n -------\n list\n Corresponding existing or new rows from lastid table.\n\n Notes\n -----\n Keyvalue is 0 if id name not present in lastid table.\n\n " out = [] for idname in ids: iid = session.query(lastid).filter((lastid.keyname == idname)).first() if (not iid): iid = lastid(keyname=idname, keyvalue=0) out.append(iid.keyvalue) if detach: session.expunge_all(out) return out
def get_ids(session, lastid, ids, detach=False): "\n Get or create lastid rows.\n\n Parameters\n ----------\n session : sqlalchemy.orm.session instance, bound\n lastid : sqlalchemy orm mapped lastid table\n ids : list\n Desired lastid keyname strings.\n detach : bool, optional\n If True, expunge results from session before returning.\n Useful if you don't have permission on lastid, and don't want\n session commits to throw a permission error.\n\n\n Returns\n -------\n list\n Corresponding existing or new rows from lastid table.\n\n Notes\n -----\n Keyvalue is 0 if id name not present in lastid table.\n\n " out = [] for idname in ids: iid = session.query(lastid).filter((lastid.keyname == idname)).first() if (not iid): iid = lastid(keyname=idname, keyvalue=0) out.append(iid.keyvalue) if detach: session.expunge_all(out) return out<|docstring|>Get or create lastid rows. Parameters ---------- session : sqlalchemy.orm.session instance, bound lastid : sqlalchemy orm mapped lastid table ids : list Desired lastid keyname strings. detach : bool, optional If True, expunge results from session before returning. Useful if you don't have permission on lastid, and don't want session commits to throw a permission error. Returns ------- list Corresponding existing or new rows from lastid table. Notes ----- Keyvalue is 0 if id name not present in lastid table.<|endoftext|>
84974a6ff7566a8ec7a2a819464470e72b61d6555078aab0001b136cdc8b81eb
def get_positions(start_idx, end_idx, length): ' Get subj/obj position sequence. ' return ((list(range((- start_idx), 0)) + ([0] * ((end_idx - start_idx) + 1))) + list(range(1, (length - end_idx))))
Get subj/obj position sequence.
semeval/data/loader.py
get_positions
bsinghpratap/AGGCN
222
python
def get_positions(start_idx, end_idx, length): ' ' return ((list(range((- start_idx), 0)) + ([0] * ((end_idx - start_idx) + 1))) + list(range(1, (length - end_idx))))
def get_positions(start_idx, end_idx, length): ' ' return ((list(range((- start_idx), 0)) + ([0] * ((end_idx - start_idx) + 1))) + list(range(1, (length - end_idx))))<|docstring|>Get subj/obj position sequence.<|endoftext|>
b0bbfb5039d768d62bb4fce5925999faa5264c6c5eac8bc0a691f01c47927bbd
def get_long_tensor(tokens_list, batch_size): ' Convert list of list of tokens to a padded LongTensor. ' token_len = max((len(x) for x in tokens_list)) tokens = torch.LongTensor(batch_size, token_len).fill_(constant.PAD_ID) for (i, s) in enumerate(tokens_list): tokens[(i, :len(s))] = torch.LongTensor(s) return tokens
Convert list of list of tokens to a padded LongTensor.
semeval/data/loader.py
get_long_tensor
bsinghpratap/AGGCN
222
python
def get_long_tensor(tokens_list, batch_size): ' ' token_len = max((len(x) for x in tokens_list)) tokens = torch.LongTensor(batch_size, token_len).fill_(constant.PAD_ID) for (i, s) in enumerate(tokens_list): tokens[(i, :len(s))] = torch.LongTensor(s) return tokens
def get_long_tensor(tokens_list, batch_size): ' ' token_len = max((len(x) for x in tokens_list)) tokens = torch.LongTensor(batch_size, token_len).fill_(constant.PAD_ID) for (i, s) in enumerate(tokens_list): tokens[(i, :len(s))] = torch.LongTensor(s) return tokens<|docstring|>Convert list of list of tokens to a padded LongTensor.<|endoftext|>
de77380ad2921102cb929a74f4b38adc8c5e75a5149d59e65d33210d5e9d66d4
def sort_all(batch, lens): ' Sort all fields by descending order of lens, and return the original indices. ' unsorted_all = (([lens] + [range(len(lens))]) + list(batch)) sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))] return (sorted_all[2:], sorted_all[1])
Sort all fields by descending order of lens, and return the original indices.
semeval/data/loader.py
sort_all
bsinghpratap/AGGCN
222
python
def sort_all(batch, lens): ' ' unsorted_all = (([lens] + [range(len(lens))]) + list(batch)) sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))] return (sorted_all[2:], sorted_all[1])
def sort_all(batch, lens): ' ' unsorted_all = (([lens] + [range(len(lens))]) + list(batch)) sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))] return (sorted_all[2:], sorted_all[1])<|docstring|>Sort all fields by descending order of lens, and return the original indices.<|endoftext|>
ab9c43a8e4e4855c9a51144a80a2f5a0301e860301fd5164eb4df697df1414a0
def word_dropout(tokens, dropout): ' Randomly dropout tokens (IDs) and replace them with <UNK> tokens. ' return [(constant.UNK_ID if ((x != constant.UNK_ID) and (np.random.random() < dropout)) else x) for x in tokens]
Randomly dropout tokens (IDs) and replace them with <UNK> tokens.
semeval/data/loader.py
word_dropout
bsinghpratap/AGGCN
222
python
def word_dropout(tokens, dropout): ' ' return [(constant.UNK_ID if ((x != constant.UNK_ID) and (np.random.random() < dropout)) else x) for x in tokens]
def word_dropout(tokens, dropout): ' ' return [(constant.UNK_ID if ((x != constant.UNK_ID) and (np.random.random() < dropout)) else x) for x in tokens]<|docstring|>Randomly dropout tokens (IDs) and replace them with <UNK> tokens.<|endoftext|>
724950fa9c7e852c83d0bd18cc1f31617a24bcba6502ebdfcc6ac6677b0d8593
def preprocess(self, data, vocab, opt): ' Preprocess the data and convert to ids. ' processed = [] for d in data: tokens = list(d['token']) if opt['lower']: tokens = [t.lower() for t in tokens] (ss, se) = (d['subj_start'], d['subj_end']) (os, oe) = (d['obj_start'], d['obj_end']) tokens = map_to_ids(tokens, vocab.word2id) pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID) deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID) head = [int(x) for x in d['stanford_head']] assert any([(x == 0) for x in head]) l = len(tokens) subj_positions = get_positions(d['subj_start'], d['subj_end'], l) obj_positions = get_positions(d['obj_start'], d['obj_end'], l) relation = self.label2id[d['relation']] processed += [(tokens, pos, deprel, head, subj_positions, obj_positions, relation)] return processed
Preprocess the data and convert to ids.
semeval/data/loader.py
preprocess
bsinghpratap/AGGCN
222
python
def preprocess(self, data, vocab, opt): ' ' processed = [] for d in data: tokens = list(d['token']) if opt['lower']: tokens = [t.lower() for t in tokens] (ss, se) = (d['subj_start'], d['subj_end']) (os, oe) = (d['obj_start'], d['obj_end']) tokens = map_to_ids(tokens, vocab.word2id) pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID) deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID) head = [int(x) for x in d['stanford_head']] assert any([(x == 0) for x in head]) l = len(tokens) subj_positions = get_positions(d['subj_start'], d['subj_end'], l) obj_positions = get_positions(d['obj_start'], d['obj_end'], l) relation = self.label2id[d['relation']] processed += [(tokens, pos, deprel, head, subj_positions, obj_positions, relation)] return processed
def preprocess(self, data, vocab, opt): ' ' processed = [] for d in data: tokens = list(d['token']) if opt['lower']: tokens = [t.lower() for t in tokens] (ss, se) = (d['subj_start'], d['subj_end']) (os, oe) = (d['obj_start'], d['obj_end']) tokens = map_to_ids(tokens, vocab.word2id) pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID) deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID) head = [int(x) for x in d['stanford_head']] assert any([(x == 0) for x in head]) l = len(tokens) subj_positions = get_positions(d['subj_start'], d['subj_end'], l) obj_positions = get_positions(d['obj_start'], d['obj_end'], l) relation = self.label2id[d['relation']] processed += [(tokens, pos, deprel, head, subj_positions, obj_positions, relation)] return processed<|docstring|>Preprocess the data and convert to ids.<|endoftext|>
b3c0e4cfddf63c62be1f8eb47086a19c6c9709c15af93cf99ea2156d7cf4c6d3
def gold(self): ' Return gold labels as a list. ' return self.labels
Return gold labels as a list.
semeval/data/loader.py
gold
bsinghpratap/AGGCN
222
python
def gold(self): ' ' return self.labels
def gold(self): ' ' return self.labels<|docstring|>Return gold labels as a list.<|endoftext|>
f43eba897e0ad8bc9955e98f5a7f5704c689dc894973562464823216fbe8333f
def __getitem__(self, key): ' Get a batch with index. ' if (not isinstance(key, int)): raise TypeError if ((key < 0) or (key >= len(self.data))): raise IndexError batch = self.data[key] batch_size = len(batch) batch = list(zip(*batch)) if (dataset == 'dataset/tacred'): assert (len(batch) == 10) else: assert (len(batch) == 7) lens = [len(x) for x in batch[0]] (batch, orig_idx) = sort_all(batch, lens) if (not self.eval): words = [word_dropout(sent, self.opt['word_dropout']) for sent in batch[0]] else: words = batch[0] words = get_long_tensor(words, batch_size) masks = torch.eq(words, 0) pos = get_long_tensor(batch[1], batch_size) deprel = get_long_tensor(batch[2], batch_size) head = get_long_tensor(batch[3], batch_size) subj_positions = get_long_tensor(batch[4], batch_size) obj_positions = get_long_tensor(batch[5], batch_size) rels = torch.LongTensor(batch[6]) return (words, masks, pos, deprel, head, subj_positions, obj_positions, rels, orig_idx)
Get a batch with index.
semeval/data/loader.py
__getitem__
bsinghpratap/AGGCN
222
python
def __getitem__(self, key): ' ' if (not isinstance(key, int)): raise TypeError if ((key < 0) or (key >= len(self.data))): raise IndexError batch = self.data[key] batch_size = len(batch) batch = list(zip(*batch)) if (dataset == 'dataset/tacred'): assert (len(batch) == 10) else: assert (len(batch) == 7) lens = [len(x) for x in batch[0]] (batch, orig_idx) = sort_all(batch, lens) if (not self.eval): words = [word_dropout(sent, self.opt['word_dropout']) for sent in batch[0]] else: words = batch[0] words = get_long_tensor(words, batch_size) masks = torch.eq(words, 0) pos = get_long_tensor(batch[1], batch_size) deprel = get_long_tensor(batch[2], batch_size) head = get_long_tensor(batch[3], batch_size) subj_positions = get_long_tensor(batch[4], batch_size) obj_positions = get_long_tensor(batch[5], batch_size) rels = torch.LongTensor(batch[6]) return (words, masks, pos, deprel, head, subj_positions, obj_positions, rels, orig_idx)
def __getitem__(self, key): ' ' if (not isinstance(key, int)): raise TypeError if ((key < 0) or (key >= len(self.data))): raise IndexError batch = self.data[key] batch_size = len(batch) batch = list(zip(*batch)) if (dataset == 'dataset/tacred'): assert (len(batch) == 10) else: assert (len(batch) == 7) lens = [len(x) for x in batch[0]] (batch, orig_idx) = sort_all(batch, lens) if (not self.eval): words = [word_dropout(sent, self.opt['word_dropout']) for sent in batch[0]] else: words = batch[0] words = get_long_tensor(words, batch_size) masks = torch.eq(words, 0) pos = get_long_tensor(batch[1], batch_size) deprel = get_long_tensor(batch[2], batch_size) head = get_long_tensor(batch[3], batch_size) subj_positions = get_long_tensor(batch[4], batch_size) obj_positions = get_long_tensor(batch[5], batch_size) rels = torch.LongTensor(batch[6]) return (words, masks, pos, deprel, head, subj_positions, obj_positions, rels, orig_idx)<|docstring|>Get a batch with index.<|endoftext|>
dd4174bd5d46a1697eabf4abbd8fc6e1af24201c82e17d4d29b1818cfe9f4311
def test_create_and_retrieve_player(self): '\n Ensure we can create a new Player and then retrieve it\n ' new_player_name = 'New Player' new_player_gender = Player.MALE response = self.create_player(new_player_name, new_player_gender) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Player.objects.count(), 1) self.assertEqual(Player.objects.get().name, new_player_name)
Ensure we can create a new Player and then retrieve it
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_create_and_retrieve_player
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_create_and_retrieve_player(self): '\n \n ' new_player_name = 'New Player' new_player_gender = Player.MALE response = self.create_player(new_player_name, new_player_gender) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Player.objects.count(), 1) self.assertEqual(Player.objects.get().name, new_player_name)
def test_create_and_retrieve_player(self): '\n \n ' new_player_name = 'New Player' new_player_gender = Player.MALE response = self.create_player(new_player_name, new_player_gender) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Player.objects.count(), 1) self.assertEqual(Player.objects.get().name, new_player_name)<|docstring|>Ensure we can create a new Player and then retrieve it<|endoftext|>
fcf0a1e6a6172a9c9d03756dcf54ca5c13f9b36c8acbccd6f0754138507fed42
def test_create_duplicated_player(self): '\n Ensure we can create a new Player and we cannot create a duplicate.\n ' url = reverse('player-list') new_player_name = 'New Female Player' new_player_gender = Player.FEMALE response1 = self.create_player(new_player_name, new_player_gender) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_player(new_player_name, new_player_gender) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
Ensure we can create a new Player and we cannot create a duplicate.
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_create_duplicated_player
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_create_duplicated_player(self): '\n \n ' url = reverse('player-list') new_player_name = 'New Female Player' new_player_gender = Player.FEMALE response1 = self.create_player(new_player_name, new_player_gender) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_player(new_player_name, new_player_gender) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_duplicated_player(self): '\n \n ' url = reverse('player-list') new_player_name = 'New Female Player' new_player_gender = Player.FEMALE response1 = self.create_player(new_player_name, new_player_gender) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_player(new_player_name, new_player_gender) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Ensure we can create a new Player and we cannot create a duplicate.<|endoftext|>
5fddc766b628309ade820179df66a416ecea58e2e88a4e1e8e52fa43aad647b5
def test_retrieve_players_list(self): '\n Ensure we can retrieve a player\n ' new_player_name = 'New Female Player' new_player_gender = Player.FEMALE self.create_player(new_player_name, new_player_gender) url = reverse('player-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_player_name) self.assertEqual(response.data['results'][0]['gender'], new_player_gender)
Ensure we can retrieve a player
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_retrieve_players_list
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_retrieve_players_list(self): '\n \n ' new_player_name = 'New Female Player' new_player_gender = Player.FEMALE self.create_player(new_player_name, new_player_gender) url = reverse('player-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_player_name) self.assertEqual(response.data['results'][0]['gender'], new_player_gender)
def test_retrieve_players_list(self): '\n \n ' new_player_name = 'New Female Player' new_player_gender = Player.FEMALE self.create_player(new_player_name, new_player_gender) url = reverse('player-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_player_name) self.assertEqual(response.data['results'][0]['gender'], new_player_gender)<|docstring|>Ensure we can retrieve a player<|endoftext|>
bb4a1609a5d6c3a3465a9f51e10f1a12d1913c94e193687366d40c58c7d05425
def test_create_and_retrieve_game_category(self): '\n Ensure we can create a new GameCategory and then retrieve it\n ' new_game_category_name = 'New Game Category' response = self.create_game_category(new_game_category_name) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(GameCategory.objects.count(), 1) self.assertEqual(GameCategory.objects.get().name, new_game_category_name) print('PK {0}'.format(GameCategory.objects.get().pk))
Ensure we can create a new GameCategory and then retrieve it
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_create_and_retrieve_game_category
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_create_and_retrieve_game_category(self): '\n \n ' new_game_category_name = 'New Game Category' response = self.create_game_category(new_game_category_name) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(GameCategory.objects.count(), 1) self.assertEqual(GameCategory.objects.get().name, new_game_category_name) print('PK {0}'.format(GameCategory.objects.get().pk))
def test_create_and_retrieve_game_category(self): '\n \n ' new_game_category_name = 'New Game Category' response = self.create_game_category(new_game_category_name) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(GameCategory.objects.count(), 1) self.assertEqual(GameCategory.objects.get().name, new_game_category_name) print('PK {0}'.format(GameCategory.objects.get().pk))<|docstring|>Ensure we can create a new GameCategory and then retrieve it<|endoftext|>
4c2ef2655df21d5661b319e49af8c8578fbd9cdf8cc089cbf74e1bf17d1aa92e
def test_create_duplicated_game_category(self): '\n Ensure we can create a new GameCategory.\n ' url = reverse('gamecategory-list') new_game_category_name = 'New Game Category' data = {'name': new_game_category_name} response1 = self.create_game_category(new_game_category_name) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_game_category(new_game_category_name) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
Ensure we can create a new GameCategory.
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_create_duplicated_game_category
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_create_duplicated_game_category(self): '\n \n ' url = reverse('gamecategory-list') new_game_category_name = 'New Game Category' data = {'name': new_game_category_name} response1 = self.create_game_category(new_game_category_name) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_game_category(new_game_category_name) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_duplicated_game_category(self): '\n \n ' url = reverse('gamecategory-list') new_game_category_name = 'New Game Category' data = {'name': new_game_category_name} response1 = self.create_game_category(new_game_category_name) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_game_category(new_game_category_name) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Ensure we can create a new GameCategory.<|endoftext|>
0e4745ec870cdba4f263f0f32fd8b7ce6c757c76f3ec6943b7c34017ad336e7a
def test_retrieve_game_categories_list(self): '\n Ensure we can retrieve a game cagory\n ' new_game_category_name = 'New Game Category' self.create_game_category(new_game_category_name) url = reverse('gamecategory-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_game_category_name)
Ensure we can retrieve a game cagory
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_retrieve_game_categories_list
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_retrieve_game_categories_list(self): '\n \n ' new_game_category_name = 'New Game Category' self.create_game_category(new_game_category_name) url = reverse('gamecategory-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_game_category_name)
def test_retrieve_game_categories_list(self): '\n \n ' new_game_category_name = 'New Game Category' self.create_game_category(new_game_category_name) url = reverse('gamecategory-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_game_category_name)<|docstring|>Ensure we can retrieve a game cagory<|endoftext|>
12c2ecf07d64fa8c46b26ab7e13eaa9d58e648ad9ef8f9e6da54999b6022a330
def test_update_game_category(self): '\n Ensure we can update a single field for a game category\n ' new_game_category_name = 'Initial Name' response = self.create_game_category(new_game_category_name) url = reverse('gamecategory-detail', None, {response.data['pk']}) updated_game_category_name = 'Updated Game Category Name' data = {'name': updated_game_category_name} patch_response = self.client.patch(url, data, format='json') self.assertEqual(patch_response.status_code, status.HTTP_200_OK) self.assertEqual(patch_response.data['name'], updated_game_category_name)
Ensure we can update a single field for a game category
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_update_game_category
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_update_game_category(self): '\n \n ' new_game_category_name = 'Initial Name' response = self.create_game_category(new_game_category_name) url = reverse('gamecategory-detail', None, {response.data['pk']}) updated_game_category_name = 'Updated Game Category Name' data = {'name': updated_game_category_name} patch_response = self.client.patch(url, data, format='json') self.assertEqual(patch_response.status_code, status.HTTP_200_OK) self.assertEqual(patch_response.data['name'], updated_game_category_name)
def test_update_game_category(self): '\n \n ' new_game_category_name = 'Initial Name' response = self.create_game_category(new_game_category_name) url = reverse('gamecategory-detail', None, {response.data['pk']}) updated_game_category_name = 'Updated Game Category Name' data = {'name': updated_game_category_name} patch_response = self.client.patch(url, data, format='json') self.assertEqual(patch_response.status_code, status.HTTP_200_OK) self.assertEqual(patch_response.data['name'], updated_game_category_name)<|docstring|>Ensure we can update a single field for a game category<|endoftext|>
2d577827af1fba0794c1fe17ccddfe67e6dc48400b014307b531018210552a1b
def test_filter_game_category_by_name(self): '\n Ensure we can filter a game category by name\n ' game_category_name1 = 'First game category name' self.create_game_category(game_category_name1) game_caregory_name2 = 'Second game category name' self.create_game_category(game_caregory_name2) filter_by_name = {'name': game_category_name1} url = '{0}?{1}'.format(reverse('gamecategory-list'), urlencode(filter_by_name)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], game_category_name1)
Ensure we can filter a game category by name
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_filter_game_category_by_name
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_filter_game_category_by_name(self): '\n \n ' game_category_name1 = 'First game category name' self.create_game_category(game_category_name1) game_caregory_name2 = 'Second game category name' self.create_game_category(game_caregory_name2) filter_by_name = {'name': game_category_name1} url = '{0}?{1}'.format(reverse('gamecategory-list'), urlencode(filter_by_name)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], game_category_name1)
def test_filter_game_category_by_name(self): '\n \n ' game_category_name1 = 'First game category name' self.create_game_category(game_category_name1) game_caregory_name2 = 'Second game category name' self.create_game_category(game_caregory_name2) filter_by_name = {'name': game_category_name1} url = '{0}?{1}'.format(reverse('gamecategory-list'), urlencode(filter_by_name)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], game_category_name1)<|docstring|>Ensure we can filter a game category by name<|endoftext|>
4c2ef2655df21d5661b319e49af8c8578fbd9cdf8cc089cbf74e1bf17d1aa92e
def test_create_duplicated_game_category(self): '\n Ensure we can create a new GameCategory.\n ' url = reverse('gamecategory-list') new_game_category_name = 'New Game Category' data = {'name': new_game_category_name} response1 = self.create_game_category(new_game_category_name) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_game_category(new_game_category_name) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
Ensure we can create a new GameCategory.
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_create_duplicated_game_category
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_create_duplicated_game_category(self): '\n \n ' url = reverse('gamecategory-list') new_game_category_name = 'New Game Category' data = {'name': new_game_category_name} response1 = self.create_game_category(new_game_category_name) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_game_category(new_game_category_name) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_duplicated_game_category(self): '\n \n ' url = reverse('gamecategory-list') new_game_category_name = 'New Game Category' data = {'name': new_game_category_name} response1 = self.create_game_category(new_game_category_name) self.assertEqual(response1.status_code, status.HTTP_201_CREATED) response2 = self.create_game_category(new_game_category_name) self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Ensure we can create a new GameCategory.<|endoftext|>
0e4745ec870cdba4f263f0f32fd8b7ce6c757c76f3ec6943b7c34017ad336e7a
def test_retrieve_game_categories_list(self): '\n Ensure we can retrieve a game cagory\n ' new_game_category_name = 'New Game Category' self.create_game_category(new_game_category_name) url = reverse('gamecategory-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_game_category_name)
Ensure we can retrieve a game cagory
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_retrieve_game_categories_list
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_retrieve_game_categories_list(self): '\n \n ' new_game_category_name = 'New Game Category' self.create_game_category(new_game_category_name) url = reverse('gamecategory-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_game_category_name)
def test_retrieve_game_categories_list(self): '\n \n ' new_game_category_name = 'New Game Category' self.create_game_category(new_game_category_name) url = reverse('gamecategory-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], new_game_category_name)<|docstring|>Ensure we can retrieve a game cagory<|endoftext|>
12c2ecf07d64fa8c46b26ab7e13eaa9d58e648ad9ef8f9e6da54999b6022a330
def test_update_game_category(self): '\n Ensure we can update a single field for a game category\n ' new_game_category_name = 'Initial Name' response = self.create_game_category(new_game_category_name) url = reverse('gamecategory-detail', None, {response.data['pk']}) updated_game_category_name = 'Updated Game Category Name' data = {'name': updated_game_category_name} patch_response = self.client.patch(url, data, format='json') self.assertEqual(patch_response.status_code, status.HTTP_200_OK) self.assertEqual(patch_response.data['name'], updated_game_category_name)
Ensure we can update a single field for a game category
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_update_game_category
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_update_game_category(self): '\n \n ' new_game_category_name = 'Initial Name' response = self.create_game_category(new_game_category_name) url = reverse('gamecategory-detail', None, {response.data['pk']}) updated_game_category_name = 'Updated Game Category Name' data = {'name': updated_game_category_name} patch_response = self.client.patch(url, data, format='json') self.assertEqual(patch_response.status_code, status.HTTP_200_OK) self.assertEqual(patch_response.data['name'], updated_game_category_name)
def test_update_game_category(self): '\n \n ' new_game_category_name = 'Initial Name' response = self.create_game_category(new_game_category_name) url = reverse('gamecategory-detail', None, {response.data['pk']}) updated_game_category_name = 'Updated Game Category Name' data = {'name': updated_game_category_name} patch_response = self.client.patch(url, data, format='json') self.assertEqual(patch_response.status_code, status.HTTP_200_OK) self.assertEqual(patch_response.data['name'], updated_game_category_name)<|docstring|>Ensure we can update a single field for a game category<|endoftext|>
2d577827af1fba0794c1fe17ccddfe67e6dc48400b014307b531018210552a1b
def test_filter_game_category_by_name(self): '\n Ensure we can filter a game category by name\n ' game_category_name1 = 'First game category name' self.create_game_category(game_category_name1) game_caregory_name2 = 'Second game category name' self.create_game_category(game_caregory_name2) filter_by_name = {'name': game_category_name1} url = '{0}?{1}'.format(reverse('gamecategory-list'), urlencode(filter_by_name)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], game_category_name1)
Ensure we can filter a game category by name
Chapter 4/restful_python_chapter_04_05/gamesapi/games/tests.py
test_filter_game_category_by_name
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
python
def test_filter_game_category_by_name(self): '\n \n ' game_category_name1 = 'First game category name' self.create_game_category(game_category_name1) game_caregory_name2 = 'Second game category name' self.create_game_category(game_caregory_name2) filter_by_name = {'name': game_category_name1} url = '{0}?{1}'.format(reverse('gamecategory-list'), urlencode(filter_by_name)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], game_category_name1)
def test_filter_game_category_by_name(self): '\n \n ' game_category_name1 = 'First game category name' self.create_game_category(game_category_name1) game_caregory_name2 = 'Second game category name' self.create_game_category(game_caregory_name2) filter_by_name = {'name': game_category_name1} url = '{0}?{1}'.format(reverse('gamecategory-list'), urlencode(filter_by_name)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertEqual(response.data['results'][0]['name'], game_category_name1)<|docstring|>Ensure we can filter a game category by name<|endoftext|>
7013776be0e7a5579ac12ce8b5ff4bd68a30212bde8f1c4da60c2534167bc73a
def __init__(self) -> None: 'Initialize DataDownload.' self._make_data_dir() self._download_data()
Initialize DataDownload.
uta_tools/data/data_downloads.py
__init__
cancervariants/uta_tools
1
python
def __init__(self) -> None: self._make_data_dir() self._download_data()
def __init__(self) -> None: self._make_data_dir() self._download_data()<|docstring|>Initialize DataDownload.<|endoftext|>
4e7adb1ffc4a3243d82adb6367c417905ec1394383a1dc2753dd95835ab56d68
def _make_data_dir(self) -> None: 'Make data directory' self._data_dir = (APP_ROOT / 'data') self._data_dir.mkdir(exist_ok=True, parents=True)
Make data directory
uta_tools/data/data_downloads.py
_make_data_dir
cancervariants/uta_tools
1
python
def _make_data_dir(self) -> None: self._data_dir = (APP_ROOT / 'data') self._data_dir.mkdir(exist_ok=True, parents=True)
def _make_data_dir(self) -> None: self._data_dir = (APP_ROOT / 'data') self._data_dir.mkdir(exist_ok=True, parents=True)<|docstring|>Make data directory<|endoftext|>
ab0ec3201baf5faaba35da172ac973208c253f3f6fd7795036923b5f61f086f5
def _download_data(self) -> None: 'Download data files needed for uta_tools.' with FTP('ftp.ncbi.nlm.nih.gov') as ftp: ftp.login() self._download_mane_summary(ftp) self._download_lrg_refseq_gene_data(ftp)
Download data files needed for uta_tools.
uta_tools/data/data_downloads.py
_download_data
cancervariants/uta_tools
1
python
def _download_data(self) -> None: with FTP('ftp.ncbi.nlm.nih.gov') as ftp: ftp.login() self._download_mane_summary(ftp) self._download_lrg_refseq_gene_data(ftp)
def _download_data(self) -> None: with FTP('ftp.ncbi.nlm.nih.gov') as ftp: ftp.login() self._download_mane_summary(ftp) self._download_lrg_refseq_gene_data(ftp)<|docstring|>Download data files needed for uta_tools.<|endoftext|>
b70979a5d82fd5524aea2d6dcc77dc76335e7c6a79d3724443445e94a65609e4
def _download_mane_summary(self, ftp: FTP) -> None: 'Download latest MANE summary data and set path\n\n :param FTP ftp: FTP connection\n ' ftp.cwd('/refseq/MANE/MANE_human/current') files = ftp.nlst() mane_summary_file = [f for f in files if f.endswith('.summary.txt.gz')] if (not mane_summary_file): raise Exception('Unable to download MANE summary data') mane_summary_file = mane_summary_file[0] self._mane_summary_path = (self._data_dir / mane_summary_file[:(- 3)]) mane_data_path = (self._data_dir / mane_summary_file) if (not self._mane_summary_path.exists()): with open(mane_data_path, 'wb') as fp: ftp.retrbinary(f'RETR {mane_summary_file}', fp.write) with gzip.open(mane_data_path, 'rb') as f_in: with open(self._mane_summary_path, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) remove(mane_data_path)
Download latest MANE summary data and set path :param FTP ftp: FTP connection
uta_tools/data/data_downloads.py
_download_mane_summary
cancervariants/uta_tools
1
python
def _download_mane_summary(self, ftp: FTP) -> None: 'Download latest MANE summary data and set path\n\n :param FTP ftp: FTP connection\n ' ftp.cwd('/refseq/MANE/MANE_human/current') files = ftp.nlst() mane_summary_file = [f for f in files if f.endswith('.summary.txt.gz')] if (not mane_summary_file): raise Exception('Unable to download MANE summary data') mane_summary_file = mane_summary_file[0] self._mane_summary_path = (self._data_dir / mane_summary_file[:(- 3)]) mane_data_path = (self._data_dir / mane_summary_file) if (not self._mane_summary_path.exists()): with open(mane_data_path, 'wb') as fp: ftp.retrbinary(f'RETR {mane_summary_file}', fp.write) with gzip.open(mane_data_path, 'rb') as f_in: with open(self._mane_summary_path, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) remove(mane_data_path)
def _download_mane_summary(self, ftp: FTP) -> None: 'Download latest MANE summary data and set path\n\n :param FTP ftp: FTP connection\n ' ftp.cwd('/refseq/MANE/MANE_human/current') files = ftp.nlst() mane_summary_file = [f for f in files if f.endswith('.summary.txt.gz')] if (not mane_summary_file): raise Exception('Unable to download MANE summary data') mane_summary_file = mane_summary_file[0] self._mane_summary_path = (self._data_dir / mane_summary_file[:(- 3)]) mane_data_path = (self._data_dir / mane_summary_file) if (not self._mane_summary_path.exists()): with open(mane_data_path, 'wb') as fp: ftp.retrbinary(f'RETR {mane_summary_file}', fp.write) with gzip.open(mane_data_path, 'rb') as f_in: with open(self._mane_summary_path, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) remove(mane_data_path)<|docstring|>Download latest MANE summary data and set path :param FTP ftp: FTP connection<|endoftext|>
85d34efda643691ddc7e2300b3fc747f6513c3ab256eba6608b9b1c69fd3d6ea
def _download_lrg_refseq_gene_data(self, ftp: FTP) -> None: 'Download latest LRG_RefSeqGene and set path\n\n :param FTP ftp: FTP connection\n ' lrg_refseqgene_file = 'LRG_RefSeqGene' ftp_dir_path = '/refseq/H_sapiens/RefSeqGene/' ftp_file_path = f'{ftp_dir_path}{lrg_refseqgene_file}' timestamp = ftp.voidcmd(f'MDTM {ftp_file_path}')[4:].strip() date = str(parser.parse(timestamp)).split()[0] version = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%Y%m%d') fn_versioned = f'{lrg_refseqgene_file}_{version}' lrg_refseqgene_path = (self._data_dir / lrg_refseqgene_file) self._lrg_refseqgene_path = (self._data_dir / fn_versioned) if (not self._lrg_refseqgene_path.exists()): ftp.cwd(ftp_dir_path) with open(lrg_refseqgene_path, 'wb') as fp: ftp.retrbinary(f'RETR {lrg_refseqgene_file}', fp.write) with open(lrg_refseqgene_path, 'rb') as f_in: with open(self._lrg_refseqgene_path, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) remove(lrg_refseqgene_path)
Download latest LRG_RefSeqGene and set path :param FTP ftp: FTP connection
uta_tools/data/data_downloads.py
_download_lrg_refseq_gene_data
cancervariants/uta_tools
1
python
def _download_lrg_refseq_gene_data(self, ftp: FTP) -> None: 'Download latest LRG_RefSeqGene and set path\n\n :param FTP ftp: FTP connection\n ' lrg_refseqgene_file = 'LRG_RefSeqGene' ftp_dir_path = '/refseq/H_sapiens/RefSeqGene/' ftp_file_path = f'{ftp_dir_path}{lrg_refseqgene_file}' timestamp = ftp.voidcmd(f'MDTM {ftp_file_path}')[4:].strip() date = str(parser.parse(timestamp)).split()[0] version = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%Y%m%d') fn_versioned = f'{lrg_refseqgene_file}_{version}' lrg_refseqgene_path = (self._data_dir / lrg_refseqgene_file) self._lrg_refseqgene_path = (self._data_dir / fn_versioned) if (not self._lrg_refseqgene_path.exists()): ftp.cwd(ftp_dir_path) with open(lrg_refseqgene_path, 'wb') as fp: ftp.retrbinary(f'RETR {lrg_refseqgene_file}', fp.write) with open(lrg_refseqgene_path, 'rb') as f_in: with open(self._lrg_refseqgene_path, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) remove(lrg_refseqgene_path)
def _download_lrg_refseq_gene_data(self, ftp: FTP) -> None: 'Download latest LRG_RefSeqGene and set path\n\n :param FTP ftp: FTP connection\n ' lrg_refseqgene_file = 'LRG_RefSeqGene' ftp_dir_path = '/refseq/H_sapiens/RefSeqGene/' ftp_file_path = f'{ftp_dir_path}{lrg_refseqgene_file}' timestamp = ftp.voidcmd(f'MDTM {ftp_file_path}')[4:].strip() date = str(parser.parse(timestamp)).split()[0] version = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%Y%m%d') fn_versioned = f'{lrg_refseqgene_file}_{version}' lrg_refseqgene_path = (self._data_dir / lrg_refseqgene_file) self._lrg_refseqgene_path = (self._data_dir / fn_versioned) if (not self._lrg_refseqgene_path.exists()): ftp.cwd(ftp_dir_path) with open(lrg_refseqgene_path, 'wb') as fp: ftp.retrbinary(f'RETR {lrg_refseqgene_file}', fp.write) with open(lrg_refseqgene_path, 'rb') as f_in: with open(self._lrg_refseqgene_path, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) remove(lrg_refseqgene_path)<|docstring|>Download latest LRG_RefSeqGene and set path :param FTP ftp: FTP connection<|endoftext|>
b81e15fdd464f461d5a7a6a96e8ffabfe944b68d305c8183a03eca11686c4eae
@generic.schedule_injective.register(['hls']) def schedule_injective(outs): 'Schedule for injective op.\n\n Parameters\n ----------\n outs: Array of Tensor\n The computation graph description of reduce in the format\n of an array of tensors.\n\n Returns\n -------\n sch: Schedule\n The computation schedule for the op.\n ' outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs) s = tvm.create_schedule([x.op for x in outs]) tvm.schedule.AutoInlineInjective(s) for out in outs: fused = s[out].fuse(*s[out].op.axis) (px, x) = s[out].split(fused, nparts=1) s[out].bind(px, tvm.thread_axis('pipeline')) return s
Schedule for injective op. Parameters ---------- outs: Array of Tensor The computation graph description of reduce in the format of an array of tensors. Returns ------- sch: Schedule The computation schedule for the op.
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/python/topi/hls/injective.py
schedule_injective
mengkai94/training_results_v0.6
64
python
@generic.schedule_injective.register(['hls']) def schedule_injective(outs): 'Schedule for injective op.\n\n Parameters\n ----------\n outs: Array of Tensor\n The computation graph description of reduce in the format\n of an array of tensors.\n\n Returns\n -------\n sch: Schedule\n The computation schedule for the op.\n ' outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs) s = tvm.create_schedule([x.op for x in outs]) tvm.schedule.AutoInlineInjective(s) for out in outs: fused = s[out].fuse(*s[out].op.axis) (px, x) = s[out].split(fused, nparts=1) s[out].bind(px, tvm.thread_axis('pipeline')) return s
@generic.schedule_injective.register(['hls']) def schedule_injective(outs): 'Schedule for injective op.\n\n Parameters\n ----------\n outs: Array of Tensor\n The computation graph description of reduce in the format\n of an array of tensors.\n\n Returns\n -------\n sch: Schedule\n The computation schedule for the op.\n ' outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs) s = tvm.create_schedule([x.op for x in outs]) tvm.schedule.AutoInlineInjective(s) for out in outs: fused = s[out].fuse(*s[out].op.axis) (px, x) = s[out].split(fused, nparts=1) s[out].bind(px, tvm.thread_axis('pipeline')) return s<|docstring|>Schedule for injective op. Parameters ---------- outs: Array of Tensor The computation graph description of reduce in the format of an array of tensors. Returns ------- sch: Schedule The computation schedule for the op.<|endoftext|>
84f00d82cf62e892d9b0161d73ae7a88dc4d482c909799dc34d2c421750a95e2
def find_objects(img, mask, device, debug=None): 'Find all objects and color them blue.\n\n Inputs:\n img = image that the objects will be overlayed\n mask = what is used for object detection\n device = device number. Used to count steps in the pipeline\n debug = None, print, or plot. Print = save to file, Plot = print to screen.\n\n Returns:\n device = device number\n objects = list of contours\n hierarchy = contour hierarchy list\n\n :param img: numpy array\n :param mask: numpy array\n :param device: int\n :param debug: str\n :return device: int\n :return objects: list\n :return hierarchy: list\n ' device += 1 mask1 = np.copy(mask) ori_img = np.copy(img) (objects, hierarchy) = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) for (i, cnt) in enumerate(objects): cv2.drawContours(ori_img, objects, i, (255, 102, 255), (- 1), lineType=8, hierarchy=hierarchy) if (debug == 'print'): print_image(ori_img, (str(device) + '_id_objects.png')) elif (debug == 'plot'): plot_image(ori_img) return (device, objects, hierarchy)
Find all objects and color them blue. Inputs: img = image that the objects will be overlayed mask = what is used for object detection device = device number. Used to count steps in the pipeline debug = None, print, or plot. Print = save to file, Plot = print to screen. Returns: device = device number objects = list of contours hierarchy = contour hierarchy list :param img: numpy array :param mask: numpy array :param device: int :param debug: str :return device: int :return objects: list :return hierarchy: list
plantcv/find_objects.py
find_objects
mohithc/mohi
2
python
def find_objects(img, mask, device, debug=None): 'Find all objects and color them blue.\n\n Inputs:\n img = image that the objects will be overlayed\n mask = what is used for object detection\n device = device number. Used to count steps in the pipeline\n debug = None, print, or plot. Print = save to file, Plot = print to screen.\n\n Returns:\n device = device number\n objects = list of contours\n hierarchy = contour hierarchy list\n\n :param img: numpy array\n :param mask: numpy array\n :param device: int\n :param debug: str\n :return device: int\n :return objects: list\n :return hierarchy: list\n ' device += 1 mask1 = np.copy(mask) ori_img = np.copy(img) (objects, hierarchy) = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) for (i, cnt) in enumerate(objects): cv2.drawContours(ori_img, objects, i, (255, 102, 255), (- 1), lineType=8, hierarchy=hierarchy) if (debug == 'print'): print_image(ori_img, (str(device) + '_id_objects.png')) elif (debug == 'plot'): plot_image(ori_img) return (device, objects, hierarchy)
def find_objects(img, mask, device, debug=None): 'Find all objects and color them blue.\n\n Inputs:\n img = image that the objects will be overlayed\n mask = what is used for object detection\n device = device number. Used to count steps in the pipeline\n debug = None, print, or plot. Print = save to file, Plot = print to screen.\n\n Returns:\n device = device number\n objects = list of contours\n hierarchy = contour hierarchy list\n\n :param img: numpy array\n :param mask: numpy array\n :param device: int\n :param debug: str\n :return device: int\n :return objects: list\n :return hierarchy: list\n ' device += 1 mask1 = np.copy(mask) ori_img = np.copy(img) (objects, hierarchy) = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) for (i, cnt) in enumerate(objects): cv2.drawContours(ori_img, objects, i, (255, 102, 255), (- 1), lineType=8, hierarchy=hierarchy) if (debug == 'print'): print_image(ori_img, (str(device) + '_id_objects.png')) elif (debug == 'plot'): plot_image(ori_img) return (device, objects, hierarchy)<|docstring|>Find all objects and color them blue. Inputs: img = image that the objects will be overlayed mask = what is used for object detection device = device number. Used to count steps in the pipeline debug = None, print, or plot. Print = save to file, Plot = print to screen. Returns: device = device number objects = list of contours hierarchy = contour hierarchy list :param img: numpy array :param mask: numpy array :param device: int :param debug: str :return device: int :return objects: list :return hierarchy: list<|endoftext|>
1a1facaab075056992a71140adce4130c503e694c04efee946336556e5c38c6c
def main(argv=None): 'pyLint' parsed = cliargs.argvParse() outputs.handleArgs(parsed) sys.exit(0)
pyLint
gencodata/gencodata.py
main
drhaney/gencodata
1
python
def main(argv=None): parsed = cliargs.argvParse() outputs.handleArgs(parsed) sys.exit(0)
def main(argv=None): parsed = cliargs.argvParse() outputs.handleArgs(parsed) sys.exit(0)<|docstring|>pyLint<|endoftext|>
cd8ca8860ba7822878800ce50c92d27073708e56f34c260d0d35fdc2c56316bc
@classmethod def clusters_uri(cls, filters=None): "Construct clusters uri with optional filters\n\n :param filters: Optional k:v dict that's converted to url query\n :returns: url string\n " url = '/clusters' if filters: url = cls.add_filters(url, filters) return url
Construct clusters uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string
magnum/tests/functional/api/v1/clients/cluster_client.py
clusters_uri
QumulusTechnology/magnum
319
python
@classmethod def clusters_uri(cls, filters=None): "Construct clusters uri with optional filters\n\n :param filters: Optional k:v dict that's converted to url query\n :returns: url string\n " url = '/clusters' if filters: url = cls.add_filters(url, filters) return url
@classmethod def clusters_uri(cls, filters=None): "Construct clusters uri with optional filters\n\n :param filters: Optional k:v dict that's converted to url query\n :returns: url string\n " url = '/clusters' if filters: url = cls.add_filters(url, filters) return url<|docstring|>Construct clusters uri with optional filters :param filters: Optional k:v dict that's converted to url query :returns: url string<|endoftext|>
c01761bc2dc315ca23299f70530b35d8b79f1be212160b04f4be54f2d4807454
@classmethod def cluster_uri(cls, cluster_id): 'Construct cluster uri\n\n :param cluster_id: cluster uuid or name\n :returns: url string\n ' return '{0}/{1}'.format(cls.clusters_uri(), cluster_id)
Construct cluster uri :param cluster_id: cluster uuid or name :returns: url string
magnum/tests/functional/api/v1/clients/cluster_client.py
cluster_uri
QumulusTechnology/magnum
319
python
@classmethod def cluster_uri(cls, cluster_id): 'Construct cluster uri\n\n :param cluster_id: cluster uuid or name\n :returns: url string\n ' return '{0}/{1}'.format(cls.clusters_uri(), cluster_id)
@classmethod def cluster_uri(cls, cluster_id): 'Construct cluster uri\n\n :param cluster_id: cluster uuid or name\n :returns: url string\n ' return '{0}/{1}'.format(cls.clusters_uri(), cluster_id)<|docstring|>Construct cluster uri :param cluster_id: cluster uuid or name :returns: url string<|endoftext|>
7c2ad29f80707f9734c4d9e2a00f18fafd75b2d56f905b9e68535943b195f1c0
def list_clusters(self, filters=None, **kwargs): "Makes GET /clusters request and returns ClusterCollection\n\n Abstracts REST call to return all clusters\n\n :param filters: Optional k:v dict that's converted to url query\n :returns: response object and ClusterCollection object\n " (resp, body) = self.get(self.clusters_uri(filters), **kwargs) return self.deserialize(resp, body, cluster_model.ClusterCollection)
Makes GET /clusters request and returns ClusterCollection Abstracts REST call to return all clusters :param filters: Optional k:v dict that's converted to url query :returns: response object and ClusterCollection object
magnum/tests/functional/api/v1/clients/cluster_client.py
list_clusters
QumulusTechnology/magnum
319
python
def list_clusters(self, filters=None, **kwargs): "Makes GET /clusters request and returns ClusterCollection\n\n Abstracts REST call to return all clusters\n\n :param filters: Optional k:v dict that's converted to url query\n :returns: response object and ClusterCollection object\n " (resp, body) = self.get(self.clusters_uri(filters), **kwargs) return self.deserialize(resp, body, cluster_model.ClusterCollection)
def list_clusters(self, filters=None, **kwargs): "Makes GET /clusters request and returns ClusterCollection\n\n Abstracts REST call to return all clusters\n\n :param filters: Optional k:v dict that's converted to url query\n :returns: response object and ClusterCollection object\n " (resp, body) = self.get(self.clusters_uri(filters), **kwargs) return self.deserialize(resp, body, cluster_model.ClusterCollection)<|docstring|>Makes GET /clusters request and returns ClusterCollection Abstracts REST call to return all clusters :param filters: Optional k:v dict that's converted to url query :returns: response object and ClusterCollection object<|endoftext|>
817a6c11858b19973f7bd863493f3808d07d22b858bbc7f87e550d2426bd443c
def get_cluster(self, cluster_id, **kwargs): 'Makes GET /cluster request and returns ClusterEntity\n\n Abstracts REST call to return a single cluster based on uuid or name\n\n :param cluster_id: cluster uuid or name\n :returns: response object and ClusterCollection object\n ' (resp, body) = self.get(self.cluster_uri(cluster_id)) return self.deserialize(resp, body, cluster_model.ClusterEntity)
Makes GET /cluster request and returns ClusterEntity Abstracts REST call to return a single cluster based on uuid or name :param cluster_id: cluster uuid or name :returns: response object and ClusterCollection object
magnum/tests/functional/api/v1/clients/cluster_client.py
get_cluster
QumulusTechnology/magnum
319
python
def get_cluster(self, cluster_id, **kwargs): 'Makes GET /cluster request and returns ClusterEntity\n\n Abstracts REST call to return a single cluster based on uuid or name\n\n :param cluster_id: cluster uuid or name\n :returns: response object and ClusterCollection object\n ' (resp, body) = self.get(self.cluster_uri(cluster_id)) return self.deserialize(resp, body, cluster_model.ClusterEntity)
def get_cluster(self, cluster_id, **kwargs): 'Makes GET /cluster request and returns ClusterEntity\n\n Abstracts REST call to return a single cluster based on uuid or name\n\n :param cluster_id: cluster uuid or name\n :returns: response object and ClusterCollection object\n ' (resp, body) = self.get(self.cluster_uri(cluster_id)) return self.deserialize(resp, body, cluster_model.ClusterEntity)<|docstring|>Makes GET /cluster request and returns ClusterEntity Abstracts REST call to return a single cluster based on uuid or name :param cluster_id: cluster uuid or name :returns: response object and ClusterCollection object<|endoftext|>
9dd3fb01a1dcfd25c9e40b6ec6954fc175349cf3f74616b104cdaf575ff5aeea
def post_cluster(self, model, **kwargs): 'Makes POST /cluster request and returns ClusterIdEntity\n\n Abstracts REST call to create new cluster\n\n :param model: ClusterEntity\n :returns: response object and ClusterIdEntity object\n ' (resp, body) = self.post(self.clusters_uri(), body=model.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity)
Makes POST /cluster request and returns ClusterIdEntity Abstracts REST call to create new cluster :param model: ClusterEntity :returns: response object and ClusterIdEntity object
magnum/tests/functional/api/v1/clients/cluster_client.py
post_cluster
QumulusTechnology/magnum
319
python
def post_cluster(self, model, **kwargs): 'Makes POST /cluster request and returns ClusterIdEntity\n\n Abstracts REST call to create new cluster\n\n :param model: ClusterEntity\n :returns: response object and ClusterIdEntity object\n ' (resp, body) = self.post(self.clusters_uri(), body=model.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity)
def post_cluster(self, model, **kwargs): 'Makes POST /cluster request and returns ClusterIdEntity\n\n Abstracts REST call to create new cluster\n\n :param model: ClusterEntity\n :returns: response object and ClusterIdEntity object\n ' (resp, body) = self.post(self.clusters_uri(), body=model.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity)<|docstring|>Makes POST /cluster request and returns ClusterIdEntity Abstracts REST call to create new cluster :param model: ClusterEntity :returns: response object and ClusterIdEntity object<|endoftext|>
6940f4d7fc2eacd504476d06d91d5cb9ae43b403df1e97455124c3901e0570af
def patch_cluster(self, cluster_id, clusterpatch_listmodel, **kwargs): 'Makes PATCH /cluster request and returns ClusterIdEntity\n\n Abstracts REST call to update cluster attributes\n\n :param cluster_id: UUID of cluster\n :param clusterpatch_listmodel: ClusterPatchCollection\n :returns: response object and ClusterIdEntity object\n ' (resp, body) = self.patch(self.cluster_uri(cluster_id), body=clusterpatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity)
Makes PATCH /cluster request and returns ClusterIdEntity Abstracts REST call to update cluster attributes :param cluster_id: UUID of cluster :param clusterpatch_listmodel: ClusterPatchCollection :returns: response object and ClusterIdEntity object
magnum/tests/functional/api/v1/clients/cluster_client.py
patch_cluster
QumulusTechnology/magnum
319
python
def patch_cluster(self, cluster_id, clusterpatch_listmodel, **kwargs): 'Makes PATCH /cluster request and returns ClusterIdEntity\n\n Abstracts REST call to update cluster attributes\n\n :param cluster_id: UUID of cluster\n :param clusterpatch_listmodel: ClusterPatchCollection\n :returns: response object and ClusterIdEntity object\n ' (resp, body) = self.patch(self.cluster_uri(cluster_id), body=clusterpatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity)
def patch_cluster(self, cluster_id, clusterpatch_listmodel, **kwargs): 'Makes PATCH /cluster request and returns ClusterIdEntity\n\n Abstracts REST call to update cluster attributes\n\n :param cluster_id: UUID of cluster\n :param clusterpatch_listmodel: ClusterPatchCollection\n :returns: response object and ClusterIdEntity object\n ' (resp, body) = self.patch(self.cluster_uri(cluster_id), body=clusterpatch_listmodel.to_json(), **kwargs) return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity)<|docstring|>Makes PATCH /cluster request and returns ClusterIdEntity Abstracts REST call to update cluster attributes :param cluster_id: UUID of cluster :param clusterpatch_listmodel: ClusterPatchCollection :returns: response object and ClusterIdEntity object<|endoftext|>
8b542fe63bf812bca60594e1f4fae2daab9c8b483229c7d1a77bec37c8f8e695
def delete_cluster(self, cluster_id, **kwargs): 'Makes DELETE /cluster request and returns response object\n\n Abstracts REST call to delete cluster based on uuid or name\n\n :param cluster_id: UUID or name of cluster\n :returns: response object\n ' return self.delete(self.cluster_uri(cluster_id), **kwargs)
Makes DELETE /cluster request and returns response object Abstracts REST call to delete cluster based on uuid or name :param cluster_id: UUID or name of cluster :returns: response object
magnum/tests/functional/api/v1/clients/cluster_client.py
delete_cluster
QumulusTechnology/magnum
319
python
def delete_cluster(self, cluster_id, **kwargs): 'Makes DELETE /cluster request and returns response object\n\n Abstracts REST call to delete cluster based on uuid or name\n\n :param cluster_id: UUID or name of cluster\n :returns: response object\n ' return self.delete(self.cluster_uri(cluster_id), **kwargs)
def delete_cluster(self, cluster_id, **kwargs): 'Makes DELETE /cluster request and returns response object\n\n Abstracts REST call to delete cluster based on uuid or name\n\n :param cluster_id: UUID or name of cluster\n :returns: response object\n ' return self.delete(self.cluster_uri(cluster_id), **kwargs)<|docstring|>Makes DELETE /cluster request and returns response object Abstracts REST call to delete cluster based on uuid or name :param cluster_id: UUID or name of cluster :returns: response object<|endoftext|>
a7af05f25f26e1402f537c5f3c3c4da16b6f7b9cb83f0f826d3d0e3806690b15
def interpolateImages(image1, image2, dist1I, distI2): ' interpolate 2D images - \n ' imageInterp = (((image1 * distI2) + (image2 * dist1I)) / (dist1I + distI2)) return imageInterp
interpolate 2D images -
SPGPylibs/PHItools/phifdt_pipe_modules.py
interpolateImages
vivivum/SPGPylibs
3
python
def interpolateImages(image1, image2, dist1I, distI2): ' \n ' imageInterp = (((image1 * distI2) + (image2 * dist1I)) / (dist1I + distI2)) return imageInterp
def interpolateImages(image1, image2, dist1I, distI2): ' \n ' imageInterp = (((image1 * distI2) + (image2 * dist1I)) / (dist1I + distI2)) return imageInterp<|docstring|>interpolate 2D images -<|endoftext|>
bde0951b0375d8478a6e6f0d7f6090e036de978f8498fc75f2a41affee620c22
def applyPrefilter(data, wvltsData, prefilter, prefScale, wvltsPref, direction, scaledown=8, verbose=False): 'PHI prefilter. Version from K. Albert.\n ' prefToApply = np.zeros((6, prefilter.shape[1], prefilter.shape[2])) for i in range(0, 6): wvlCurr = wvltsData[i] valueClosest = min(wvltsPref, key=(lambda x: abs((x - wvlCurr)))) if verbose: print('iter', i, 'wvlCurr', wvlCurr) print('iter', i, 'valueClosest', valueClosest) indexClosest = wvltsPref.index(valueClosest) if verbose: print('iter', i, 'indexClosest', indexClosest) if (valueClosest < wvlCurr): indexBefore = indexClosest indexAfter = (indexClosest + 1) else: indexAfter = indexClosest indexBefore = (indexClosest - 1) dist1I = abs((wvltsPref[indexBefore] - wvltsData[i])) distI2 = abs((wvltsPref[indexAfter] - wvltsData[i])) prefToApply[(i, :, :)] = interpolateImages(prefilter[indexBefore], prefilter[indexAfter], dist1I, distI2) if verbose: print('mean prefValue Before:', (np.mean(prefilter[indexBefore]) * 256)) print('mean prefValue After:', (np.mean(prefilter[indexAfter]) * 256)) print('distance1:', dist1I) print('distance2:', distI2) print('percentage:', (distI2 / (dist1I + distI2))) if verbose: print('mean prefilter:', (np.mean(prefToApply[(i, :, :)]) * 256)) prefToApply[(i, :, :)] = (prefToApply[(i, :, :)] / prefScale) if verbose: print('mean prefilter:', np.mean(prefToApply[(i, :, :)])) if verbose: print('Reshaping prefilter:') print(prefToApply.shape) print(data.shape) if (data.shape[2] != prefToApply.shape[1]): FOV_Start_y = int(((prefToApply.shape[1] / 2) - (data.shape[2] / 2))) FOV_End_y = int(((prefToApply.shape[1] / 2) + (data.shape[2] / 2))) prefToApply = prefToApply[(:, FOV_Start_y:FOV_End_y, :)] if verbose: print(prefToApply.shape) if (data.shape[3] != prefToApply.shape[2]): FOV_Start_x = int(((prefToApply.shape[2] / 2) - (data.shape[3] / 2))) FOV_End_x = int(((prefToApply.shape[2] / 2) + (data.shape[3] / 2))) prefToApply = prefToApply[(:, :, FOV_Start_x:FOV_End_x)] if verbose: 
print(prefToApply.shape) dataPrefApplied = np.zeros(data.shape) for i in range(0, 4): if (direction == 1): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] * prefToApply) elif (direction == (- 1)): dataPrefApplied[(:, i, :, :)] = ((data[(:, i, :, :)] / prefToApply) / scaledown) else: print('Ivnalid direction! Must be 1 (mult) or -1 (div).') return dataPrefApplied
PHI prefilter. Version from K. Albert.
SPGPylibs/PHItools/phifdt_pipe_modules.py
applyPrefilter
vivivum/SPGPylibs
3
python
def applyPrefilter(data, wvltsData, prefilter, prefScale, wvltsPref, direction, scaledown=8, verbose=False): '\n ' prefToApply = np.zeros((6, prefilter.shape[1], prefilter.shape[2])) for i in range(0, 6): wvlCurr = wvltsData[i] valueClosest = min(wvltsPref, key=(lambda x: abs((x - wvlCurr)))) if verbose: print('iter', i, 'wvlCurr', wvlCurr) print('iter', i, 'valueClosest', valueClosest) indexClosest = wvltsPref.index(valueClosest) if verbose: print('iter', i, 'indexClosest', indexClosest) if (valueClosest < wvlCurr): indexBefore = indexClosest indexAfter = (indexClosest + 1) else: indexAfter = indexClosest indexBefore = (indexClosest - 1) dist1I = abs((wvltsPref[indexBefore] - wvltsData[i])) distI2 = abs((wvltsPref[indexAfter] - wvltsData[i])) prefToApply[(i, :, :)] = interpolateImages(prefilter[indexBefore], prefilter[indexAfter], dist1I, distI2) if verbose: print('mean prefValue Before:', (np.mean(prefilter[indexBefore]) * 256)) print('mean prefValue After:', (np.mean(prefilter[indexAfter]) * 256)) print('distance1:', dist1I) print('distance2:', distI2) print('percentage:', (distI2 / (dist1I + distI2))) if verbose: print('mean prefilter:', (np.mean(prefToApply[(i, :, :)]) * 256)) prefToApply[(i, :, :)] = (prefToApply[(i, :, :)] / prefScale) if verbose: print('mean prefilter:', np.mean(prefToApply[(i, :, :)])) if verbose: print('Reshaping prefilter:') print(prefToApply.shape) print(data.shape) if (data.shape[2] != prefToApply.shape[1]): FOV_Start_y = int(((prefToApply.shape[1] / 2) - (data.shape[2] / 2))) FOV_End_y = int(((prefToApply.shape[1] / 2) + (data.shape[2] / 2))) prefToApply = prefToApply[(:, FOV_Start_y:FOV_End_y, :)] if verbose: print(prefToApply.shape) if (data.shape[3] != prefToApply.shape[2]): FOV_Start_x = int(((prefToApply.shape[2] / 2) - (data.shape[3] / 2))) FOV_End_x = int(((prefToApply.shape[2] / 2) + (data.shape[3] / 2))) prefToApply = prefToApply[(:, :, FOV_Start_x:FOV_End_x)] if verbose: print(prefToApply.shape) dataPrefApplied = 
np.zeros(data.shape) for i in range(0, 4): if (direction == 1): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] * prefToApply) elif (direction == (- 1)): dataPrefApplied[(:, i, :, :)] = ((data[(:, i, :, :)] / prefToApply) / scaledown) else: print('Ivnalid direction! Must be 1 (mult) or -1 (div).') return dataPrefApplied
def applyPrefilter(data, wvltsData, prefilter, prefScale, wvltsPref, direction, scaledown=8, verbose=False): '\n ' prefToApply = np.zeros((6, prefilter.shape[1], prefilter.shape[2])) for i in range(0, 6): wvlCurr = wvltsData[i] valueClosest = min(wvltsPref, key=(lambda x: abs((x - wvlCurr)))) if verbose: print('iter', i, 'wvlCurr', wvlCurr) print('iter', i, 'valueClosest', valueClosest) indexClosest = wvltsPref.index(valueClosest) if verbose: print('iter', i, 'indexClosest', indexClosest) if (valueClosest < wvlCurr): indexBefore = indexClosest indexAfter = (indexClosest + 1) else: indexAfter = indexClosest indexBefore = (indexClosest - 1) dist1I = abs((wvltsPref[indexBefore] - wvltsData[i])) distI2 = abs((wvltsPref[indexAfter] - wvltsData[i])) prefToApply[(i, :, :)] = interpolateImages(prefilter[indexBefore], prefilter[indexAfter], dist1I, distI2) if verbose: print('mean prefValue Before:', (np.mean(prefilter[indexBefore]) * 256)) print('mean prefValue After:', (np.mean(prefilter[indexAfter]) * 256)) print('distance1:', dist1I) print('distance2:', distI2) print('percentage:', (distI2 / (dist1I + distI2))) if verbose: print('mean prefilter:', (np.mean(prefToApply[(i, :, :)]) * 256)) prefToApply[(i, :, :)] = (prefToApply[(i, :, :)] / prefScale) if verbose: print('mean prefilter:', np.mean(prefToApply[(i, :, :)])) if verbose: print('Reshaping prefilter:') print(prefToApply.shape) print(data.shape) if (data.shape[2] != prefToApply.shape[1]): FOV_Start_y = int(((prefToApply.shape[1] / 2) - (data.shape[2] / 2))) FOV_End_y = int(((prefToApply.shape[1] / 2) + (data.shape[2] / 2))) prefToApply = prefToApply[(:, FOV_Start_y:FOV_End_y, :)] if verbose: print(prefToApply.shape) if (data.shape[3] != prefToApply.shape[2]): FOV_Start_x = int(((prefToApply.shape[2] / 2) - (data.shape[3] / 2))) FOV_End_x = int(((prefToApply.shape[2] / 2) + (data.shape[3] / 2))) prefToApply = prefToApply[(:, :, FOV_Start_x:FOV_End_x)] if verbose: print(prefToApply.shape) dataPrefApplied = 
np.zeros(data.shape) for i in range(0, 4): if (direction == 1): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] * prefToApply) elif (direction == (- 1)): dataPrefApplied[(:, i, :, :)] = ((data[(:, i, :, :)] / prefToApply) / scaledown) else: print('Ivnalid direction! Must be 1 (mult) or -1 (div).') return dataPrefApplied<|docstring|>PHI prefilter. Version from K. Albert.<|endoftext|>
0e56a951ded4906a8d73ef7be1b3441593c1da946e80f0e5856442ccca18dbf2
def applyPrefilter_dos(data, wvltsData, prefilter, prefScale, wvltsPref, direction, scaledown=8, verbose=False): 'PHI prefilter. Modified version from K. Albert.\n ' prefToApply = np.zeros((6, prefilter.shape[1], prefilter.shape[2])) prefilter = (prefilter / prefScale) for i in range(0, 6): wvlCurr = wvltsData[i] valueClosest = min(wvltsPref, key=(lambda x: abs((x - wvlCurr)))) if verbose: print('iter', i, 'wvlCurr', wvlCurr) print('iter', i, 'valueClosest', valueClosest) indexClosest = wvltsPref.index(valueClosest) if verbose: print('iter', i, 'indexClosest', indexClosest) if (valueClosest < wvlCurr): indexBefore = indexClosest indexAfter = (indexClosest + 1) else: indexAfter = indexClosest indexBefore = (indexClosest - 1) dist1I = abs((wvltsPref[indexBefore] - wvltsData[i])) distI2 = abs((wvltsPref[indexAfter] - wvltsData[i])) prefToApply[(i, :, :)] = interpolateImages(prefilter[indexBefore], prefilter[indexAfter], dist1I, distI2) if verbose: print('mean prefValue Before:', (np.mean(prefilter[indexBefore]) * 256)) print('mean prefValue After:', (np.mean(prefilter[indexAfter]) * 256)) print('distance1:', dist1I) print('distance2:', distI2) print('percentage:', (distI2 / (dist1I + distI2))) if verbose: print('mean prefilter:', (np.mean(prefToApply[(i, :, :)]) * 256)) if verbose: print('mean prefilter:', np.mean(prefToApply[(i, :, :)])) if verbose: print('Reshaping prefilter:') print(prefToApply.shape) print(data.shape) if (data.shape[2] != prefToApply.shape[1]): FOV_Start_y = int(((prefToApply.shape[1] / 2) - (data.shape[2] / 2))) FOV_End_y = int(((prefToApply.shape[1] / 2) + (data.shape[2] / 2))) prefToApply = prefToApply[(:, FOV_Start_y:FOV_End_y, :)] if verbose: print(prefToApply.shape) if (data.shape[3] != prefToApply.shape[2]): FOV_Start_x = int(((prefToApply.shape[2] / 2) - (data.shape[3] / 2))) FOV_End_x = int(((prefToApply.shape[2] / 2) + (data.shape[3] / 2))) prefToApply = prefToApply[(:, :, FOV_Start_x:FOV_End_x)] if verbose: print(prefToApply.shape) 
dataPrefApplied = np.zeros(data.shape) for i in range(0, 4): if (direction == 1): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] * prefToApply) elif (direction == (- 1)): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] / prefToApply) else: print('Ivnalid direction! Must be 1 (mult) or -1 (div).') return dataPrefApplied
PHI prefilter. Modified version from K. Albert.
SPGPylibs/PHItools/phifdt_pipe_modules.py
applyPrefilter_dos
vivivum/SPGPylibs
3
python
def applyPrefilter_dos(data, wvltsData, prefilter, prefScale, wvltsPref, direction, scaledown=8, verbose=False): '\n ' prefToApply = np.zeros((6, prefilter.shape[1], prefilter.shape[2])) prefilter = (prefilter / prefScale) for i in range(0, 6): wvlCurr = wvltsData[i] valueClosest = min(wvltsPref, key=(lambda x: abs((x - wvlCurr)))) if verbose: print('iter', i, 'wvlCurr', wvlCurr) print('iter', i, 'valueClosest', valueClosest) indexClosest = wvltsPref.index(valueClosest) if verbose: print('iter', i, 'indexClosest', indexClosest) if (valueClosest < wvlCurr): indexBefore = indexClosest indexAfter = (indexClosest + 1) else: indexAfter = indexClosest indexBefore = (indexClosest - 1) dist1I = abs((wvltsPref[indexBefore] - wvltsData[i])) distI2 = abs((wvltsPref[indexAfter] - wvltsData[i])) prefToApply[(i, :, :)] = interpolateImages(prefilter[indexBefore], prefilter[indexAfter], dist1I, distI2) if verbose: print('mean prefValue Before:', (np.mean(prefilter[indexBefore]) * 256)) print('mean prefValue After:', (np.mean(prefilter[indexAfter]) * 256)) print('distance1:', dist1I) print('distance2:', distI2) print('percentage:', (distI2 / (dist1I + distI2))) if verbose: print('mean prefilter:', (np.mean(prefToApply[(i, :, :)]) * 256)) if verbose: print('mean prefilter:', np.mean(prefToApply[(i, :, :)])) if verbose: print('Reshaping prefilter:') print(prefToApply.shape) print(data.shape) if (data.shape[2] != prefToApply.shape[1]): FOV_Start_y = int(((prefToApply.shape[1] / 2) - (data.shape[2] / 2))) FOV_End_y = int(((prefToApply.shape[1] / 2) + (data.shape[2] / 2))) prefToApply = prefToApply[(:, FOV_Start_y:FOV_End_y, :)] if verbose: print(prefToApply.shape) if (data.shape[3] != prefToApply.shape[2]): FOV_Start_x = int(((prefToApply.shape[2] / 2) - (data.shape[3] / 2))) FOV_End_x = int(((prefToApply.shape[2] / 2) + (data.shape[3] / 2))) prefToApply = prefToApply[(:, :, FOV_Start_x:FOV_End_x)] if verbose: print(prefToApply.shape) dataPrefApplied = np.zeros(data.shape) for i in 
range(0, 4): if (direction == 1): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] * prefToApply) elif (direction == (- 1)): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] / prefToApply) else: print('Ivnalid direction! Must be 1 (mult) or -1 (div).') return dataPrefApplied
def applyPrefilter_dos(data, wvltsData, prefilter, prefScale, wvltsPref, direction, scaledown=8, verbose=False): '\n ' prefToApply = np.zeros((6, prefilter.shape[1], prefilter.shape[2])) prefilter = (prefilter / prefScale) for i in range(0, 6): wvlCurr = wvltsData[i] valueClosest = min(wvltsPref, key=(lambda x: abs((x - wvlCurr)))) if verbose: print('iter', i, 'wvlCurr', wvlCurr) print('iter', i, 'valueClosest', valueClosest) indexClosest = wvltsPref.index(valueClosest) if verbose: print('iter', i, 'indexClosest', indexClosest) if (valueClosest < wvlCurr): indexBefore = indexClosest indexAfter = (indexClosest + 1) else: indexAfter = indexClosest indexBefore = (indexClosest - 1) dist1I = abs((wvltsPref[indexBefore] - wvltsData[i])) distI2 = abs((wvltsPref[indexAfter] - wvltsData[i])) prefToApply[(i, :, :)] = interpolateImages(prefilter[indexBefore], prefilter[indexAfter], dist1I, distI2) if verbose: print('mean prefValue Before:', (np.mean(prefilter[indexBefore]) * 256)) print('mean prefValue After:', (np.mean(prefilter[indexAfter]) * 256)) print('distance1:', dist1I) print('distance2:', distI2) print('percentage:', (distI2 / (dist1I + distI2))) if verbose: print('mean prefilter:', (np.mean(prefToApply[(i, :, :)]) * 256)) if verbose: print('mean prefilter:', np.mean(prefToApply[(i, :, :)])) if verbose: print('Reshaping prefilter:') print(prefToApply.shape) print(data.shape) if (data.shape[2] != prefToApply.shape[1]): FOV_Start_y = int(((prefToApply.shape[1] / 2) - (data.shape[2] / 2))) FOV_End_y = int(((prefToApply.shape[1] / 2) + (data.shape[2] / 2))) prefToApply = prefToApply[(:, FOV_Start_y:FOV_End_y, :)] if verbose: print(prefToApply.shape) if (data.shape[3] != prefToApply.shape[2]): FOV_Start_x = int(((prefToApply.shape[2] / 2) - (data.shape[3] / 2))) FOV_End_x = int(((prefToApply.shape[2] / 2) + (data.shape[3] / 2))) prefToApply = prefToApply[(:, :, FOV_Start_x:FOV_End_x)] if verbose: print(prefToApply.shape) dataPrefApplied = np.zeros(data.shape) for i in 
range(0, 4): if (direction == 1): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] * prefToApply) elif (direction == (- 1)): dataPrefApplied[(:, i, :, :)] = (data[(:, i, :, :)] / prefToApply) else: print('Ivnalid direction! Must be 1 (mult) or -1 (div).') return dataPrefApplied<|docstring|>PHI prefilter. Modified version from K. Albert.<|endoftext|>
7873b3660a6a7f4d579233e6a97102263806f94c9c490d4f9b838f5daa943278
def phi_apply_demodulation(data, instrument, header=False, demod=False, verbose=False): '\n Use demodulation matrices to demodulate data size (n_wave*S_POL,N,M)\n ATTENTION: FDT40 is fixed to the one Johann is using!!!!\n ' if (instrument == 'FDT40'): mod_matrix_40 = np.array([[1.0006, (- 0.7132), 0.4002, (- 0.5693)], [1.0048, 0.4287, (- 0.7143), 0.5625], [0.9963, 0.4269, (- 0.3652), (- 0.8229)], [0.9983, (- 0.4022), 0.9001, 0.1495]]) demodM = np.linalg.inv(mod_matrix_40) demodM = np.array([[0.168258, 0.357277, 0.202212, 0.273266], [(- 0.660351), 0.314981, 0.650029, (- 0.299685)], [0.421242, 0.336994, (- 0.183068), (- 0.576202)], [(- 0.351933), 0.45982, (- 0.582167), 0.455458]]) elif (instrument == 'FDT45'): mod_matrix_45 = np.array([[1.0035, (- 0.6598), 0.5817, (- 0.4773)], [1.0032, 0.5647, 0.5275, 0.6403], [0.9966, 0.439, (- 0.5384), (- 0.715)], [0.9968, (- 0.6169), (- 0.6443), 0.4425]]) demodM = np.linalg.inv(mod_matrix_45) elif (instrument == 'HRT40'): mod_matrix_40 = np.array([[1.004, (- 0.6647), 0.5928, (- 0.4527)], [1.0018, 0.5647, 0.5093, 0.6483], [0.9964, 0.4348, (- 0.5135), (- 0.7325)], [0.9978, (- 0.6128), (- 0.6567), 0.4283]]) demodM = np.linalg.inv(mod_matrix_40) elif (instrument == 'HRT45'): mod_matrix_45_dos = np.array([[1.00159, (- 0.50032), 0.7093, (- 0.4931)], [1.004, 0.6615, 0.3925, 0.6494], [0.9954, 0.3356, (- 0.6126), (- 0.7143)], [0.9989, (- 0.7474), (- 0.5179), 0.4126]]) demodM = np.linalg.inv(mod_matrix_45_dos) else: printc('No demod available in demod_phi.py', color=bcolors.FAIL) raise SystemError() printc('Demodulation matrix for ', instrument, color=bcolors.WARNING) printc(demodM, color=bcolors.WARNING) if demod: return demodM (ls, ps, ys, xs) = data.shape for i in range(ls): data[(i, :, :, :)] = np.reshape(np.matmul(demodM, np.reshape(data[(i, :, :, :)], (ps, (xs * ys)))), (ps, ys, xs)) if (header != False): if ('CAL_IPOL' in header): header['CAL_IPOL'] = instrument else: header.set('CAL_IPOL', instrument, 'Onboard calibrated for 
instrumental polarization', after='CAL_DARK') return (data, header) else: return data
Use demodulation matrices to demodulate data size (n_wave*S_POL,N,M) ATTENTION: FDT40 is fixed to the one Johann is using!!!!
SPGPylibs/PHItools/phifdt_pipe_modules.py
phi_apply_demodulation
vivivum/SPGPylibs
3
python
def phi_apply_demodulation(data, instrument, header=False, demod=False, verbose=False): '\n Use demodulation matrices to demodulate data size (n_wave*S_POL,N,M)\n ATTENTION: FDT40 is fixed to the one Johann is using!!!!\n ' if (instrument == 'FDT40'): mod_matrix_40 = np.array([[1.0006, (- 0.7132), 0.4002, (- 0.5693)], [1.0048, 0.4287, (- 0.7143), 0.5625], [0.9963, 0.4269, (- 0.3652), (- 0.8229)], [0.9983, (- 0.4022), 0.9001, 0.1495]]) demodM = np.linalg.inv(mod_matrix_40) demodM = np.array([[0.168258, 0.357277, 0.202212, 0.273266], [(- 0.660351), 0.314981, 0.650029, (- 0.299685)], [0.421242, 0.336994, (- 0.183068), (- 0.576202)], [(- 0.351933), 0.45982, (- 0.582167), 0.455458]]) elif (instrument == 'FDT45'): mod_matrix_45 = np.array([[1.0035, (- 0.6598), 0.5817, (- 0.4773)], [1.0032, 0.5647, 0.5275, 0.6403], [0.9966, 0.439, (- 0.5384), (- 0.715)], [0.9968, (- 0.6169), (- 0.6443), 0.4425]]) demodM = np.linalg.inv(mod_matrix_45) elif (instrument == 'HRT40'): mod_matrix_40 = np.array([[1.004, (- 0.6647), 0.5928, (- 0.4527)], [1.0018, 0.5647, 0.5093, 0.6483], [0.9964, 0.4348, (- 0.5135), (- 0.7325)], [0.9978, (- 0.6128), (- 0.6567), 0.4283]]) demodM = np.linalg.inv(mod_matrix_40) elif (instrument == 'HRT45'): mod_matrix_45_dos = np.array([[1.00159, (- 0.50032), 0.7093, (- 0.4931)], [1.004, 0.6615, 0.3925, 0.6494], [0.9954, 0.3356, (- 0.6126), (- 0.7143)], [0.9989, (- 0.7474), (- 0.5179), 0.4126]]) demodM = np.linalg.inv(mod_matrix_45_dos) else: printc('No demod available in demod_phi.py', color=bcolors.FAIL) raise SystemError() printc('Demodulation matrix for ', instrument, color=bcolors.WARNING) printc(demodM, color=bcolors.WARNING) if demod: return demodM (ls, ps, ys, xs) = data.shape for i in range(ls): data[(i, :, :, :)] = np.reshape(np.matmul(demodM, np.reshape(data[(i, :, :, :)], (ps, (xs * ys)))), (ps, ys, xs)) if (header != False): if ('CAL_IPOL' in header): header['CAL_IPOL'] = instrument else: header.set('CAL_IPOL', instrument, 'Onboard calibrated for 
instrumental polarization', after='CAL_DARK') return (data, header) else: return data
def phi_apply_demodulation(data, instrument, header=False, demod=False, verbose=False): '\n Use demodulation matrices to demodulate data size (n_wave*S_POL,N,M)\n ATTENTION: FDT40 is fixed to the one Johann is using!!!!\n ' if (instrument == 'FDT40'): mod_matrix_40 = np.array([[1.0006, (- 0.7132), 0.4002, (- 0.5693)], [1.0048, 0.4287, (- 0.7143), 0.5625], [0.9963, 0.4269, (- 0.3652), (- 0.8229)], [0.9983, (- 0.4022), 0.9001, 0.1495]]) demodM = np.linalg.inv(mod_matrix_40) demodM = np.array([[0.168258, 0.357277, 0.202212, 0.273266], [(- 0.660351), 0.314981, 0.650029, (- 0.299685)], [0.421242, 0.336994, (- 0.183068), (- 0.576202)], [(- 0.351933), 0.45982, (- 0.582167), 0.455458]]) elif (instrument == 'FDT45'): mod_matrix_45 = np.array([[1.0035, (- 0.6598), 0.5817, (- 0.4773)], [1.0032, 0.5647, 0.5275, 0.6403], [0.9966, 0.439, (- 0.5384), (- 0.715)], [0.9968, (- 0.6169), (- 0.6443), 0.4425]]) demodM = np.linalg.inv(mod_matrix_45) elif (instrument == 'HRT40'): mod_matrix_40 = np.array([[1.004, (- 0.6647), 0.5928, (- 0.4527)], [1.0018, 0.5647, 0.5093, 0.6483], [0.9964, 0.4348, (- 0.5135), (- 0.7325)], [0.9978, (- 0.6128), (- 0.6567), 0.4283]]) demodM = np.linalg.inv(mod_matrix_40) elif (instrument == 'HRT45'): mod_matrix_45_dos = np.array([[1.00159, (- 0.50032), 0.7093, (- 0.4931)], [1.004, 0.6615, 0.3925, 0.6494], [0.9954, 0.3356, (- 0.6126), (- 0.7143)], [0.9989, (- 0.7474), (- 0.5179), 0.4126]]) demodM = np.linalg.inv(mod_matrix_45_dos) else: printc('No demod available in demod_phi.py', color=bcolors.FAIL) raise SystemError() printc('Demodulation matrix for ', instrument, color=bcolors.WARNING) printc(demodM, color=bcolors.WARNING) if demod: return demodM (ls, ps, ys, xs) = data.shape for i in range(ls): data[(i, :, :, :)] = np.reshape(np.matmul(demodM, np.reshape(data[(i, :, :, :)], (ps, (xs * ys)))), (ps, ys, xs)) if (header != False): if ('CAL_IPOL' in header): header['CAL_IPOL'] = instrument else: header.set('CAL_IPOL', instrument, 'Onboard calibrated for 
instrumental polarization', after='CAL_DARK') return (data, header) else: return data<|docstring|>Use demodulation matrices to demodulate data size (n_wave*S_POL,N,M) ATTENTION: FDT40 is fixed to the one Johann is using!!!!<|endoftext|>
c6579afdaea9bca01fe671d1154225c6aa93f77e5616da9c285aec5eb02a61a7
def phi_correct_ghost(data, header, rad, verbose=False): '\n Startup version on Jun 2021\n ' version = 'phi_correct_ghost V1.0 Jun 2021' only_one_vorbose = 1 center = np.array([header['CRPIX1'], header['CRPIX2']]).astype(int) printc(' Read center from header (updated): x=', center[0], ' y=', center[1], color=bcolors.OKBLUE) xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) PXBEG1 = (int(header['PXBEG1']) - 1) PXEND1 = (int(header['PXEND1']) - 1) PXBEG2 = (int(header['PXBEG2']) - 1) PXEND2 = (int(header['PXEND2']) - 1) if (verbose and only_one_vorbose): datap = np.copy(data) printc('-->>>>>>> Correcting ghost image ', color=bcolors.OKGREEN) coef = [(- 1.98787669), 1945.28944245] coef = [(- 1.9999), 1942.7] center_c = np.copy(center) center_c[0] += PXBEG1 center_c[1] += PXBEG2 poly1d_fn = np.poly1d(coef) sh = poly1d_fn(center_c).astype(int) sh_float = poly1d_fn(center_c) printc(' image center: x: ', center[0], ' y: ', center[1], color=bcolors.OKGREEN) printc(' image center [for 2048]: x: ', center_c[0], ' y: ', center_c[1], color=bcolors.OKGREEN) printc(' ghost displacements: x: ', sh_float[0], ' y: ', sh_float[1], color=bcolors.OKGREEN) mask_anulus = bin_annulus([yd, xd], (rad + 20), 10, full=False) mask_anulus = shift(mask_anulus, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx = np.where((mask_anulus == 1)) mask_anulus_big = bin_annulus([yd, xd], (rad - 150), 100, full=False) mask_anulus_big = shift(mask_anulus_big, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx_big = np.where(((data[(0, 0, :, :)] * mask_anulus_big) == 1)) printc(' computing azimuthal averages ', color=bcolors.OKGREEN) centers = np.zeros((2, 6)) radius = np.zeros(6) ints = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_rad = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_syn = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 
2)))))) ints_fit_pars = np.zeros((6, 5)) factor = np.zeros((6, 4)) mean_intensity = np.zeros((6, 4)) for i in range((zd // 4)): dummy_data = np.mean(data[(i, :, :, :)], axis=0) (centers[(1, i)], centers[(0, i)], radius[i]) = find_center(dummy_data) (intensity, rad) = azimutal_average(dummy_data, [centers[(0, i)], centers[(1, i)]]) ints[(i, 0:len(intensity))] = intensity ints_rad[(i, 0:len(intensity))] = rad rrange = int((radius[i] + 2)) clv = ints[(i, 0:rrange)] clv_r = ints_rad[(i, 0:rrange)] mu = np.sqrt((1 - ((clv_r ** 2) / (clv_r[(- 1)] ** 2)))) if (verbose and only_one_vorbose): plt.plot(clv_r, clv) plt.xlabel('Solar radious [pixel]') plt.ylabel('Intensity [DN]') plt.show() u = 0.5 I0 = 100 ande = np.where((mu > 0.1)) pars = newton(clv[ande], mu[ande], [I0, u, 0.2, 0.2, 0.2], limb_darkening) (fit, _) = limb_darkening(mu, pars) ints_fit[(i, 0:len(fit))] = fit ints_fit_pars[(i, :)] = pars ints_syn[(i, :)] = ints[(i, :)] ints_syn[(i, 0:len(fit))] = fit ints_syn[(i, :)] = (ints_syn[(i, :)] / ints_fit_pars[i][0]) ints_fit[(i, :)] = (ints_fit[(i, :)] / ints_fit_pars[i][0]) ints[(i, :)] = (ints[(i, :)] / ints_fit_pars[i][0]) nc = (((PXEND2 - PXBEG2) + 1) // 2) limb_2d = np.zeros((((PXEND2 - PXBEG2) + 1), ((PXEND1 - PXBEG1) + 1))) s_of_gh = int((radius[i] * 1.1)) limb_2d[((nc - s_of_gh):((nc + s_of_gh) + 1), (nc - s_of_gh):((nc + s_of_gh) + 1))] = genera_2d(ints_syn[(i, 0:s_of_gh)]) (xl, yl) = limb_2d.shape limb_2d = gaussian_filter(limb_2d, sigma=(8, 8)) limb_2d = shift_subp(limb_2d, shift=[(centers[(1, i)] - (yd // 2)), (centers[(0, i)] - (xd // 2))]) if (verbose and only_one_vorbose): plib.show_one(limb_2d, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='limb 2D', cbarlabel=' ', cmap='gray') reflection = shift(limb_2d, shift=(sh[0], sh[1]), fill_value=0) if (verbose and only_one_vorbose): plib.show_one(reflection, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='reflection', cbarlabel=' ', cmap='gray') for j in range(4): dummy = data[(i, j, :, :)] 
mean_intensity[(i, j)] = np.mean(dummy[idx_big]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = np.where((values <= meanv)) m_l = np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(i, j)] = (((m_r - m_l) * 100.0) / ints_fit_pars[i][0]) print('factor', factor[(i, j)]) if (verbose and only_one_vorbose): plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(((factor[(i, j)] * ints_fit_pars[i][0]) / 100.0), lw=2, color='green', alpha=0.4) plt.show() data[(i, j, :, :)] = (data[(i, j, :, :)] - (((reflection * factor[(i, j)]) / 100.0) * ints_fit_pars[i][0])) if (verbose and only_one_vorbose): plib.show_two(datap[(i, j, :, :)], data[(i, j, :, :)], vmin=[0, 0], vmax=[1, 1], block=True, pause=0.1, title=['Before', 'After'], xlabel='Pixel', ylabel='Pixel') plt.plot(datap[(0, 0, 0:200, 200)]) plt.plot(data[(0, 0, 0:200, 200)]) plt.ylim([0, 5]) plt.show() plt.plot(datap[(0, 0, 200, 0:200)]) plt.plot(data[(0, 0, 200, 0:200)]) plt.ylim([0, 5]) plt.show() only_one_vorbose = 0 if ('CAL_GHST' in header): header['CAL_GHST'] = version else: header.set('CAL_GHST', version, 'ghost correction version py module (phifdt_pipe_modules.py)', after='CAL_DARK') return (data, header)
Startup version on Jun 2021
SPGPylibs/PHItools/phifdt_pipe_modules.py
phi_correct_ghost
vivivum/SPGPylibs
3
python
def phi_correct_ghost(data, header, rad, verbose=False): '\n \n ' version = 'phi_correct_ghost V1.0 Jun 2021' only_one_vorbose = 1 center = np.array([header['CRPIX1'], header['CRPIX2']]).astype(int) printc(' Read center from header (updated): x=', center[0], ' y=', center[1], color=bcolors.OKBLUE) xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) PXBEG1 = (int(header['PXBEG1']) - 1) PXEND1 = (int(header['PXEND1']) - 1) PXBEG2 = (int(header['PXBEG2']) - 1) PXEND2 = (int(header['PXEND2']) - 1) if (verbose and only_one_vorbose): datap = np.copy(data) printc('-->>>>>>> Correcting ghost image ', color=bcolors.OKGREEN) coef = [(- 1.98787669), 1945.28944245] coef = [(- 1.9999), 1942.7] center_c = np.copy(center) center_c[0] += PXBEG1 center_c[1] += PXBEG2 poly1d_fn = np.poly1d(coef) sh = poly1d_fn(center_c).astype(int) sh_float = poly1d_fn(center_c) printc(' image center: x: ', center[0], ' y: ', center[1], color=bcolors.OKGREEN) printc(' image center [for 2048]: x: ', center_c[0], ' y: ', center_c[1], color=bcolors.OKGREEN) printc(' ghost displacements: x: ', sh_float[0], ' y: ', sh_float[1], color=bcolors.OKGREEN) mask_anulus = bin_annulus([yd, xd], (rad + 20), 10, full=False) mask_anulus = shift(mask_anulus, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx = np.where((mask_anulus == 1)) mask_anulus_big = bin_annulus([yd, xd], (rad - 150), 100, full=False) mask_anulus_big = shift(mask_anulus_big, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx_big = np.where(((data[(0, 0, :, :)] * mask_anulus_big) == 1)) printc(' computing azimuthal averages ', color=bcolors.OKGREEN) centers = np.zeros((2, 6)) radius = np.zeros(6) ints = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_rad = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_syn = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit_pars = 
np.zeros((6, 5)) factor = np.zeros((6, 4)) mean_intensity = np.zeros((6, 4)) for i in range((zd // 4)): dummy_data = np.mean(data[(i, :, :, :)], axis=0) (centers[(1, i)], centers[(0, i)], radius[i]) = find_center(dummy_data) (intensity, rad) = azimutal_average(dummy_data, [centers[(0, i)], centers[(1, i)]]) ints[(i, 0:len(intensity))] = intensity ints_rad[(i, 0:len(intensity))] = rad rrange = int((radius[i] + 2)) clv = ints[(i, 0:rrange)] clv_r = ints_rad[(i, 0:rrange)] mu = np.sqrt((1 - ((clv_r ** 2) / (clv_r[(- 1)] ** 2)))) if (verbose and only_one_vorbose): plt.plot(clv_r, clv) plt.xlabel('Solar radious [pixel]') plt.ylabel('Intensity [DN]') plt.show() u = 0.5 I0 = 100 ande = np.where((mu > 0.1)) pars = newton(clv[ande], mu[ande], [I0, u, 0.2, 0.2, 0.2], limb_darkening) (fit, _) = limb_darkening(mu, pars) ints_fit[(i, 0:len(fit))] = fit ints_fit_pars[(i, :)] = pars ints_syn[(i, :)] = ints[(i, :)] ints_syn[(i, 0:len(fit))] = fit ints_syn[(i, :)] = (ints_syn[(i, :)] / ints_fit_pars[i][0]) ints_fit[(i, :)] = (ints_fit[(i, :)] / ints_fit_pars[i][0]) ints[(i, :)] = (ints[(i, :)] / ints_fit_pars[i][0]) nc = (((PXEND2 - PXBEG2) + 1) // 2) limb_2d = np.zeros((((PXEND2 - PXBEG2) + 1), ((PXEND1 - PXBEG1) + 1))) s_of_gh = int((radius[i] * 1.1)) limb_2d[((nc - s_of_gh):((nc + s_of_gh) + 1), (nc - s_of_gh):((nc + s_of_gh) + 1))] = genera_2d(ints_syn[(i, 0:s_of_gh)]) (xl, yl) = limb_2d.shape limb_2d = gaussian_filter(limb_2d, sigma=(8, 8)) limb_2d = shift_subp(limb_2d, shift=[(centers[(1, i)] - (yd // 2)), (centers[(0, i)] - (xd // 2))]) if (verbose and only_one_vorbose): plib.show_one(limb_2d, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='limb 2D', cbarlabel=' ', cmap='gray') reflection = shift(limb_2d, shift=(sh[0], sh[1]), fill_value=0) if (verbose and only_one_vorbose): plib.show_one(reflection, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='reflection', cbarlabel=' ', cmap='gray') for j in range(4): dummy = data[(i, j, :, :)] mean_intensity[(i, j)] = 
np.mean(dummy[idx_big]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = np.where((values <= meanv)) m_l = np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(i, j)] = (((m_r - m_l) * 100.0) / ints_fit_pars[i][0]) print('factor', factor[(i, j)]) if (verbose and only_one_vorbose): plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(((factor[(i, j)] * ints_fit_pars[i][0]) / 100.0), lw=2, color='green', alpha=0.4) plt.show() data[(i, j, :, :)] = (data[(i, j, :, :)] - (((reflection * factor[(i, j)]) / 100.0) * ints_fit_pars[i][0])) if (verbose and only_one_vorbose): plib.show_two(datap[(i, j, :, :)], data[(i, j, :, :)], vmin=[0, 0], vmax=[1, 1], block=True, pause=0.1, title=['Before', 'After'], xlabel='Pixel', ylabel='Pixel') plt.plot(datap[(0, 0, 0:200, 200)]) plt.plot(data[(0, 0, 0:200, 200)]) plt.ylim([0, 5]) plt.show() plt.plot(datap[(0, 0, 200, 0:200)]) plt.plot(data[(0, 0, 200, 0:200)]) plt.ylim([0, 5]) plt.show() only_one_vorbose = 0 if ('CAL_GHST' in header): header['CAL_GHST'] = version else: header.set('CAL_GHST', version, 'ghost correction version py module (phifdt_pipe_modules.py)', after='CAL_DARK') return (data, header)
def phi_correct_ghost(data, header, rad, verbose=False): '\n \n ' version = 'phi_correct_ghost V1.0 Jun 2021' only_one_vorbose = 1 center = np.array([header['CRPIX1'], header['CRPIX2']]).astype(int) printc(' Read center from header (updated): x=', center[0], ' y=', center[1], color=bcolors.OKBLUE) xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) PXBEG1 = (int(header['PXBEG1']) - 1) PXEND1 = (int(header['PXEND1']) - 1) PXBEG2 = (int(header['PXBEG2']) - 1) PXEND2 = (int(header['PXEND2']) - 1) if (verbose and only_one_vorbose): datap = np.copy(data) printc('-->>>>>>> Correcting ghost image ', color=bcolors.OKGREEN) coef = [(- 1.98787669), 1945.28944245] coef = [(- 1.9999), 1942.7] center_c = np.copy(center) center_c[0] += PXBEG1 center_c[1] += PXBEG2 poly1d_fn = np.poly1d(coef) sh = poly1d_fn(center_c).astype(int) sh_float = poly1d_fn(center_c) printc(' image center: x: ', center[0], ' y: ', center[1], color=bcolors.OKGREEN) printc(' image center [for 2048]: x: ', center_c[0], ' y: ', center_c[1], color=bcolors.OKGREEN) printc(' ghost displacements: x: ', sh_float[0], ' y: ', sh_float[1], color=bcolors.OKGREEN) mask_anulus = bin_annulus([yd, xd], (rad + 20), 10, full=False) mask_anulus = shift(mask_anulus, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx = np.where((mask_anulus == 1)) mask_anulus_big = bin_annulus([yd, xd], (rad - 150), 100, full=False) mask_anulus_big = shift(mask_anulus_big, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx_big = np.where(((data[(0, 0, :, :)] * mask_anulus_big) == 1)) printc(' computing azimuthal averages ', color=bcolors.OKGREEN) centers = np.zeros((2, 6)) radius = np.zeros(6) ints = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_rad = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_syn = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit_pars = 
np.zeros((6, 5)) factor = np.zeros((6, 4)) mean_intensity = np.zeros((6, 4)) for i in range((zd // 4)): dummy_data = np.mean(data[(i, :, :, :)], axis=0) (centers[(1, i)], centers[(0, i)], radius[i]) = find_center(dummy_data) (intensity, rad) = azimutal_average(dummy_data, [centers[(0, i)], centers[(1, i)]]) ints[(i, 0:len(intensity))] = intensity ints_rad[(i, 0:len(intensity))] = rad rrange = int((radius[i] + 2)) clv = ints[(i, 0:rrange)] clv_r = ints_rad[(i, 0:rrange)] mu = np.sqrt((1 - ((clv_r ** 2) / (clv_r[(- 1)] ** 2)))) if (verbose and only_one_vorbose): plt.plot(clv_r, clv) plt.xlabel('Solar radious [pixel]') plt.ylabel('Intensity [DN]') plt.show() u = 0.5 I0 = 100 ande = np.where((mu > 0.1)) pars = newton(clv[ande], mu[ande], [I0, u, 0.2, 0.2, 0.2], limb_darkening) (fit, _) = limb_darkening(mu, pars) ints_fit[(i, 0:len(fit))] = fit ints_fit_pars[(i, :)] = pars ints_syn[(i, :)] = ints[(i, :)] ints_syn[(i, 0:len(fit))] = fit ints_syn[(i, :)] = (ints_syn[(i, :)] / ints_fit_pars[i][0]) ints_fit[(i, :)] = (ints_fit[(i, :)] / ints_fit_pars[i][0]) ints[(i, :)] = (ints[(i, :)] / ints_fit_pars[i][0]) nc = (((PXEND2 - PXBEG2) + 1) // 2) limb_2d = np.zeros((((PXEND2 - PXBEG2) + 1), ((PXEND1 - PXBEG1) + 1))) s_of_gh = int((radius[i] * 1.1)) limb_2d[((nc - s_of_gh):((nc + s_of_gh) + 1), (nc - s_of_gh):((nc + s_of_gh) + 1))] = genera_2d(ints_syn[(i, 0:s_of_gh)]) (xl, yl) = limb_2d.shape limb_2d = gaussian_filter(limb_2d, sigma=(8, 8)) limb_2d = shift_subp(limb_2d, shift=[(centers[(1, i)] - (yd // 2)), (centers[(0, i)] - (xd // 2))]) if (verbose and only_one_vorbose): plib.show_one(limb_2d, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='limb 2D', cbarlabel=' ', cmap='gray') reflection = shift(limb_2d, shift=(sh[0], sh[1]), fill_value=0) if (verbose and only_one_vorbose): plib.show_one(reflection, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='reflection', cbarlabel=' ', cmap='gray') for j in range(4): dummy = data[(i, j, :, :)] mean_intensity[(i, j)] = 
np.mean(dummy[idx_big]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = np.where((values <= meanv)) m_l = np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(i, j)] = (((m_r - m_l) * 100.0) / ints_fit_pars[i][0]) print('factor', factor[(i, j)]) if (verbose and only_one_vorbose): plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(((factor[(i, j)] * ints_fit_pars[i][0]) / 100.0), lw=2, color='green', alpha=0.4) plt.show() data[(i, j, :, :)] = (data[(i, j, :, :)] - (((reflection * factor[(i, j)]) / 100.0) * ints_fit_pars[i][0])) if (verbose and only_one_vorbose): plib.show_two(datap[(i, j, :, :)], data[(i, j, :, :)], vmin=[0, 0], vmax=[1, 1], block=True, pause=0.1, title=['Before', 'After'], xlabel='Pixel', ylabel='Pixel') plt.plot(datap[(0, 0, 0:200, 200)]) plt.plot(data[(0, 0, 0:200, 200)]) plt.ylim([0, 5]) plt.show() plt.plot(datap[(0, 0, 200, 0:200)]) plt.plot(data[(0, 0, 200, 0:200)]) plt.ylim([0, 5]) plt.show() only_one_vorbose = 0 if ('CAL_GHST' in header): header['CAL_GHST'] = version else: header.set('CAL_GHST', version, 'ghost correction version py module (phifdt_pipe_modules.py)', after='CAL_DARK') return (data, header)<|docstring|>Startup version on Jun 2021<|endoftext|>
95af2547edb0b1b948412228bee82b6976f9583a3b0ee79d0265e977244be91c
def phi_correct_ghost_dm(data, header, rad, verbose=False): '\n Startup version on Jun 2021\n ' version = 'phi_correct_ghost_dm V1.0 Sep 2021 - appied to demodulated images' only_one_vorbose = 1 center = np.array([header['CRPIX1'], header['CRPIX2']]).astype(int) printc(' Read center from header (updated): x=', center[0], ' y=', center[1], color=bcolors.OKBLUE) xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) PXBEG1 = (int(header['PXBEG1']) - 1) PXEND1 = (int(header['PXEND1']) - 1) PXBEG2 = (int(header['PXBEG2']) - 1) PXEND2 = (int(header['PXEND2']) - 1) if (verbose and only_one_vorbose): datap = np.copy(data) printc('-->>>>>>> Correcting ghost image ', color=bcolors.OKGREEN) coef = [(- 1.98787669), 1945.28944245] center_c = np.copy(center) center_c[0] += PXBEG1 center_c[1] += PXBEG2 poly1d_fn = np.poly1d(coef) sh = poly1d_fn(center_c).astype(int) sh_float = poly1d_fn(center_c) printc(' image center: x: ', center[0], ' y: ', center[1], color=bcolors.OKGREEN) printc(' image center [for 2048]: x: ', center_c[0], ' y: ', center_c[1], color=bcolors.OKGREEN) printc(' ghost displacements: x: ', sh_float[0], ' y: ', sh_float[1], color=bcolors.OKGREEN) mask_anulus = bin_annulus([yd, xd], (rad - 20), 10, full=False) mask_anulus = shift(mask_anulus, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx = np.where((mask_anulus == 1)) printc(' computing azimuthal averages ', color=bcolors.OKGREEN) centers = np.zeros((2, 6)) radius = np.zeros(6) ints = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_rad = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_syn = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit_pars = np.zeros((6, 5)) factor = np.zeros((6, 4)) mean_intensity = np.zeros((6, 4)) dummy = data[(0, 1, :, :)] mean_intensity[(0, 1)] = np.mean(dummy[idx]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = 
np.where((values <= meanv)) m_l = np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(0, 1)] = (m_r - m_l) print('factor', factor[(0, 1)]) plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(factor[(0, 1)], lw=2, color='green', alpha=0.4) plt.show() stop for i in range((zd // 4)): dummy_data = np.mean(data[(i, :, :, :)], axis=0) (centers[(1, i)], centers[(0, i)], radius[i]) = find_center(dummy_data) (intensity, rad) = azimutal_average(dummy_data, [centers[(0, i)], centers[(1, i)]]) ints[(i, 0:len(intensity))] = intensity ints_rad[(i, 0:len(intensity))] = rad rrange = int((radius[i] + 2)) clv = ints[(i, 0:rrange)] clv_r = ints_rad[(i, 0:rrange)] mu = np.sqrt((1 - ((clv_r ** 2) / (clv_r[(- 1)] ** 2)))) if (verbose and only_one_vorbose): plt.plot(clv_r, clv) plt.xlabel('Solar radious [pixel]') plt.ylabel('Intensity [DN]') plt.show() u = 0.5 I0 = 100 ande = np.where((mu > 0.1)) pars = newton(clv[ande], mu[ande], [I0, u, 0.2, 0.2, 0.2], limb_darkening) (fit, _) = limb_darkening(mu, pars) ints_fit[(i, 0:len(fit))] = fit ints_fit_pars[(i, :)] = pars ints_syn[(i, :)] = ints[(i, :)] ints_syn[(i, 0:len(fit))] = fit ints_syn[(i, :)] = (ints_syn[(i, :)] / ints_fit_pars[i][0]) ints_fit[(i, :)] = (ints_fit[(i, :)] / ints_fit_pars[i][0]) ints[(i, :)] = (ints[(i, :)] / ints_fit_pars[i][0]) nc = (((PXEND2 - PXBEG2) + 1) // 2) limb_2d = np.zeros((((PXEND2 - PXBEG2) + 1), ((PXEND1 - PXBEG1) + 1))) s_of_gh = int((radius[i] * 1.1)) limb_2d[((nc - s_of_gh):((nc + s_of_gh) + 1), (nc - s_of_gh):((nc + s_of_gh) + 1))] = genera_2d(ints_syn[(i, 0:s_of_gh)]) (xl, yl) = limb_2d.shape limb_2d = shift_subp(limb_2d, shift=[(centers[(1, i)] - (yd // 2)), (centers[(0, i)] - (xd // 2))]) if (verbose and only_one_vorbose): plib.show_one(limb_2d, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='limb 
2D', cbarlabel=' ', cmap='gray') reflection = shift(limb_2d, shift=(sh[0], sh[1]), fill_value=0) if (verbose and only_one_vorbose): plib.show_one(reflection, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='reflection', cbarlabel=' ', cmap='gray') for j in range(4): dummy = data[(i, j, :, :)] mean_intensity[(i, j)] = np.mean(dummy[idx_big]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = np.where((values <= meanv)) m_l = np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(i, j)] = (((m_r - m_l) * 100.0) / ints_fit_pars[i][0]) print('factor', factor[(i, j)]) if (verbose and only_one_vorbose): plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(((factor[(i, j)] * ints_fit_pars[i][0]) / 100.0), lw=2, color='green', alpha=0.4) plt.show() data[(i, j, :, :)] = (data[(i, j, :, :)] - (((reflection * factor[(i, j)]) / 100.0) * ints_fit_pars[i][0])) if (verbose and only_one_vorbose): plib.show_two(datap[(i, j, :, :)], data[(i, j, :, :)], vmin=[0, 0], vmax=[1, 1], block=True, pause=0.1, title=['Before', 'After'], xlabel='Pixel', ylabel='Pixel') plt.plot(datap[(0, 0, 0:200, 200)]) plt.plot(data[(0, 0, 0:200, 200)]) plt.ylim([0, 5]) plt.show() plt.plot(datap[(0, 0, 200, 0:200)]) plt.plot(data[(0, 0, 200, 0:200)]) plt.ylim([0, 5]) plt.show() only_one_vorbose = 1 stop if ('CAL_GHST' in header): header['CAL_GHST'] = version else: header.set('CAL_GHST', version, 'ghost correction version py module (phifdt_pipe_modules.py)', after='CAL_DARK') return (data, header)
Startup version on Jun 2021
SPGPylibs/PHItools/phifdt_pipe_modules.py
phi_correct_ghost_dm
vivivum/SPGPylibs
3
python
def phi_correct_ghost_dm(data, header, rad, verbose=False): '\n \n ' version = 'phi_correct_ghost_dm V1.0 Sep 2021 - appied to demodulated images' only_one_vorbose = 1 center = np.array([header['CRPIX1'], header['CRPIX2']]).astype(int) printc(' Read center from header (updated): x=', center[0], ' y=', center[1], color=bcolors.OKBLUE) xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) PXBEG1 = (int(header['PXBEG1']) - 1) PXEND1 = (int(header['PXEND1']) - 1) PXBEG2 = (int(header['PXBEG2']) - 1) PXEND2 = (int(header['PXEND2']) - 1) if (verbose and only_one_vorbose): datap = np.copy(data) printc('-->>>>>>> Correcting ghost image ', color=bcolors.OKGREEN) coef = [(- 1.98787669), 1945.28944245] center_c = np.copy(center) center_c[0] += PXBEG1 center_c[1] += PXBEG2 poly1d_fn = np.poly1d(coef) sh = poly1d_fn(center_c).astype(int) sh_float = poly1d_fn(center_c) printc(' image center: x: ', center[0], ' y: ', center[1], color=bcolors.OKGREEN) printc(' image center [for 2048]: x: ', center_c[0], ' y: ', center_c[1], color=bcolors.OKGREEN) printc(' ghost displacements: x: ', sh_float[0], ' y: ', sh_float[1], color=bcolors.OKGREEN) mask_anulus = bin_annulus([yd, xd], (rad - 20), 10, full=False) mask_anulus = shift(mask_anulus, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx = np.where((mask_anulus == 1)) printc(' computing azimuthal averages ', color=bcolors.OKGREEN) centers = np.zeros((2, 6)) radius = np.zeros(6) ints = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_rad = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_syn = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit_pars = np.zeros((6, 5)) factor = np.zeros((6, 4)) mean_intensity = np.zeros((6, 4)) dummy = data[(0, 1, :, :)] mean_intensity[(0, 1)] = np.mean(dummy[idx]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = np.where((values <= meanv)) m_l = 
np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(0, 1)] = (m_r - m_l) print('factor', factor[(0, 1)]) plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(factor[(0, 1)], lw=2, color='green', alpha=0.4) plt.show() stop for i in range((zd // 4)): dummy_data = np.mean(data[(i, :, :, :)], axis=0) (centers[(1, i)], centers[(0, i)], radius[i]) = find_center(dummy_data) (intensity, rad) = azimutal_average(dummy_data, [centers[(0, i)], centers[(1, i)]]) ints[(i, 0:len(intensity))] = intensity ints_rad[(i, 0:len(intensity))] = rad rrange = int((radius[i] + 2)) clv = ints[(i, 0:rrange)] clv_r = ints_rad[(i, 0:rrange)] mu = np.sqrt((1 - ((clv_r ** 2) / (clv_r[(- 1)] ** 2)))) if (verbose and only_one_vorbose): plt.plot(clv_r, clv) plt.xlabel('Solar radious [pixel]') plt.ylabel('Intensity [DN]') plt.show() u = 0.5 I0 = 100 ande = np.where((mu > 0.1)) pars = newton(clv[ande], mu[ande], [I0, u, 0.2, 0.2, 0.2], limb_darkening) (fit, _) = limb_darkening(mu, pars) ints_fit[(i, 0:len(fit))] = fit ints_fit_pars[(i, :)] = pars ints_syn[(i, :)] = ints[(i, :)] ints_syn[(i, 0:len(fit))] = fit ints_syn[(i, :)] = (ints_syn[(i, :)] / ints_fit_pars[i][0]) ints_fit[(i, :)] = (ints_fit[(i, :)] / ints_fit_pars[i][0]) ints[(i, :)] = (ints[(i, :)] / ints_fit_pars[i][0]) nc = (((PXEND2 - PXBEG2) + 1) // 2) limb_2d = np.zeros((((PXEND2 - PXBEG2) + 1), ((PXEND1 - PXBEG1) + 1))) s_of_gh = int((radius[i] * 1.1)) limb_2d[((nc - s_of_gh):((nc + s_of_gh) + 1), (nc - s_of_gh):((nc + s_of_gh) + 1))] = genera_2d(ints_syn[(i, 0:s_of_gh)]) (xl, yl) = limb_2d.shape limb_2d = shift_subp(limb_2d, shift=[(centers[(1, i)] - (yd // 2)), (centers[(0, i)] - (xd // 2))]) if (verbose and only_one_vorbose): plib.show_one(limb_2d, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='limb 2D', cbarlabel=' ', cmap='gray') 
reflection = shift(limb_2d, shift=(sh[0], sh[1]), fill_value=0) if (verbose and only_one_vorbose): plib.show_one(reflection, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='reflection', cbarlabel=' ', cmap='gray') for j in range(4): dummy = data[(i, j, :, :)] mean_intensity[(i, j)] = np.mean(dummy[idx_big]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = np.where((values <= meanv)) m_l = np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(i, j)] = (((m_r - m_l) * 100.0) / ints_fit_pars[i][0]) print('factor', factor[(i, j)]) if (verbose and only_one_vorbose): plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(((factor[(i, j)] * ints_fit_pars[i][0]) / 100.0), lw=2, color='green', alpha=0.4) plt.show() data[(i, j, :, :)] = (data[(i, j, :, :)] - (((reflection * factor[(i, j)]) / 100.0) * ints_fit_pars[i][0])) if (verbose and only_one_vorbose): plib.show_two(datap[(i, j, :, :)], data[(i, j, :, :)], vmin=[0, 0], vmax=[1, 1], block=True, pause=0.1, title=['Before', 'After'], xlabel='Pixel', ylabel='Pixel') plt.plot(datap[(0, 0, 0:200, 200)]) plt.plot(data[(0, 0, 0:200, 200)]) plt.ylim([0, 5]) plt.show() plt.plot(datap[(0, 0, 200, 0:200)]) plt.plot(data[(0, 0, 200, 0:200)]) plt.ylim([0, 5]) plt.show() only_one_vorbose = 1 stop if ('CAL_GHST' in header): header['CAL_GHST'] = version else: header.set('CAL_GHST', version, 'ghost correction version py module (phifdt_pipe_modules.py)', after='CAL_DARK') return (data, header)
def phi_correct_ghost_dm(data, header, rad, verbose=False): '\n \n ' version = 'phi_correct_ghost_dm V1.0 Sep 2021 - appied to demodulated images' only_one_vorbose = 1 center = np.array([header['CRPIX1'], header['CRPIX2']]).astype(int) printc(' Read center from header (updated): x=', center[0], ' y=', center[1], color=bcolors.OKBLUE) xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) PXBEG1 = (int(header['PXBEG1']) - 1) PXEND1 = (int(header['PXEND1']) - 1) PXBEG2 = (int(header['PXBEG2']) - 1) PXEND2 = (int(header['PXEND2']) - 1) if (verbose and only_one_vorbose): datap = np.copy(data) printc('-->>>>>>> Correcting ghost image ', color=bcolors.OKGREEN) coef = [(- 1.98787669), 1945.28944245] center_c = np.copy(center) center_c[0] += PXBEG1 center_c[1] += PXBEG2 poly1d_fn = np.poly1d(coef) sh = poly1d_fn(center_c).astype(int) sh_float = poly1d_fn(center_c) printc(' image center: x: ', center[0], ' y: ', center[1], color=bcolors.OKGREEN) printc(' image center [for 2048]: x: ', center_c[0], ' y: ', center_c[1], color=bcolors.OKGREEN) printc(' ghost displacements: x: ', sh_float[0], ' y: ', sh_float[1], color=bcolors.OKGREEN) mask_anulus = bin_annulus([yd, xd], (rad - 20), 10, full=False) mask_anulus = shift(mask_anulus, shift=((center[0] - (xd // 2)), (center[1] - (yd // 2))), fill_value=0) idx = np.where((mask_anulus == 1)) printc(' computing azimuthal averages ', color=bcolors.OKGREEN) centers = np.zeros((2, 6)) radius = np.zeros(6) ints = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_rad = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_syn = np.zeros((6, int(np.sqrt(((xd ** 2) + (yd ** 2)))))) ints_fit_pars = np.zeros((6, 5)) factor = np.zeros((6, 4)) mean_intensity = np.zeros((6, 4)) dummy = data[(0, 1, :, :)] mean_intensity[(0, 1)] = np.mean(dummy[idx]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = np.where((values <= meanv)) m_l = 
np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(0, 1)] = (m_r - m_l) print('factor', factor[(0, 1)]) plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(factor[(0, 1)], lw=2, color='green', alpha=0.4) plt.show() stop for i in range((zd // 4)): dummy_data = np.mean(data[(i, :, :, :)], axis=0) (centers[(1, i)], centers[(0, i)], radius[i]) = find_center(dummy_data) (intensity, rad) = azimutal_average(dummy_data, [centers[(0, i)], centers[(1, i)]]) ints[(i, 0:len(intensity))] = intensity ints_rad[(i, 0:len(intensity))] = rad rrange = int((radius[i] + 2)) clv = ints[(i, 0:rrange)] clv_r = ints_rad[(i, 0:rrange)] mu = np.sqrt((1 - ((clv_r ** 2) / (clv_r[(- 1)] ** 2)))) if (verbose and only_one_vorbose): plt.plot(clv_r, clv) plt.xlabel('Solar radious [pixel]') plt.ylabel('Intensity [DN]') plt.show() u = 0.5 I0 = 100 ande = np.where((mu > 0.1)) pars = newton(clv[ande], mu[ande], [I0, u, 0.2, 0.2, 0.2], limb_darkening) (fit, _) = limb_darkening(mu, pars) ints_fit[(i, 0:len(fit))] = fit ints_fit_pars[(i, :)] = pars ints_syn[(i, :)] = ints[(i, :)] ints_syn[(i, 0:len(fit))] = fit ints_syn[(i, :)] = (ints_syn[(i, :)] / ints_fit_pars[i][0]) ints_fit[(i, :)] = (ints_fit[(i, :)] / ints_fit_pars[i][0]) ints[(i, :)] = (ints[(i, :)] / ints_fit_pars[i][0]) nc = (((PXEND2 - PXBEG2) + 1) // 2) limb_2d = np.zeros((((PXEND2 - PXBEG2) + 1), ((PXEND1 - PXBEG1) + 1))) s_of_gh = int((radius[i] * 1.1)) limb_2d[((nc - s_of_gh):((nc + s_of_gh) + 1), (nc - s_of_gh):((nc + s_of_gh) + 1))] = genera_2d(ints_syn[(i, 0:s_of_gh)]) (xl, yl) = limb_2d.shape limb_2d = shift_subp(limb_2d, shift=[(centers[(1, i)] - (yd // 2)), (centers[(0, i)] - (xd // 2))]) if (verbose and only_one_vorbose): plib.show_one(limb_2d, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='limb 2D', cbarlabel=' ', cmap='gray') 
reflection = shift(limb_2d, shift=(sh[0], sh[1]), fill_value=0) if (verbose and only_one_vorbose): plib.show_one(reflection, vmax=1, vmin=0, xlabel='pixel', ylabel='pixel', title='reflection', cbarlabel=' ', cmap='gray') for j in range(4): dummy = data[(i, j, :, :)] mean_intensity[(i, j)] = np.mean(dummy[idx_big]) values = dummy[idx].flatten() meanv = np.mean(values) idx_l = np.where((values <= meanv)) m_l = np.mean(values[idx_l]) idx_r = np.where((values >= meanv)) m_r = np.mean(values[idx_r]) factor[(i, j)] = (((m_r - m_l) * 100.0) / ints_fit_pars[i][0]) print('factor', factor[(i, j)]) if (verbose and only_one_vorbose): plt.hist(values, bins=40) plt.title('signal') plt.axvline(meanv, lw=2, color='yellow', alpha=0.4) plt.axvline(m_l, lw=2, color='red', alpha=0.4) plt.axvline(m_r, lw=2, color='blue', alpha=0.4) plt.axvline(((factor[(i, j)] * ints_fit_pars[i][0]) / 100.0), lw=2, color='green', alpha=0.4) plt.show() data[(i, j, :, :)] = (data[(i, j, :, :)] - (((reflection * factor[(i, j)]) / 100.0) * ints_fit_pars[i][0])) if (verbose and only_one_vorbose): plib.show_two(datap[(i, j, :, :)], data[(i, j, :, :)], vmin=[0, 0], vmax=[1, 1], block=True, pause=0.1, title=['Before', 'After'], xlabel='Pixel', ylabel='Pixel') plt.plot(datap[(0, 0, 0:200, 200)]) plt.plot(data[(0, 0, 0:200, 200)]) plt.ylim([0, 5]) plt.show() plt.plot(datap[(0, 0, 200, 0:200)]) plt.plot(data[(0, 0, 200, 0:200)]) plt.ylim([0, 5]) plt.show() only_one_vorbose = 1 stop if ('CAL_GHST' in header): header['CAL_GHST'] = version else: header.set('CAL_GHST', version, 'ghost correction version py module (phifdt_pipe_modules.py)', after='CAL_DARK') return (data, header)<|docstring|>Startup version on Jun 2021<|endoftext|>
a2b290f1bb2d6b9959fc00382c1f0d768b261dc264562cbd8ad8f455668a43c0
def phi_correct_fringes(data, header, option, verbose=False): '\n Startup version on Jun 2021\n ' version = 'phi_correct_fringes V1.0 Jun 2021' xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) if (option == 'auto'): printc('-->>>>>>> Looking for fringes and removing them --', color=bcolors.OKGREEN) freq_x = np.zeros(((zd // 4), 3, 50)) freq_y = np.zeros(((zd // 4), 3, 50)) freq_x2 = np.zeros(((zd // 4), 3, 50)) freq_y2 = np.zeros(((zd // 4), 3, 50)) rad_min = 10 rad_max = 30 wsize = 50 wbin = 1 win_halfw = 2 win = apod(((win_halfw * 2) + 1), 0.6) (x, y) = np.ogrid[(0:((win_halfw * 2) + 1), 0:((win_halfw * 2) + 1))] level_theshold = [1.5, 1.5, 2] plt.ion() for i in range((zd // 4)): for j in np.arange(1, 4): print('Wavelengh ', i, ' pol state: ', j) data_fringes = rebin(data[(i, j, :, :)], [(yd // wbin), (xd // wbin)]) F = np.fft.fft2(data_fringes) F = np.fft.fftshift(F) h = F.shape[0] w = F.shape[1] power2d = np.log10(np.abs((F * np.conj(F)).astype(np.float))) power2d = gaussian_filter(power2d, sigma=(1, 1)) im = power2d[(((w // 2) - wsize):(((w // 2) + wsize) + 1), ((h // 2) - wsize):(((h // 2) + wsize) + 1))] imc = im[(2:(- 2), 2:(- 2))] minimum = np.min(imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))]) mean = np.mean((imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))] - minimum)) rms = np.std(imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))]) stack = ((((((((((((((((((((((((im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), (- 2)])[(2:(- 2), 2:(- 2))]) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), (- 2)])[(2:(- 2), 2:(- 2))])) * 
(im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 2])[(2:(- 2), 2:(- 2))])) idx = np.where((stack == 1)) sm = imc.shape plt.imshow(imc) if (len(idx[0]) > 0): loop = 0 for idx_i in range(len(idx[0])): if ((imc[(idx[0][idx_i], idx[1][idx_i])] - minimum) > (level_theshold[(j - 1)] * mean)): if ((np.abs(np.sqrt((((idx[0][idx_i] - (sm[0] // 2)) ** 2) + ((idx[1][idx_i] - (sm[1] // 2)) ** 2)))) > rad_min) and (np.abs(np.sqrt((((idx[0][idx_i] - (sm[0] // 2)) ** 2) + ((idx[1][idx_i] - (sm[1] // 2)) ** 2)))) < rad_max)): plt.plot(idx[1][idx_i], idx[0][idx_i], 'og', markersize=3) subm = imc[((idx[0][idx_i] - win_halfw):((idx[0][idx_i] + win_halfw) + 1), (idx[1][idx_i] - win_halfw):((idx[1][idx_i] + win_halfw) + 1))] if np.max((subm < 0)): subm = (1 - subm) (height, xcoor, ycoor, width_x, width_y) = 
moments(subm) freq_x2[(i, (j - 1), loop)] = (((((idx[0][idx_i] - win_halfw) + xcoor) - wsize) + 2) / h) freq_y2[(i, (j - 1), loop)] = (((((idx[1][idx_i] - win_halfw) + ycoor) - wsize) + 2) / w) freq_x[(i, (j - 1), loop)] = (((idx[0][idx_i] - wsize) + 2) / h) freq_y[(i, (j - 1), loop)] = (((idx[1][idx_i] - wsize) + 2) / w) f_gauss = (1 - np.exp((- ((((x - xcoor) ** 2) / (2 * ((width_x * 3) ** 2))) + (((y - ycoor) ** 2) / (2 * ((width_y * 3) ** 2))))))) F[(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) - win_halfw):(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) + win_halfw) + 1), ((idx[1][idx_i] + (((w // 2) - wsize) + 2)) - win_halfw):(((idx[1][idx_i] + (((w // 2) - wsize) + 2)) + win_halfw) + 1))] *= f_gauss power2d[(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) - win_halfw):(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) + win_halfw) + 1), ((idx[1][idx_i] + (((w // 2) - wsize) + 2)) - win_halfw):(((idx[1][idx_i] + (((w // 2) - wsize) + 2)) + win_halfw) + 1))] *= f_gauss print(freq_x[(i, (j - 1), loop)], freq_y[(i, (j - 1), loop)]) print(i, j, (level_theshold[(j - 1)] * mean), ((3.0 * level_theshold[(j - 1)]) * mean), rms, (3 * rms), (imc[(idx[0][idx_i], idx[1][idx_i])] - minimum), freq_x[(i, (j - 1), loop)], freq_y[(i, (j - 1), loop)]) loop += 1 plt.colorbar() plt.show(block=True) plt.pause(1) plt.clf() dum = np.copy(data_fringes) data_fringes = np.fft.ifft2(np.fft.fftshift(F)).astype(np.float) data[(i, j, :, :)] = np.fft.ifft2(np.fft.fftshift(F)).astype(np.float) plt.ioff() for i in range((zd // 4)): for j in np.arange(1, 3): print(i, j, freq_y[(i, j, :6)], freq_x[(i, j, :6)]) if ('CAL_FRIN' in header): header['CAL_FRIN'] = version else: header.set('CAL_FRIN', version, 'Fringe correction ( name+version of py module if True )', after='CAL_DARK') elif (option == 'manual'): printc('-->>>>>>> Removing fringes with fixed freq. --', color=bcolors.OKGREEN) printc(' ', version, '--', color=bcolors.OKGREEN) printc('Freq. updated on 11-August-2021 (H. Strecker and D. 
Orozco Suarez', color=bcolors.WARNING) freq_x_Q = np.array([0.01318359375, 0.01318359375]) freq_y_Q = np.array([0.001953125, 0.00732421875]) freq_x_U = np.array([0.01318359375, 0.01318359375]) freq_y_U = np.array([0.001953125, 0.00732421875]) freq_x_V = np.array([0.01318359375, 0.01318359375, 0.009765625, 0.0078125]) freq_y_V = np.array([0.001953125, 0.00732421875, 0.00830078125, 0.0107421875]) px_x_Q = (freq_x_Q * xd) px_y_Q = (freq_y_Q * yd) px_x_U = (freq_x_U * xd) px_y_U = (freq_y_U * yd) px_x_V = (freq_x_V * xd) px_y_V = (freq_y_V * yd) printc(px_x_Q, (xd - px_x_Q), color=bcolors.OKBLUE) printc(px_x_Q, (xd - px_x_Q).astype(int), color=bcolors.OKBLUE) px_x_Q = np.append(px_x_Q, ((xd - px_x_Q) - 1)).astype(int) px_y_Q = np.append(px_y_Q, ((yd - px_y_Q) - 1)).astype(int) px_x_U = np.append(px_x_U, ((xd - px_x_U) - 1)).astype(int) px_y_U = np.append(px_y_U, ((yd - px_y_U) - 1)).astype(int) px_x_V = np.append(px_x_V, ((xd - px_x_V) - 1)).astype(int) px_y_V = np.append(px_y_V, ((yd - px_y_V) - 1)).astype(int) wsize = 50 win_halfw = 2 printc('freq_x_Q [f,px] ', freq_x_Q, px_x_Q, color=bcolors.OKBLUE) printc('freq_y_Q [f,px] ', freq_y_Q, px_y_Q, color=bcolors.OKBLUE) printc('freq_x_U [f,px] ', freq_x_U, px_x_U, color=bcolors.OKBLUE) printc('freq_y_U [f,px] ', freq_y_U, px_y_U, color=bcolors.OKBLUE) printc('freq_x_V [f,px] ', freq_x_V, px_x_V, color=bcolors.OKBLUE) printc('freq_y_V [f,px] ', freq_y_V, px_y_V, color=bcolors.OKBLUE) printc('win_halfw ', win_halfw, color=bcolors.OKBLUE) mask_QUV = np.ones((3, yd, xd)) (maski, coords) = generate_circular_mask([(2 * win_halfw), (2 * win_halfw)], win_halfw, win_halfw) print(maski) print(KeyboardInterrupt) for k in range(len(px_x_Q)): print(k, (px_y_Q[k] - win_halfw), ((px_y_Q[k] + win_halfw) + 1), (px_x_Q[k] - win_halfw), ((px_x_Q[k] + win_halfw) + 1)) mask_QUV[(0, (px_y_Q[k] - win_halfw):((px_y_Q[k] + win_halfw) + 1), (px_x_Q[k] - win_halfw):((px_x_Q[k] + win_halfw) + 1))] *= (1 - maski) for k in range(len(px_x_U)): 
mask_QUV[(1, (px_y_U[k] - win_halfw):((px_y_U[k] + win_halfw) + 1), (px_x_U[k] - win_halfw):((px_x_U[k] + win_halfw) + 1))] *= (1 - maski) for k in range(len(px_x_V)): mask_QUV[(2, (px_y_V[k] - win_halfw):((px_y_V[k] + win_halfw) + 1), (px_x_V[k] - win_halfw):((px_x_V[k] + win_halfw) + 1))] *= (1 - maski) for i in range((zd // 4)): for j in np.arange(1, 4): F = np.fft.fft2(data[(i, j, :, :)]) F *= mask_QUV[((j - 1), :, :)] data[(i, j, :, :)] = np.fft.ifft2(F) if ('CAL_FRIN' in header): header['CAL_FRIN'] = version else: header.set('CAL_FRIN', version, 'Fringe correction ( name+version of py module if True )', after='CAL_DARK') else: print('No fringe correction') return (data, header) return (data, header)
Startup version on Jun 2021
SPGPylibs/PHItools/phifdt_pipe_modules.py
phi_correct_fringes
vivivum/SPGPylibs
3
python
def phi_correct_fringes(data, header, option, verbose=False): '\n \n ' version = 'phi_correct_fringes V1.0 Jun 2021' xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) if (option == 'auto'): printc('-->>>>>>> Looking for fringes and removing them --', color=bcolors.OKGREEN) freq_x = np.zeros(((zd // 4), 3, 50)) freq_y = np.zeros(((zd // 4), 3, 50)) freq_x2 = np.zeros(((zd // 4), 3, 50)) freq_y2 = np.zeros(((zd // 4), 3, 50)) rad_min = 10 rad_max = 30 wsize = 50 wbin = 1 win_halfw = 2 win = apod(((win_halfw * 2) + 1), 0.6) (x, y) = np.ogrid[(0:((win_halfw * 2) + 1), 0:((win_halfw * 2) + 1))] level_theshold = [1.5, 1.5, 2] plt.ion() for i in range((zd // 4)): for j in np.arange(1, 4): print('Wavelengh ', i, ' pol state: ', j) data_fringes = rebin(data[(i, j, :, :)], [(yd // wbin), (xd // wbin)]) F = np.fft.fft2(data_fringes) F = np.fft.fftshift(F) h = F.shape[0] w = F.shape[1] power2d = np.log10(np.abs((F * np.conj(F)).astype(np.float))) power2d = gaussian_filter(power2d, sigma=(1, 1)) im = power2d[(((w // 2) - wsize):(((w // 2) + wsize) + 1), ((h // 2) - wsize):(((h // 2) + wsize) + 1))] imc = im[(2:(- 2), 2:(- 2))] minimum = np.min(imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))]) mean = np.mean((imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))] - minimum)) rms = np.std(imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))]) stack = ((((((((((((((((((((((((im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), (- 2)])[(2:(- 2), 2:(- 2))]) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > 
shift(im, [(- 1), (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 2])[(2:(- 2), 2:(- 2))])) idx = np.where((stack == 1)) sm = imc.shape plt.imshow(imc) if (len(idx[0]) > 0): loop = 0 for idx_i in range(len(idx[0])): if ((imc[(idx[0][idx_i], idx[1][idx_i])] - minimum) > (level_theshold[(j - 1)] * mean)): if ((np.abs(np.sqrt((((idx[0][idx_i] - (sm[0] // 2)) ** 2) + ((idx[1][idx_i] - (sm[1] // 2)) ** 2)))) > rad_min) and (np.abs(np.sqrt((((idx[0][idx_i] - (sm[0] // 2)) ** 2) + ((idx[1][idx_i] - (sm[1] // 2)) ** 2)))) < rad_max)): plt.plot(idx[1][idx_i], idx[0][idx_i], 'og', markersize=3) subm = imc[((idx[0][idx_i] - win_halfw):((idx[0][idx_i] + win_halfw) + 1), (idx[1][idx_i] - win_halfw):((idx[1][idx_i] + win_halfw) + 1))] if np.max((subm < 0)): subm = (1 - subm) (height, xcoor, ycoor, width_x, width_y) = moments(subm) freq_x2[(i, (j - 1), 
loop)] = (((((idx[0][idx_i] - win_halfw) + xcoor) - wsize) + 2) / h) freq_y2[(i, (j - 1), loop)] = (((((idx[1][idx_i] - win_halfw) + ycoor) - wsize) + 2) / w) freq_x[(i, (j - 1), loop)] = (((idx[0][idx_i] - wsize) + 2) / h) freq_y[(i, (j - 1), loop)] = (((idx[1][idx_i] - wsize) + 2) / w) f_gauss = (1 - np.exp((- ((((x - xcoor) ** 2) / (2 * ((width_x * 3) ** 2))) + (((y - ycoor) ** 2) / (2 * ((width_y * 3) ** 2))))))) F[(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) - win_halfw):(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) + win_halfw) + 1), ((idx[1][idx_i] + (((w // 2) - wsize) + 2)) - win_halfw):(((idx[1][idx_i] + (((w // 2) - wsize) + 2)) + win_halfw) + 1))] *= f_gauss power2d[(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) - win_halfw):(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) + win_halfw) + 1), ((idx[1][idx_i] + (((w // 2) - wsize) + 2)) - win_halfw):(((idx[1][idx_i] + (((w // 2) - wsize) + 2)) + win_halfw) + 1))] *= f_gauss print(freq_x[(i, (j - 1), loop)], freq_y[(i, (j - 1), loop)]) print(i, j, (level_theshold[(j - 1)] * mean), ((3.0 * level_theshold[(j - 1)]) * mean), rms, (3 * rms), (imc[(idx[0][idx_i], idx[1][idx_i])] - minimum), freq_x[(i, (j - 1), loop)], freq_y[(i, (j - 1), loop)]) loop += 1 plt.colorbar() plt.show(block=True) plt.pause(1) plt.clf() dum = np.copy(data_fringes) data_fringes = np.fft.ifft2(np.fft.fftshift(F)).astype(np.float) data[(i, j, :, :)] = np.fft.ifft2(np.fft.fftshift(F)).astype(np.float) plt.ioff() for i in range((zd // 4)): for j in np.arange(1, 3): print(i, j, freq_y[(i, j, :6)], freq_x[(i, j, :6)]) if ('CAL_FRIN' in header): header['CAL_FRIN'] = version else: header.set('CAL_FRIN', version, 'Fringe correction ( name+version of py module if True )', after='CAL_DARK') elif (option == 'manual'): printc('-->>>>>>> Removing fringes with fixed freq. --', color=bcolors.OKGREEN) printc(' ', version, '--', color=bcolors.OKGREEN) printc('Freq. updated on 11-August-2021 (H. Strecker and D. 
Orozco Suarez', color=bcolors.WARNING) freq_x_Q = np.array([0.01318359375, 0.01318359375]) freq_y_Q = np.array([0.001953125, 0.00732421875]) freq_x_U = np.array([0.01318359375, 0.01318359375]) freq_y_U = np.array([0.001953125, 0.00732421875]) freq_x_V = np.array([0.01318359375, 0.01318359375, 0.009765625, 0.0078125]) freq_y_V = np.array([0.001953125, 0.00732421875, 0.00830078125, 0.0107421875]) px_x_Q = (freq_x_Q * xd) px_y_Q = (freq_y_Q * yd) px_x_U = (freq_x_U * xd) px_y_U = (freq_y_U * yd) px_x_V = (freq_x_V * xd) px_y_V = (freq_y_V * yd) printc(px_x_Q, (xd - px_x_Q), color=bcolors.OKBLUE) printc(px_x_Q, (xd - px_x_Q).astype(int), color=bcolors.OKBLUE) px_x_Q = np.append(px_x_Q, ((xd - px_x_Q) - 1)).astype(int) px_y_Q = np.append(px_y_Q, ((yd - px_y_Q) - 1)).astype(int) px_x_U = np.append(px_x_U, ((xd - px_x_U) - 1)).astype(int) px_y_U = np.append(px_y_U, ((yd - px_y_U) - 1)).astype(int) px_x_V = np.append(px_x_V, ((xd - px_x_V) - 1)).astype(int) px_y_V = np.append(px_y_V, ((yd - px_y_V) - 1)).astype(int) wsize = 50 win_halfw = 2 printc('freq_x_Q [f,px] ', freq_x_Q, px_x_Q, color=bcolors.OKBLUE) printc('freq_y_Q [f,px] ', freq_y_Q, px_y_Q, color=bcolors.OKBLUE) printc('freq_x_U [f,px] ', freq_x_U, px_x_U, color=bcolors.OKBLUE) printc('freq_y_U [f,px] ', freq_y_U, px_y_U, color=bcolors.OKBLUE) printc('freq_x_V [f,px] ', freq_x_V, px_x_V, color=bcolors.OKBLUE) printc('freq_y_V [f,px] ', freq_y_V, px_y_V, color=bcolors.OKBLUE) printc('win_halfw ', win_halfw, color=bcolors.OKBLUE) mask_QUV = np.ones((3, yd, xd)) (maski, coords) = generate_circular_mask([(2 * win_halfw), (2 * win_halfw)], win_halfw, win_halfw) print(maski) print(KeyboardInterrupt) for k in range(len(px_x_Q)): print(k, (px_y_Q[k] - win_halfw), ((px_y_Q[k] + win_halfw) + 1), (px_x_Q[k] - win_halfw), ((px_x_Q[k] + win_halfw) + 1)) mask_QUV[(0, (px_y_Q[k] - win_halfw):((px_y_Q[k] + win_halfw) + 1), (px_x_Q[k] - win_halfw):((px_x_Q[k] + win_halfw) + 1))] *= (1 - maski) for k in range(len(px_x_U)): 
mask_QUV[(1, (px_y_U[k] - win_halfw):((px_y_U[k] + win_halfw) + 1), (px_x_U[k] - win_halfw):((px_x_U[k] + win_halfw) + 1))] *= (1 - maski) for k in range(len(px_x_V)): mask_QUV[(2, (px_y_V[k] - win_halfw):((px_y_V[k] + win_halfw) + 1), (px_x_V[k] - win_halfw):((px_x_V[k] + win_halfw) + 1))] *= (1 - maski) for i in range((zd // 4)): for j in np.arange(1, 4): F = np.fft.fft2(data[(i, j, :, :)]) F *= mask_QUV[((j - 1), :, :)] data[(i, j, :, :)] = np.fft.ifft2(F) if ('CAL_FRIN' in header): header['CAL_FRIN'] = version else: header.set('CAL_FRIN', version, 'Fringe correction ( name+version of py module if True )', after='CAL_DARK') else: print('No fringe correction') return (data, header) return (data, header)
def phi_correct_fringes(data, header, option, verbose=False): '\n \n ' version = 'phi_correct_fringes V1.0 Jun 2021' xd = int(header['NAXIS1']) yd = int(header['NAXIS2']) zd = int(header['NAXIS3']) if (option == 'auto'): printc('-->>>>>>> Looking for fringes and removing them --', color=bcolors.OKGREEN) freq_x = np.zeros(((zd // 4), 3, 50)) freq_y = np.zeros(((zd // 4), 3, 50)) freq_x2 = np.zeros(((zd // 4), 3, 50)) freq_y2 = np.zeros(((zd // 4), 3, 50)) rad_min = 10 rad_max = 30 wsize = 50 wbin = 1 win_halfw = 2 win = apod(((win_halfw * 2) + 1), 0.6) (x, y) = np.ogrid[(0:((win_halfw * 2) + 1), 0:((win_halfw * 2) + 1))] level_theshold = [1.5, 1.5, 2] plt.ion() for i in range((zd // 4)): for j in np.arange(1, 4): print('Wavelengh ', i, ' pol state: ', j) data_fringes = rebin(data[(i, j, :, :)], [(yd // wbin), (xd // wbin)]) F = np.fft.fft2(data_fringes) F = np.fft.fftshift(F) h = F.shape[0] w = F.shape[1] power2d = np.log10(np.abs((F * np.conj(F)).astype(np.float))) power2d = gaussian_filter(power2d, sigma=(1, 1)) im = power2d[(((w // 2) - wsize):(((w // 2) + wsize) + 1), ((h // 2) - wsize):(((h // 2) + wsize) + 1))] imc = im[(2:(- 2), 2:(- 2))] minimum = np.min(imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))]) mean = np.mean((imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))] - minimum)) rms = np.std(imc[((wsize - rad_max):((wsize + rad_max) + 1), (wsize - rad_max):((wsize + rad_max) + 1))]) stack = ((((((((((((((((((((((((im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), (- 2)])[(2:(- 2), 2:(- 2))]) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 2), 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > 
shift(im, [(- 1), (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [(- 1), 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [0, 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [1, 2])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, (- 2)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, (- 1)])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 0])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 1])[(2:(- 2), 2:(- 2))])) * (im[(2:(- 2), 2:(- 2))] > shift(im, [2, 2])[(2:(- 2), 2:(- 2))])) idx = np.where((stack == 1)) sm = imc.shape plt.imshow(imc) if (len(idx[0]) > 0): loop = 0 for idx_i in range(len(idx[0])): if ((imc[(idx[0][idx_i], idx[1][idx_i])] - minimum) > (level_theshold[(j - 1)] * mean)): if ((np.abs(np.sqrt((((idx[0][idx_i] - (sm[0] // 2)) ** 2) + ((idx[1][idx_i] - (sm[1] // 2)) ** 2)))) > rad_min) and (np.abs(np.sqrt((((idx[0][idx_i] - (sm[0] // 2)) ** 2) + ((idx[1][idx_i] - (sm[1] // 2)) ** 2)))) < rad_max)): plt.plot(idx[1][idx_i], idx[0][idx_i], 'og', markersize=3) subm = imc[((idx[0][idx_i] - win_halfw):((idx[0][idx_i] + win_halfw) + 1), (idx[1][idx_i] - win_halfw):((idx[1][idx_i] + win_halfw) + 1))] if np.max((subm < 0)): subm = (1 - subm) (height, xcoor, ycoor, width_x, width_y) = moments(subm) freq_x2[(i, (j - 1), 
loop)] = (((((idx[0][idx_i] - win_halfw) + xcoor) - wsize) + 2) / h) freq_y2[(i, (j - 1), loop)] = (((((idx[1][idx_i] - win_halfw) + ycoor) - wsize) + 2) / w) freq_x[(i, (j - 1), loop)] = (((idx[0][idx_i] - wsize) + 2) / h) freq_y[(i, (j - 1), loop)] = (((idx[1][idx_i] - wsize) + 2) / w) f_gauss = (1 - np.exp((- ((((x - xcoor) ** 2) / (2 * ((width_x * 3) ** 2))) + (((y - ycoor) ** 2) / (2 * ((width_y * 3) ** 2))))))) F[(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) - win_halfw):(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) + win_halfw) + 1), ((idx[1][idx_i] + (((w // 2) - wsize) + 2)) - win_halfw):(((idx[1][idx_i] + (((w // 2) - wsize) + 2)) + win_halfw) + 1))] *= f_gauss power2d[(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) - win_halfw):(((idx[0][idx_i] + (((h // 2) - wsize) + 2)) + win_halfw) + 1), ((idx[1][idx_i] + (((w // 2) - wsize) + 2)) - win_halfw):(((idx[1][idx_i] + (((w // 2) - wsize) + 2)) + win_halfw) + 1))] *= f_gauss print(freq_x[(i, (j - 1), loop)], freq_y[(i, (j - 1), loop)]) print(i, j, (level_theshold[(j - 1)] * mean), ((3.0 * level_theshold[(j - 1)]) * mean), rms, (3 * rms), (imc[(idx[0][idx_i], idx[1][idx_i])] - minimum), freq_x[(i, (j - 1), loop)], freq_y[(i, (j - 1), loop)]) loop += 1 plt.colorbar() plt.show(block=True) plt.pause(1) plt.clf() dum = np.copy(data_fringes) data_fringes = np.fft.ifft2(np.fft.fftshift(F)).astype(np.float) data[(i, j, :, :)] = np.fft.ifft2(np.fft.fftshift(F)).astype(np.float) plt.ioff() for i in range((zd // 4)): for j in np.arange(1, 3): print(i, j, freq_y[(i, j, :6)], freq_x[(i, j, :6)]) if ('CAL_FRIN' in header): header['CAL_FRIN'] = version else: header.set('CAL_FRIN', version, 'Fringe correction ( name+version of py module if True )', after='CAL_DARK') elif (option == 'manual'): printc('-->>>>>>> Removing fringes with fixed freq. --', color=bcolors.OKGREEN) printc(' ', version, '--', color=bcolors.OKGREEN) printc('Freq. updated on 11-August-2021 (H. Strecker and D. 
Orozco Suarez', color=bcolors.WARNING) freq_x_Q = np.array([0.01318359375, 0.01318359375]) freq_y_Q = np.array([0.001953125, 0.00732421875]) freq_x_U = np.array([0.01318359375, 0.01318359375]) freq_y_U = np.array([0.001953125, 0.00732421875]) freq_x_V = np.array([0.01318359375, 0.01318359375, 0.009765625, 0.0078125]) freq_y_V = np.array([0.001953125, 0.00732421875, 0.00830078125, 0.0107421875]) px_x_Q = (freq_x_Q * xd) px_y_Q = (freq_y_Q * yd) px_x_U = (freq_x_U * xd) px_y_U = (freq_y_U * yd) px_x_V = (freq_x_V * xd) px_y_V = (freq_y_V * yd) printc(px_x_Q, (xd - px_x_Q), color=bcolors.OKBLUE) printc(px_x_Q, (xd - px_x_Q).astype(int), color=bcolors.OKBLUE) px_x_Q = np.append(px_x_Q, ((xd - px_x_Q) - 1)).astype(int) px_y_Q = np.append(px_y_Q, ((yd - px_y_Q) - 1)).astype(int) px_x_U = np.append(px_x_U, ((xd - px_x_U) - 1)).astype(int) px_y_U = np.append(px_y_U, ((yd - px_y_U) - 1)).astype(int) px_x_V = np.append(px_x_V, ((xd - px_x_V) - 1)).astype(int) px_y_V = np.append(px_y_V, ((yd - px_y_V) - 1)).astype(int) wsize = 50 win_halfw = 2 printc('freq_x_Q [f,px] ', freq_x_Q, px_x_Q, color=bcolors.OKBLUE) printc('freq_y_Q [f,px] ', freq_y_Q, px_y_Q, color=bcolors.OKBLUE) printc('freq_x_U [f,px] ', freq_x_U, px_x_U, color=bcolors.OKBLUE) printc('freq_y_U [f,px] ', freq_y_U, px_y_U, color=bcolors.OKBLUE) printc('freq_x_V [f,px] ', freq_x_V, px_x_V, color=bcolors.OKBLUE) printc('freq_y_V [f,px] ', freq_y_V, px_y_V, color=bcolors.OKBLUE) printc('win_halfw ', win_halfw, color=bcolors.OKBLUE) mask_QUV = np.ones((3, yd, xd)) (maski, coords) = generate_circular_mask([(2 * win_halfw), (2 * win_halfw)], win_halfw, win_halfw) print(maski) print(KeyboardInterrupt) for k in range(len(px_x_Q)): print(k, (px_y_Q[k] - win_halfw), ((px_y_Q[k] + win_halfw) + 1), (px_x_Q[k] - win_halfw), ((px_x_Q[k] + win_halfw) + 1)) mask_QUV[(0, (px_y_Q[k] - win_halfw):((px_y_Q[k] + win_halfw) + 1), (px_x_Q[k] - win_halfw):((px_x_Q[k] + win_halfw) + 1))] *= (1 - maski) for k in range(len(px_x_U)): 
mask_QUV[(1, (px_y_U[k] - win_halfw):((px_y_U[k] + win_halfw) + 1), (px_x_U[k] - win_halfw):((px_x_U[k] + win_halfw) + 1))] *= (1 - maski) for k in range(len(px_x_V)): mask_QUV[(2, (px_y_V[k] - win_halfw):((px_y_V[k] + win_halfw) + 1), (px_x_V[k] - win_halfw):((px_x_V[k] + win_halfw) + 1))] *= (1 - maski) for i in range((zd // 4)): for j in np.arange(1, 4): F = np.fft.fft2(data[(i, j, :, :)]) F *= mask_QUV[((j - 1), :, :)] data[(i, j, :, :)] = np.fft.ifft2(F) if ('CAL_FRIN' in header): header['CAL_FRIN'] = version else: header.set('CAL_FRIN', version, 'Fringe correction ( name+version of py module if True )', after='CAL_DARK') else: print('No fringe correction') return (data, header) return (data, header)<|docstring|>Startup version on Jun 2021<|endoftext|>
c724089a9125140d93c2a55469a2f5ab5632969958f347c57f310c13cbc1f48c
def read_yaml(self, encoding='utf-8'): '读取yaml数据' with open(self.file, encoding=encoding) as f: ret = yaml.safe_load(f.read()) return ret
读取yaml数据
utils/yaml_wrapper.py
read_yaml
xdr940/TSLa
0
python
def read_yaml(self, encoding='utf-8'): with open(self.file, encoding=encoding) as f: ret = yaml.safe_load(f.read()) return ret
def read_yaml(self, encoding='utf-8'): with open(self.file, encoding=encoding) as f: ret = yaml.safe_load(f.read()) return ret<|docstring|>读取yaml数据<|endoftext|>
a76ddd269b3a58a904da56a2d78c0c21b53ad836fee447431776828dad111230
def write_yaml(self, data, encoding='utf-8'): '向yaml文件写入数据' with open(self.file, encoding=encoding, mode='w') as f: return yaml.safe_dump(data, stream=f, sort_keys=False, default_flow_style=False)
向yaml文件写入数据
utils/yaml_wrapper.py
write_yaml
xdr940/TSLa
0
python
def write_yaml(self, data, encoding='utf-8'): with open(self.file, encoding=encoding, mode='w') as f: return yaml.safe_dump(data, stream=f, sort_keys=False, default_flow_style=False)
def write_yaml(self, data, encoding='utf-8'): with open(self.file, encoding=encoding, mode='w') as f: return yaml.safe_dump(data, stream=f, sort_keys=False, default_flow_style=False)<|docstring|>向yaml文件写入数据<|endoftext|>
bc431126979ef7f8deb4f43b267364ccf0819afee84052d401b7cd29b1cf6028
@staticmethod def __calculate_line_degree(pt1, pt2): '\n Calculate the line degree(the angle between the line and the x axis)\n :param pt1: start point of the line\n :param pt2: end point of the line\n :return: the degree of the line\n ' if ((pt1[0] - pt2[0]) != 0): curlineangle = math.atan(((pt2[1] - pt1[1]) / (pt2[0] - pt1[0]))) if (curlineangle < 0): curlineangle += math.pi else: curlineangle = (math.pi / 2.0) return ((curlineangle * 180.0) / math.pi)
Calculate the line degree(the angle between the line and the x axis) :param pt1: start point of the line :param pt2: end point of the line :return: the degree of the line
Extract_line_candidates/binarized_filter_result.py
__calculate_line_degree
MaybeShewill-CV/DVCNN_Lane_Detection
19
python
@staticmethod def __calculate_line_degree(pt1, pt2): '\n Calculate the line degree(the angle between the line and the x axis)\n :param pt1: start point of the line\n :param pt2: end point of the line\n :return: the degree of the line\n ' if ((pt1[0] - pt2[0]) != 0): curlineangle = math.atan(((pt2[1] - pt1[1]) / (pt2[0] - pt1[0]))) if (curlineangle < 0): curlineangle += math.pi else: curlineangle = (math.pi / 2.0) return ((curlineangle * 180.0) / math.pi)
@staticmethod def __calculate_line_degree(pt1, pt2): '\n Calculate the line degree(the angle between the line and the x axis)\n :param pt1: start point of the line\n :param pt2: end point of the line\n :return: the degree of the line\n ' if ((pt1[0] - pt2[0]) != 0): curlineangle = math.atan(((pt2[1] - pt1[1]) / (pt2[0] - pt1[0]))) if (curlineangle < 0): curlineangle += math.pi else: curlineangle = (math.pi / 2.0) return ((curlineangle * 180.0) / math.pi)<|docstring|>Calculate the line degree(the angle between the line and the x axis) :param pt1: start point of the line :param pt2: end point of the line :return: the degree of the line<|endoftext|>
d5be389d7153eecdf0e0596ef62ad553ee5d681efcea6bffee98565ca0e5518d
@staticmethod def __get_rrect_degree(_rrect): '\n Calculate the rotate degree of the rotate rect(angle between the longer side of the rotate rect and the x axis)\n :param _rrect: Rotate degree\n :return:\n ' points = cv2.boxPoints(box=_rrect) firstline_length = (math.pow((points[1][0] - points[0][0]), 2) + math.pow((points[1][1] - points[0][1]), 2)) secondline_length = (math.pow((points[2][0] - points[1][0]), 2) + math.pow((points[2][1] - points[1][1]), 2)) if (firstline_length > secondline_length): return FilterBinarizer.__calculate_line_degree(points[0], points[1]) else: return FilterBinarizer.__calculate_line_degree(points[2], points[1])
Calculate the rotate degree of the rotate rect(angle between the longer side of the rotate rect and the x axis) :param _rrect: Rotate degree :return:
Extract_line_candidates/binarized_filter_result.py
__get_rrect_degree
MaybeShewill-CV/DVCNN_Lane_Detection
19
python
@staticmethod def __get_rrect_degree(_rrect): '\n Calculate the rotate degree of the rotate rect(angle between the longer side of the rotate rect and the x axis)\n :param _rrect: Rotate degree\n :return:\n ' points = cv2.boxPoints(box=_rrect) firstline_length = (math.pow((points[1][0] - points[0][0]), 2) + math.pow((points[1][1] - points[0][1]), 2)) secondline_length = (math.pow((points[2][0] - points[1][0]), 2) + math.pow((points[2][1] - points[1][1]), 2)) if (firstline_length > secondline_length): return FilterBinarizer.__calculate_line_degree(points[0], points[1]) else: return FilterBinarizer.__calculate_line_degree(points[2], points[1])
@staticmethod def __get_rrect_degree(_rrect): '\n Calculate the rotate degree of the rotate rect(angle between the longer side of the rotate rect and the x axis)\n :param _rrect: Rotate degree\n :return:\n ' points = cv2.boxPoints(box=_rrect) firstline_length = (math.pow((points[1][0] - points[0][0]), 2) + math.pow((points[1][1] - points[0][1]), 2)) secondline_length = (math.pow((points[2][0] - points[1][0]), 2) + math.pow((points[2][1] - points[1][1]), 2)) if (firstline_length > secondline_length): return FilterBinarizer.__calculate_line_degree(points[0], points[1]) else: return FilterBinarizer.__calculate_line_degree(points[2], points[1])<|docstring|>Calculate the rotate degree of the rotate rect(angle between the longer side of the rotate rect and the x axis) :param _rrect: Rotate degree :return:<|endoftext|>
fdbcd7e0b1145de8bdaf8a3bdb18031d181bb6943c830ef1b8f73899d70671f7
@staticmethod def __get_rrect_area(_rrect): '\n Get the area of the rotate rect\n :param _rrect:\n :return:\n ' points = cv2.boxPoints(box=_rrect) firstline_length = math.sqrt((math.pow((points[1][0] - points[0][0]), 2) + math.pow((points[1][1] - points[0][1]), 2))) secondline_length = math.sqrt((math.pow((points[2][0] - points[1][0]), 2) + math.pow((points[2][1] - points[1][1]), 2))) return (firstline_length * secondline_length)
Get the area of the rotate rect :param _rrect: :return:
Extract_line_candidates/binarized_filter_result.py
__get_rrect_area
MaybeShewill-CV/DVCNN_Lane_Detection
19
python
@staticmethod def __get_rrect_area(_rrect): '\n Get the area of the rotate rect\n :param _rrect:\n :return:\n ' points = cv2.boxPoints(box=_rrect) firstline_length = math.sqrt((math.pow((points[1][0] - points[0][0]), 2) + math.pow((points[1][1] - points[0][1]), 2))) secondline_length = math.sqrt((math.pow((points[2][0] - points[1][0]), 2) + math.pow((points[2][1] - points[1][1]), 2))) return (firstline_length * secondline_length)
@staticmethod def __get_rrect_area(_rrect): '\n Get the area of the rotate rect\n :param _rrect:\n :return:\n ' points = cv2.boxPoints(box=_rrect) firstline_length = math.sqrt((math.pow((points[1][0] - points[0][0]), 2) + math.pow((points[1][1] - points[0][1]), 2))) secondline_length = math.sqrt((math.pow((points[2][0] - points[1][0]), 2) + math.pow((points[2][1] - points[1][1]), 2))) return (firstline_length * secondline_length)<|docstring|>Get the area of the rotate rect :param _rrect: :return:<|endoftext|>
f3aa75b7fdeeb79aed394b6be5a46814b5ca0072977ae84465e2a7969d23def7
@staticmethod def __is_rrect_valid(rrect): '\n Thresh the invalid rotate rect through the angle and area\n :param rrect:\n :return:\n ' rrect_angle = FilterBinarizer.__get_rrect_degree(rrect) if ((rrect_angle < 45) or (rrect_angle > 135)): return False rrect_area = FilterBinarizer.__get_rrect_area(rrect) if (rrect_area < (12 * 12)): return False return True
Thresh the invalid rotate rect through the angle and area :param rrect: :return:
Extract_line_candidates/binarized_filter_result.py
__is_rrect_valid
MaybeShewill-CV/DVCNN_Lane_Detection
19
python
@staticmethod def __is_rrect_valid(rrect): '\n Thresh the invalid rotate rect through the angle and area\n :param rrect:\n :return:\n ' rrect_angle = FilterBinarizer.__get_rrect_degree(rrect) if ((rrect_angle < 45) or (rrect_angle > 135)): return False rrect_area = FilterBinarizer.__get_rrect_area(rrect) if (rrect_area < (12 * 12)): return False return True
@staticmethod def __is_rrect_valid(rrect): '\n Thresh the invalid rotate rect through the angle and area\n :param rrect:\n :return:\n ' rrect_angle = FilterBinarizer.__get_rrect_degree(rrect) if ((rrect_angle < 45) or (rrect_angle > 135)): return False rrect_area = FilterBinarizer.__get_rrect_area(rrect) if (rrect_area < (12 * 12)): return False return True<|docstring|>Thresh the invalid rotate rect through the angle and area :param rrect: :return:<|endoftext|>
210914dce398f4793dc6dabe56bc09bd7de507fe4d7d962fc30b3901ef236287
def __map_roi_to_front_view(self, roidb): "\n Map the roidb to the front view image through perspective mapping function\n :param roidb: top view roidb\n :return: front view roidb , if the converted front view roidb's bndbox or contours is invalid (mainly because the\n mapped points on the front view image may be out of the image boundry) the return false as the roi flag to show this\n roi is a invalid roi that can't compose a roi pair\n " top_roi_index = roidb.get_roi_index() top_roi_contours = roidb.get_roi_contours() top_roi_response_points = roidb.get_roi_response_points() roidb_is_valid = True fv_roi_contours = [] fv_roi_response_points = [] transformer = inverse_perspective_map.PerspectiveTransformer(_cfg=self.__cfg) for (index, point) in enumerate(top_roi_contours): pt1 = [(point[0] + self.__start_x), (point[1] + self.__start_y)] fv_point = transformer.perspective_point(pt1=pt1) if ((fv_point[0] < 0) or (fv_point[0] >= self.__warpped_image_width) or (fv_point[1] < 0) or (fv_point[1] >= self.__warpped_image_height)): roidb_is_valid = False break fv_roi_contours.append(fv_point) for (index, point) in enumerate(top_roi_response_points): pt1 = [(point[0] + self.__start_x), (point[1] + self.__start_y)] fv_point = transformer.perspective_point(pt1=pt1) if ((fv_point[0] < 0) or (fv_point[0] >= self.__warpped_image_width) or (fv_point[1] < 0) or (fv_point[1] >= self.__warpped_image_height)): roidb_is_valid = False break fv_roi_response_points.append(fv_point) fv_roi_contours = np.array(fv_roi_contours) fv_roi_response_points = np.array(fv_roi_contours) fv_roi = imdb.Roidb(roi_index=top_roi_index, roi_contours=fv_roi_contours, roi_response_points=fv_roi_response_points) return (fv_roi, roidb_is_valid)
Map the roidb to the front view image through perspective mapping function :param roidb: top view roidb :return: front view roidb , if the converted front view roidb's bndbox or contours is invalid (mainly because the mapped points on the front view image may be out of the image boundry) the return false as the roi flag to show this roi is a invalid roi that can't compose a roi pair
Extract_line_candidates/binarized_filter_result.py
__map_roi_to_front_view
MaybeShewill-CV/DVCNN_Lane_Detection
19
python
def __map_roi_to_front_view(self, roidb): "\n Map the roidb to the front view image through perspective mapping function\n :param roidb: top view roidb\n :return: front view roidb , if the converted front view roidb's bndbox or contours is invalid (mainly because the\n mapped points on the front view image may be out of the image boundry) the return false as the roi flag to show this\n roi is a invalid roi that can't compose a roi pair\n " top_roi_index = roidb.get_roi_index() top_roi_contours = roidb.get_roi_contours() top_roi_response_points = roidb.get_roi_response_points() roidb_is_valid = True fv_roi_contours = [] fv_roi_response_points = [] transformer = inverse_perspective_map.PerspectiveTransformer(_cfg=self.__cfg) for (index, point) in enumerate(top_roi_contours): pt1 = [(point[0] + self.__start_x), (point[1] + self.__start_y)] fv_point = transformer.perspective_point(pt1=pt1) if ((fv_point[0] < 0) or (fv_point[0] >= self.__warpped_image_width) or (fv_point[1] < 0) or (fv_point[1] >= self.__warpped_image_height)): roidb_is_valid = False break fv_roi_contours.append(fv_point) for (index, point) in enumerate(top_roi_response_points): pt1 = [(point[0] + self.__start_x), (point[1] + self.__start_y)] fv_point = transformer.perspective_point(pt1=pt1) if ((fv_point[0] < 0) or (fv_point[0] >= self.__warpped_image_width) or (fv_point[1] < 0) or (fv_point[1] >= self.__warpped_image_height)): roidb_is_valid = False break fv_roi_response_points.append(fv_point) fv_roi_contours = np.array(fv_roi_contours) fv_roi_response_points = np.array(fv_roi_contours) fv_roi = imdb.Roidb(roi_index=top_roi_index, roi_contours=fv_roi_contours, roi_response_points=fv_roi_response_points) return (fv_roi, roidb_is_valid)
def __map_roi_to_front_view(self, roidb): "\n Map the roidb to the front view image through perspective mapping function\n :param roidb: top view roidb\n :return: front view roidb , if the converted front view roidb's bndbox or contours is invalid (mainly because the\n mapped points on the front view image may be out of the image boundry) the return false as the roi flag to show this\n roi is a invalid roi that can't compose a roi pair\n " top_roi_index = roidb.get_roi_index() top_roi_contours = roidb.get_roi_contours() top_roi_response_points = roidb.get_roi_response_points() roidb_is_valid = True fv_roi_contours = [] fv_roi_response_points = [] transformer = inverse_perspective_map.PerspectiveTransformer(_cfg=self.__cfg) for (index, point) in enumerate(top_roi_contours): pt1 = [(point[0] + self.__start_x), (point[1] + self.__start_y)] fv_point = transformer.perspective_point(pt1=pt1) if ((fv_point[0] < 0) or (fv_point[0] >= self.__warpped_image_width) or (fv_point[1] < 0) or (fv_point[1] >= self.__warpped_image_height)): roidb_is_valid = False break fv_roi_contours.append(fv_point) for (index, point) in enumerate(top_roi_response_points): pt1 = [(point[0] + self.__start_x), (point[1] + self.__start_y)] fv_point = transformer.perspective_point(pt1=pt1) if ((fv_point[0] < 0) or (fv_point[0] >= self.__warpped_image_width) or (fv_point[1] < 0) or (fv_point[1] >= self.__warpped_image_height)): roidb_is_valid = False break fv_roi_response_points.append(fv_point) fv_roi_contours = np.array(fv_roi_contours) fv_roi_response_points = np.array(fv_roi_contours) fv_roi = imdb.Roidb(roi_index=top_roi_index, roi_contours=fv_roi_contours, roi_response_points=fv_roi_response_points) return (fv_roi, roidb_is_valid)<|docstring|>Map the roidb to the front view image through perspective mapping function :param roidb: top view roidb :return: front view roidb , if the converted front view roidb's bndbox or contours is invalid (mainly because the mapped points on the front view image 
may be out of the image boundry) the return false as the roi flag to show this roi is a invalid roi that can't compose a roi pair<|endoftext|>
e0ff2d8e5df6801b1c7bfbcc4b284e824e7cacf96ecf399e013825cfca2ea1c1
@staticmethod def __find_response_points_in_contours(contours, image): "\n find responding points in contours' bndbox and responding points are those points with value 255 in the\n OTSU result of weight hat like filtered image\n :param contours:\n :param image: OTSU threshold image\n :return:\n " assert (len(contours) > 0) result = [] for (index, contour) in enumerate(contours): bndbox = cv2.boundingRect(contour) roi = image[(bndbox[1]:(bndbox[1] + bndbox[3]), bndbox[0]:(bndbox[0] + bndbox[2]))] response_points = np.vstack((np.where((np.array(roi) == 255))[1], np.where((np.array(roi) == 255))[0])).T response_points[(:, 0)] += bndbox[0] response_points[(:, 1)] += bndbox[1] result.append(response_points) return np.array(result)
find responding points in contours' bndbox and responding points are those points with value 255 in the OTSU result of weight hat like filtered image :param contours: :param image: OTSU threshold image :return:
Extract_line_candidates/binarized_filter_result.py
__find_response_points_in_contours
MaybeShewill-CV/DVCNN_Lane_Detection
19
python
@staticmethod def __find_response_points_in_contours(contours, image): "\n find responding points in contours' bndbox and responding points are those points with value 255 in the\n OTSU result of weight hat like filtered image\n :param contours:\n :param image: OTSU threshold image\n :return:\n " assert (len(contours) > 0) result = [] for (index, contour) in enumerate(contours): bndbox = cv2.boundingRect(contour) roi = image[(bndbox[1]:(bndbox[1] + bndbox[3]), bndbox[0]:(bndbox[0] + bndbox[2]))] response_points = np.vstack((np.where((np.array(roi) == 255))[1], np.where((np.array(roi) == 255))[0])).T response_points[(:, 0)] += bndbox[0] response_points[(:, 1)] += bndbox[1] result.append(response_points) return np.array(result)
@staticmethod def __find_response_points_in_contours(contours, image): "\n find responding points in contours' bndbox and responding points are those points with value 255 in the\n OTSU result of weight hat like filtered image\n :param contours:\n :param image: OTSU threshold image\n :return:\n " assert (len(contours) > 0) result = [] for (index, contour) in enumerate(contours): bndbox = cv2.boundingRect(contour) roi = image[(bndbox[1]:(bndbox[1] + bndbox[3]), bndbox[0]:(bndbox[0] + bndbox[2]))] response_points = np.vstack((np.where((np.array(roi) == 255))[1], np.where((np.array(roi) == 255))[0])).T response_points[(:, 0)] += bndbox[0] response_points[(:, 1)] += bndbox[1] result.append(response_points) return np.array(result)<|docstring|>find responding points in contours' bndbox and responding points are those points with value 255 in the OTSU result of weight hat like filtered image :param contours: :param image: OTSU threshold image :return:<|endoftext|>
20e7cbf2e181f0a7adf691071c18875347b6e387d3667aee4b2ce8f26cdaced8
def binarized_whatlike_filtered_image(self, img): '\n Do normalization and thresholding on the result of weighted hat-like filter image to extract line candidate\n :param img: input image\n :return: list of roi pair (top_roi, fv_roi) class which defined in imdb.py\n ' if (img is None): raise ValueError('Image data is invalid') image = img[(:, :, 0)] inds = np.where((image[(:, :)] > 650)) norm_thresh_img = np.zeros(image.shape).astype(np.uint8) norm_thresh_img[inds] = 255 (image, contours, hierarchy) = cv2.findContours(image=norm_thresh_img, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_TC89_KCOS) response_points = self.__find_response_points_in_contours(contours=contours, image=norm_thresh_img) result = [] valid_contours = 0 for (index, contour) in enumerate(contours): rotrect = cv2.minAreaRect(contour) if self.__is_rrect_valid(rotrect): roi_contours = contour roi_contours = np.reshape(roi_contours, newshape=(roi_contours.shape[0], roi_contours.shape[2])) roi_index = valid_contours valid_contours += 1 top_roi_db = imdb.Roidb(roi_index=roi_index, roi_contours=roi_contours, roi_response_points=response_points[index]) (fv_roi_db, roi_is_valid) = self.__map_roi_to_front_view(roidb=top_roi_db) if roi_is_valid: result.append((top_roi_db, fv_roi_db)) return (result, norm_thresh_img)
Do normalization and thresholding on the result of weighted hat-like filter image to extract line candidate :param img: input image :return: list of roi pair (top_roi, fv_roi) class which defined in imdb.py
Extract_line_candidates/binarized_filter_result.py
binarized_whatlike_filtered_image
MaybeShewill-CV/DVCNN_Lane_Detection
19
python
def binarized_whatlike_filtered_image(self, img): '\n Do normalization and thresholding on the result of weighted hat-like filter image to extract line candidate\n :param img: input image\n :return: list of roi pair (top_roi, fv_roi) class which defined in imdb.py\n ' if (img is None): raise ValueError('Image data is invalid') image = img[(:, :, 0)] inds = np.where((image[(:, :)] > 650)) norm_thresh_img = np.zeros(image.shape).astype(np.uint8) norm_thresh_img[inds] = 255 (image, contours, hierarchy) = cv2.findContours(image=norm_thresh_img, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_TC89_KCOS) response_points = self.__find_response_points_in_contours(contours=contours, image=norm_thresh_img) result = [] valid_contours = 0 for (index, contour) in enumerate(contours): rotrect = cv2.minAreaRect(contour) if self.__is_rrect_valid(rotrect): roi_contours = contour roi_contours = np.reshape(roi_contours, newshape=(roi_contours.shape[0], roi_contours.shape[2])) roi_index = valid_contours valid_contours += 1 top_roi_db = imdb.Roidb(roi_index=roi_index, roi_contours=roi_contours, roi_response_points=response_points[index]) (fv_roi_db, roi_is_valid) = self.__map_roi_to_front_view(roidb=top_roi_db) if roi_is_valid: result.append((top_roi_db, fv_roi_db)) return (result, norm_thresh_img)
def binarized_whatlike_filtered_image(self, img): '\n Do normalization and thresholding on the result of weighted hat-like filter image to extract line candidate\n :param img: input image\n :return: list of roi pair (top_roi, fv_roi) class which defined in imdb.py\n ' if (img is None): raise ValueError('Image data is invalid') image = img[(:, :, 0)] inds = np.where((image[(:, :)] > 650)) norm_thresh_img = np.zeros(image.shape).astype(np.uint8) norm_thresh_img[inds] = 255 (image, contours, hierarchy) = cv2.findContours(image=norm_thresh_img, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_TC89_KCOS) response_points = self.__find_response_points_in_contours(contours=contours, image=norm_thresh_img) result = [] valid_contours = 0 for (index, contour) in enumerate(contours): rotrect = cv2.minAreaRect(contour) if self.__is_rrect_valid(rotrect): roi_contours = contour roi_contours = np.reshape(roi_contours, newshape=(roi_contours.shape[0], roi_contours.shape[2])) roi_index = valid_contours valid_contours += 1 top_roi_db = imdb.Roidb(roi_index=roi_index, roi_contours=roi_contours, roi_response_points=response_points[index]) (fv_roi_db, roi_is_valid) = self.__map_roi_to_front_view(roidb=top_roi_db) if roi_is_valid: result.append((top_roi_db, fv_roi_db)) return (result, norm_thresh_img)<|docstring|>Do normalization and thresholding on the result of weighted hat-like filter image to extract line candidate :param img: input image :return: list of roi pair (top_roi, fv_roi) class which defined in imdb.py<|endoftext|>
71cb72370ecf16400b3ba8ee64e72393567b83214c540f783a06fda5408fb193
def next_states(self, state, action): '\n Returns a list of possible next environment states\n :rtype: list\n ' raise NotImplementedError
Returns a list of possible next environment states :rtype: list
source/environments.py
next_states
treszkai/pandor
1
python
def next_states(self, state, action): '\n Returns a list of possible next environment states\n :rtype: list\n ' raise NotImplementedError
def next_states(self, state, action): '\n Returns a list of possible next environment states\n :rtype: list\n ' raise NotImplementedError<|docstring|>Returns a list of possible next environment states :rtype: list<|endoftext|>
2973e2050eb4035bb54fbdca3ac6c4bc20057ec427f622400657b5a66e4c98ff
@property def init_states_p(self): 'Initial belief distribution\n A list of states and their probabilities\n Either init_states_p() or init_states() must be overwritten.\n ' sl_0 = self.init_states p_0 = (1.0 / len(sl_0)) return [(s_0, p_0) for s_0 in self.init_states]
Initial belief distribution A list of states and their probabilities Either init_states_p() or init_states() must be overwritten.
source/environments.py
init_states_p
treszkai/pandor
1
python
@property def init_states_p(self): 'Initial belief distribution\n A list of states and their probabilities\n Either init_states_p() or init_states() must be overwritten.\n ' sl_0 = self.init_states p_0 = (1.0 / len(sl_0)) return [(s_0, p_0) for s_0 in self.init_states]
@property def init_states_p(self): 'Initial belief distribution\n A list of states and their probabilities\n Either init_states_p() or init_states() must be overwritten.\n ' sl_0 = self.init_states p_0 = (1.0 / len(sl_0)) return [(s_0, p_0) for s_0 in self.init_states]<|docstring|>Initial belief distribution A list of states and their probabilities Either init_states_p() or init_states() must be overwritten.<|endoftext|>
9fab1e0c97de4782d4b18c9ae7c794c1908ee578d916fdc99fbc8362d04ca6f1
def next_states_p(self, state, action): '\n Returns a list of possible next environment states and their transition probabilities\n\n :rtype: list(state, probability)\n ' raise NotImplementedError
Returns a list of possible next environment states and their transition probabilities :rtype: list(state, probability)
source/environments.py
next_states_p
treszkai/pandor
1
python
def next_states_p(self, state, action): '\n Returns a list of possible next environment states and their transition probabilities\n\n :rtype: list(state, probability)\n ' raise NotImplementedError
def next_states_p(self, state, action): '\n Returns a list of possible next environment states and their transition probabilities\n\n :rtype: list(state, probability)\n ' raise NotImplementedError<|docstring|>Returns a list of possible next environment states and their transition probabilities :rtype: list(state, probability)<|endoftext|>
0ffc73677266e2591964f4c9603179ad6b4238027debb71c74421c81173a04fe
def isValidBST(self, root): '\n :type root: TreeNode\n :rtype: bool\n ' INT_MIN = (- (1 << 61)) INT_MAX = (1 << (61 - 1)) return validate(root, INT_MIN, INT_MAX)
:type root: TreeNode :rtype: bool
crack-data-structures-and-algorithms/leetcode/validate_binary_search_tree_q98.py
isValidBST
Watch-Later/Eureka
20
python
def isValidBST(self, root): '\n :type root: TreeNode\n :rtype: bool\n ' INT_MIN = (- (1 << 61)) INT_MAX = (1 << (61 - 1)) return validate(root, INT_MIN, INT_MAX)
def isValidBST(self, root): '\n :type root: TreeNode\n :rtype: bool\n ' INT_MIN = (- (1 << 61)) INT_MAX = (1 << (61 - 1)) return validate(root, INT_MIN, INT_MAX)<|docstring|>:type root: TreeNode :rtype: bool<|endoftext|>
97a8aecb9a76c77a3873a7f905678ba3692aec73f16615532878bd2a36bd0fda
def load_yaml_args(parser: HyperOptArgumentParser, log): ' Function that load the args defined in a YAML file and replaces the values\n parsed by the HyperOptArgumentParser ' old_args = vars(parser.parse_args()) configs = old_args.get('config') if configs: yaml_file = yaml.load(open(configs).read(), Loader=yaml.FullLoader) for (key, value) in yaml_file.items(): if (key in old_args): old_args[key] = value else: raise Exception('{} argument defined in {} is not valid!'.format(key, configs)) else: log.warning('We recommend the usage of YAML files to keep track of the hyperparameter during testing and training.') return TTNamespace(**old_args)
Function that load the args defined in a YAML file and replaces the values parsed by the HyperOptArgumentParser
caption/utils.py
load_yaml_args
Unbabel/caption
3
python
def load_yaml_args(parser: HyperOptArgumentParser, log): ' Function that load the args defined in a YAML file and replaces the values\n parsed by the HyperOptArgumentParser ' old_args = vars(parser.parse_args()) configs = old_args.get('config') if configs: yaml_file = yaml.load(open(configs).read(), Loader=yaml.FullLoader) for (key, value) in yaml_file.items(): if (key in old_args): old_args[key] = value else: raise Exception('{} argument defined in {} is not valid!'.format(key, configs)) else: log.warning('We recommend the usage of YAML files to keep track of the hyperparameter during testing and training.') return TTNamespace(**old_args)
def load_yaml_args(parser: HyperOptArgumentParser, log): ' Function that load the args defined in a YAML file and replaces the values\n parsed by the HyperOptArgumentParser ' old_args = vars(parser.parse_args()) configs = old_args.get('config') if configs: yaml_file = yaml.load(open(configs).read(), Loader=yaml.FullLoader) for (key, value) in yaml_file.items(): if (key in old_args): old_args[key] = value else: raise Exception('{} argument defined in {} is not valid!'.format(key, configs)) else: log.warning('We recommend the usage of YAML files to keep track of the hyperparameter during testing and training.') return TTNamespace(**old_args)<|docstring|>Function that load the args defined in a YAML file and replaces the values parsed by the HyperOptArgumentParser<|endoftext|>
d880a72364d4c2aabff91245a012c61633fafa8be3965d122e4ad06fa5384afb
def get_main_args_from_yaml(args): ' Function for loading the __main__ arguments directly from the YAML ' if (not args.config): raise Exception('You must pass a YAML file if not using the command line.') try: yaml_file = yaml.load(open(args.config).read(), Loader=yaml.FullLoader) return (yaml_file['optimizer'], yaml_file['scheduler'], yaml_file['model']) except KeyError as e: raise Exception('YAML file is missing the {} parameter.'.format(e.args[0]))
Function for loading the __main__ arguments directly from the YAML
caption/utils.py
get_main_args_from_yaml
Unbabel/caption
3
python
def get_main_args_from_yaml(args): ' ' if (not args.config): raise Exception('You must pass a YAML file if not using the command line.') try: yaml_file = yaml.load(open(args.config).read(), Loader=yaml.FullLoader) return (yaml_file['optimizer'], yaml_file['scheduler'], yaml_file['model']) except KeyError as e: raise Exception('YAML file is missing the {} parameter.'.format(e.args[0]))
def get_main_args_from_yaml(args): ' ' if (not args.config): raise Exception('You must pass a YAML file if not using the command line.') try: yaml_file = yaml.load(open(args.config).read(), Loader=yaml.FullLoader) return (yaml_file['optimizer'], yaml_file['scheduler'], yaml_file['model']) except KeyError as e: raise Exception('YAML file is missing the {} parameter.'.format(e.args[0]))<|docstring|>Function for loading the __main__ arguments directly from the YAML<|endoftext|>
d3f578d1c02626ac47e0b645a7f82a11368c11bc497a38b903b8399c0dff8caf
def setup_testube_logger(): ' Function that sets the TestTubeLogger to be used. ' try: job_id = os.environ['SLURM_JOB_ID'] except Exception: job_id = None now = datetime.now() dt_string = now.strftime('%d-%m-%Y--%H-%M-%S') return TestTubeLogger(save_dir='experiments/', version=(job_id if job_id else dt_string), name='lightning_logs')
Function that sets the TestTubeLogger to be used.
caption/utils.py
setup_testube_logger
Unbabel/caption
3
python
def setup_testube_logger(): ' ' try: job_id = os.environ['SLURM_JOB_ID'] except Exception: job_id = None now = datetime.now() dt_string = now.strftime('%d-%m-%Y--%H-%M-%S') return TestTubeLogger(save_dir='experiments/', version=(job_id if job_id else dt_string), name='lightning_logs')
def setup_testube_logger(): ' ' try: job_id = os.environ['SLURM_JOB_ID'] except Exception: job_id = None now = datetime.now() dt_string = now.strftime('%d-%m-%Y--%H-%M-%S') return TestTubeLogger(save_dir='experiments/', version=(job_id if job_id else dt_string), name='lightning_logs')<|docstring|>Function that sets the TestTubeLogger to be used.<|endoftext|>
a2eec506097b3e131752e09e458727db96cf90efb85470d5a834c8c070f643e5
def scatterdots(data, x, axh=None, width=0.8, returnx=False, rseed=820, **kwargs): 'Dots plotted with random x-coordinates and y-coordinates from data array.\n\n Parameters\n ----------\n data : ndarray\n x : float\n Specifies the center of the dot cloud on the x-axis.\n axh : matplotlib figure handle\n If None then use plt.gca()\n width : float\n Specifies the range of the dots along the x-axis.\n returnx : bool\n If True, return the x-coordinates of the plotted data points.\n rseed : float\n Random seed. Defaults to a constant so that regenerated figures of\n the same data are identical.\n\n Returns\n -------\n Optionally returns the x-coordinates as plotted.' if (axh is None): axh = plt.gca() np.random.seed(rseed) if ((data is None) or (len(data) == 0)): if returnx: return None return if (not isinstance(data, np.ndarray)): data = np.array(data) validi = np.arange(len(data)) if any(np.isnan(data)): validi = np.where(np.logical_not(np.isnan(data)))[0] ploty = data[validi] if (len(ploty) == 0): if returnx: return None return w = width plotx = np.random.permutation((np.linspace(((- w) / 2.0), (w / 2.0), len(ploty)) + x)) axh.scatter(plotx, ploty, **kwargs) if returnx: outx = (np.nan * np.ones(data.shape)) outx[validi] = plotx return outx
Dots plotted with random x-coordinates and y-coordinates from data array. Parameters ---------- data : ndarray x : float Specifies the center of the dot cloud on the x-axis. axh : matplotlib figure handle If None then use plt.gca() width : float Specifies the range of the dots along the x-axis. returnx : bool If True, return the x-coordinates of the plotted data points. rseed : float Random seed. Defaults to a constant so that regenerated figures of the same data are identical. Returns ------- Optionally returns the x-coordinates as plotted.
myboxplot.py
scatterdots
big0tim1/Cycluster
0
python
def scatterdots(data, x, axh=None, width=0.8, returnx=False, rseed=820, **kwargs): 'Dots plotted with random x-coordinates and y-coordinates from data array.\n\n Parameters\n ----------\n data : ndarray\n x : float\n Specifies the center of the dot cloud on the x-axis.\n axh : matplotlib figure handle\n If None then use plt.gca()\n width : float\n Specifies the range of the dots along the x-axis.\n returnx : bool\n If True, return the x-coordinates of the plotted data points.\n rseed : float\n Random seed. Defaults to a constant so that regenerated figures of\n the same data are identical.\n\n Returns\n -------\n Optionally returns the x-coordinates as plotted.' if (axh is None): axh = plt.gca() np.random.seed(rseed) if ((data is None) or (len(data) == 0)): if returnx: return None return if (not isinstance(data, np.ndarray)): data = np.array(data) validi = np.arange(len(data)) if any(np.isnan(data)): validi = np.where(np.logical_not(np.isnan(data)))[0] ploty = data[validi] if (len(ploty) == 0): if returnx: return None return w = width plotx = np.random.permutation((np.linspace(((- w) / 2.0), (w / 2.0), len(ploty)) + x)) axh.scatter(plotx, ploty, **kwargs) if returnx: outx = (np.nan * np.ones(data.shape)) outx[validi] = plotx return outx
def scatterdots(data, x, axh=None, width=0.8, returnx=False, rseed=820, **kwargs): 'Dots plotted with random x-coordinates and y-coordinates from data array.\n\n Parameters\n ----------\n data : ndarray\n x : float\n Specifies the center of the dot cloud on the x-axis.\n axh : matplotlib figure handle\n If None then use plt.gca()\n width : float\n Specifies the range of the dots along the x-axis.\n returnx : bool\n If True, return the x-coordinates of the plotted data points.\n rseed : float\n Random seed. Defaults to a constant so that regenerated figures of\n the same data are identical.\n\n Returns\n -------\n Optionally returns the x-coordinates as plotted.' if (axh is None): axh = plt.gca() np.random.seed(rseed) if ((data is None) or (len(data) == 0)): if returnx: return None return if (not isinstance(data, np.ndarray)): data = np.array(data) validi = np.arange(len(data)) if any(np.isnan(data)): validi = np.where(np.logical_not(np.isnan(data)))[0] ploty = data[validi] if (len(ploty) == 0): if returnx: return None return w = width plotx = np.random.permutation((np.linspace(((- w) / 2.0), (w / 2.0), len(ploty)) + x)) axh.scatter(plotx, ploty, **kwargs) if returnx: outx = (np.nan * np.ones(data.shape)) outx[validi] = plotx return outx<|docstring|>Dots plotted with random x-coordinates and y-coordinates from data array. Parameters ---------- data : ndarray x : float Specifies the center of the dot cloud on the x-axis. axh : matplotlib figure handle If None then use plt.gca() width : float Specifies the range of the dots along the x-axis. returnx : bool If True, return the x-coordinates of the plotted data points. rseed : float Random seed. Defaults to a constant so that regenerated figures of the same data are identical. Returns ------- Optionally returns the x-coordinates as plotted.<|endoftext|>
589a725237c0f5337a795d3ceb66a593b22d992cd7354903d80f1eb84952148b
def myboxplot(data, x=1, axh=None, width=0.8, boxcolor='black', scatterwidth=0.6, dotcolor='red', returnx=False, subsetInd=None, altDotcolor='gray', violin=False, **kwargs): 'Make a boxplot with scatterdots overlaid.\n\n Parameters\n ----------\n data : np.ndarray or pd.Series\n x : float\n Position of box along x-axis.\n axh : matplotlib figure handle\n If None then use plt.gca()\n width : float\n Width of the box.\n boxcolor : mpl color\n scatterwidth : float\n Width of the spread of the data points.\n dotcolor : mpl color\n subsetInd : boolean or int index\n Indicates a subset of the data that should be summarized in the boxplot.\n However, all data points will be plotted.\n altDotcolor : mpl color\n Specify the color of the data points that are not in the subset.\n returnx : bool\n Return the x-coordinates of the data points.\n violin : bool\n Specify whether the box is a violin plot.\n\n Returns\n -------\n outx : np.ndarray\n Optionall, an array of the x-coordinates as plotted.' if (axh is None): axh = plt.gca() if isinstance(data, pd.Series): data = data.values if (not (subsetInd is None)): if (not (subsetInd.dtype == np.array([0, 1], dtype=bool).dtype)): tmp = np.zeros(data.shape, dtype=bool) tmp[subsetInd] = True subsetInd = tmp else: subsetInd = np.ones(data.shape, dtype=bool) subsetInd = np.asarray(subsetInd) if (not ('s' in kwargs)): kwargs['s'] = 20 if (not ('marker' in kwargs)): kwargs['marker'] = 'o' if (not ('linewidths' in kwargs)): kwargs['linewidths'] = 0.5 'Boxplot with dots overlaid' outx = np.zeros(data.shape) if (subsetInd.sum() > 0): if ((not (boxcolor == 'none')) and (not (boxcolor is None))): if (violin and False): sns.violinplot(data[subsetInd], color=boxcolor, positions=[x], alpha=0.5) else: bp = axh.boxplot(data[subsetInd], positions=[x], widths=width, sym='') for element in list(bp.keys()): for b in bp[element]: b.set_color(boxcolor) kwargs['c'] = dotcolor subsetx = scatterdots(data[subsetInd], x=x, axh=axh, width=scatterwidth, 
returnx=True, **kwargs) outx[subsetInd] = subsetx if ((~ subsetInd).sum() > 0): kwargs['c'] = altDotcolor subsetx = scatterdots(data[(~ subsetInd)], x=x, axh=axh, width=scatterwidth, returnx=True, **kwargs) outx[(~ subsetInd)] = subsetx if returnx: return outx
Make a boxplot with scatterdots overlaid. Parameters ---------- data : np.ndarray or pd.Series x : float Position of box along x-axis. axh : matplotlib figure handle If None then use plt.gca() width : float Width of the box. boxcolor : mpl color scatterwidth : float Width of the spread of the data points. dotcolor : mpl color subsetInd : boolean or int index Indicates a subset of the data that should be summarized in the boxplot. However, all data points will be plotted. altDotcolor : mpl color Specify the color of the data points that are not in the subset. returnx : bool Return the x-coordinates of the data points. violin : bool Specify whether the box is a violin plot. Returns ------- outx : np.ndarray Optionall, an array of the x-coordinates as plotted.
myboxplot.py
myboxplot
big0tim1/Cycluster
0
python
def myboxplot(data, x=1, axh=None, width=0.8, boxcolor='black', scatterwidth=0.6, dotcolor='red', returnx=False, subsetInd=None, altDotcolor='gray', violin=False, **kwargs): 'Make a boxplot with scatterdots overlaid.\n\n Parameters\n ----------\n data : np.ndarray or pd.Series\n x : float\n Position of box along x-axis.\n axh : matplotlib figure handle\n If None then use plt.gca()\n width : float\n Width of the box.\n boxcolor : mpl color\n scatterwidth : float\n Width of the spread of the data points.\n dotcolor : mpl color\n subsetInd : boolean or int index\n Indicates a subset of the data that should be summarized in the boxplot.\n However, all data points will be plotted.\n altDotcolor : mpl color\n Specify the color of the data points that are not in the subset.\n returnx : bool\n Return the x-coordinates of the data points.\n violin : bool\n Specify whether the box is a violin plot.\n\n Returns\n -------\n outx : np.ndarray\n Optionall, an array of the x-coordinates as plotted.' if (axh is None): axh = plt.gca() if isinstance(data, pd.Series): data = data.values if (not (subsetInd is None)): if (not (subsetInd.dtype == np.array([0, 1], dtype=bool).dtype)): tmp = np.zeros(data.shape, dtype=bool) tmp[subsetInd] = True subsetInd = tmp else: subsetInd = np.ones(data.shape, dtype=bool) subsetInd = np.asarray(subsetInd) if (not ('s' in kwargs)): kwargs['s'] = 20 if (not ('marker' in kwargs)): kwargs['marker'] = 'o' if (not ('linewidths' in kwargs)): kwargs['linewidths'] = 0.5 'Boxplot with dots overlaid' outx = np.zeros(data.shape) if (subsetInd.sum() > 0): if ((not (boxcolor == 'none')) and (not (boxcolor is None))): if (violin and False): sns.violinplot(data[subsetInd], color=boxcolor, positions=[x], alpha=0.5) else: bp = axh.boxplot(data[subsetInd], positions=[x], widths=width, sym=) for element in list(bp.keys()): for b in bp[element]: b.set_color(boxcolor) kwargs['c'] = dotcolor subsetx = scatterdots(data[subsetInd], x=x, axh=axh, width=scatterwidth, 
returnx=True, **kwargs) outx[subsetInd] = subsetx if ((~ subsetInd).sum() > 0): kwargs['c'] = altDotcolor subsetx = scatterdots(data[(~ subsetInd)], x=x, axh=axh, width=scatterwidth, returnx=True, **kwargs) outx[(~ subsetInd)] = subsetx if returnx: return outx
def myboxplot(data, x=1, axh=None, width=0.8, boxcolor='black', scatterwidth=0.6, dotcolor='red', returnx=False, subsetInd=None, altDotcolor='gray', violin=False, **kwargs): 'Make a boxplot with scatterdots overlaid.\n\n Parameters\n ----------\n data : np.ndarray or pd.Series\n x : float\n Position of box along x-axis.\n axh : matplotlib figure handle\n If None then use plt.gca()\n width : float\n Width of the box.\n boxcolor : mpl color\n scatterwidth : float\n Width of the spread of the data points.\n dotcolor : mpl color\n subsetInd : boolean or int index\n Indicates a subset of the data that should be summarized in the boxplot.\n However, all data points will be plotted.\n altDotcolor : mpl color\n Specify the color of the data points that are not in the subset.\n returnx : bool\n Return the x-coordinates of the data points.\n violin : bool\n Specify whether the box is a violin plot.\n\n Returns\n -------\n outx : np.ndarray\n Optionall, an array of the x-coordinates as plotted.' if (axh is None): axh = plt.gca() if isinstance(data, pd.Series): data = data.values if (not (subsetInd is None)): if (not (subsetInd.dtype == np.array([0, 1], dtype=bool).dtype)): tmp = np.zeros(data.shape, dtype=bool) tmp[subsetInd] = True subsetInd = tmp else: subsetInd = np.ones(data.shape, dtype=bool) subsetInd = np.asarray(subsetInd) if (not ('s' in kwargs)): kwargs['s'] = 20 if (not ('marker' in kwargs)): kwargs['marker'] = 'o' if (not ('linewidths' in kwargs)): kwargs['linewidths'] = 0.5 'Boxplot with dots overlaid' outx = np.zeros(data.shape) if (subsetInd.sum() > 0): if ((not (boxcolor == 'none')) and (not (boxcolor is None))): if (violin and False): sns.violinplot(data[subsetInd], color=boxcolor, positions=[x], alpha=0.5) else: bp = axh.boxplot(data[subsetInd], positions=[x], widths=width, sym=) for element in list(bp.keys()): for b in bp[element]: b.set_color(boxcolor) kwargs['c'] = dotcolor subsetx = scatterdots(data[subsetInd], x=x, axh=axh, width=scatterwidth, 
returnx=True, **kwargs) outx[subsetInd] = subsetx if ((~ subsetInd).sum() > 0): kwargs['c'] = altDotcolor subsetx = scatterdots(data[(~ subsetInd)], x=x, axh=axh, width=scatterwidth, returnx=True, **kwargs) outx[(~ subsetInd)] = subsetx if returnx: return outx<|docstring|>Make a boxplot with scatterdots overlaid. Parameters ---------- data : np.ndarray or pd.Series x : float Position of box along x-axis. axh : matplotlib figure handle If None then use plt.gca() width : float Width of the box. boxcolor : mpl color scatterwidth : float Width of the spread of the data points. dotcolor : mpl color subsetInd : boolean or int index Indicates a subset of the data that should be summarized in the boxplot. However, all data points will be plotted. altDotcolor : mpl color Specify the color of the data points that are not in the subset. returnx : bool Return the x-coordinates of the data points. violin : bool Specify whether the box is a violin plot. Returns ------- outx : np.ndarray Optionall, an array of the x-coordinates as plotted.<|endoftext|>
4d1d20ca188800e25d0e954827ba4a0f8862e2bb6f762f39bd53ff792c205207
def manyboxplots(df, cols=None, axh=None, colLabels=None, annotation='N', horizontal=False, vRange=None, xRot=0, **kwargs): 'Series of boxplots along x-axis (or flipped horizontally along y-axis [NOT IMPLEMENTED])\n\n WORK IN PROGRESS\n\n Optionally add annotation for each boxplot with:\n (1) "N"\n (2) "pctpos" (response rate, by additionally specifying responders)\n NOT YET IMPLEMENTED\n\n Parameters\n ----------\n df : pd.DataFrame\n cols : list\n Column names to be plotted\n axh : matplotlib figure handle\n If None then use plt.gca()\n colLabels : list\n Column labels (optional)\n annotation : str or None\n Specifies what the annotation should be: "N" or "pctpos"\n horizontal : bool\n Specifies whether boxplots should be vertical (default, False) or horizontal (True)\n kwargs : additional arguments\n Passed to myboxplot function to specify colors etc.' if (axh is None): axh = plt.gca() if (cols is None): cols = df.columns if (colLabels is None): colLabels = cols elif (len(colLabels) < cols): colLabels += cols[len(colLabels):] for (x, c) in enumerate(cols): myboxplot(df[c].dropna(), x=x, axh=axh, **kwargs) if (not (vRange is None)): plt.ylim(vRange) yl = plt.ylim() annotationKwargs = dict(xytext=(0, (- 10)), textcoords='offset points', ha='center', va='top', size='medium') for (x, c) in enumerate(cols): tmp = df[c].dropna() if (annotation == 'N'): plt.annotate(('%d' % len(tmp)), xy=(x, yl[1]), **annotationKwargs) elif (annotation == 'pctpos'): pass plt.xlim(((- 1), (x + 1))) plt.xticks(np.arange((x + 1))) xlabelsL = axh.set_xticklabels(colLabels, fontsize='large', rotation=xRot, fontname='Consolas')
Series of boxplots along x-axis (or flipped horizontally along y-axis [NOT IMPLEMENTED]) WORK IN PROGRESS Optionally add annotation for each boxplot with: (1) "N" (2) "pctpos" (response rate, by additionally specifying responders) NOT YET IMPLEMENTED Parameters ---------- df : pd.DataFrame cols : list Column names to be plotted axh : matplotlib figure handle If None then use plt.gca() colLabels : list Column labels (optional) annotation : str or None Specifies what the annotation should be: "N" or "pctpos" horizontal : bool Specifies whether boxplots should be vertical (default, False) or horizontal (True) kwargs : additional arguments Passed to myboxplot function to specify colors etc.
myboxplot.py
manyboxplots
big0tim1/Cycluster
0
python
def manyboxplots(df, cols=None, axh=None, colLabels=None, annotation='N', horizontal=False, vRange=None, xRot=0, **kwargs): 'Series of boxplots along x-axis (or flipped horizontally along y-axis [NOT IMPLEMENTED])\n\n WORK IN PROGRESS\n\n Optionally add annotation for each boxplot with:\n (1) "N"\n (2) "pctpos" (response rate, by additionally specifying responders)\n NOT YET IMPLEMENTED\n\n Parameters\n ----------\n df : pd.DataFrame\n cols : list\n Column names to be plotted\n axh : matplotlib figure handle\n If None then use plt.gca()\n colLabels : list\n Column labels (optional)\n annotation : str or None\n Specifies what the annotation should be: "N" or "pctpos"\n horizontal : bool\n Specifies whether boxplots should be vertical (default, False) or horizontal (True)\n kwargs : additional arguments\n Passed to myboxplot function to specify colors etc.' if (axh is None): axh = plt.gca() if (cols is None): cols = df.columns if (colLabels is None): colLabels = cols elif (len(colLabels) < cols): colLabels += cols[len(colLabels):] for (x, c) in enumerate(cols): myboxplot(df[c].dropna(), x=x, axh=axh, **kwargs) if (not (vRange is None)): plt.ylim(vRange) yl = plt.ylim() annotationKwargs = dict(xytext=(0, (- 10)), textcoords='offset points', ha='center', va='top', size='medium') for (x, c) in enumerate(cols): tmp = df[c].dropna() if (annotation == 'N'): plt.annotate(('%d' % len(tmp)), xy=(x, yl[1]), **annotationKwargs) elif (annotation == 'pctpos'): pass plt.xlim(((- 1), (x + 1))) plt.xticks(np.arange((x + 1))) xlabelsL = axh.set_xticklabels(colLabels, fontsize='large', rotation=xRot, fontname='Consolas')
def manyboxplots(df, cols=None, axh=None, colLabels=None, annotation='N', horizontal=False, vRange=None, xRot=0, **kwargs): 'Series of boxplots along x-axis (or flipped horizontally along y-axis [NOT IMPLEMENTED])\n\n WORK IN PROGRESS\n\n Optionally add annotation for each boxplot with:\n (1) "N"\n (2) "pctpos" (response rate, by additionally specifying responders)\n NOT YET IMPLEMENTED\n\n Parameters\n ----------\n df : pd.DataFrame\n cols : list\n Column names to be plotted\n axh : matplotlib figure handle\n If None then use plt.gca()\n colLabels : list\n Column labels (optional)\n annotation : str or None\n Specifies what the annotation should be: "N" or "pctpos"\n horizontal : bool\n Specifies whether boxplots should be vertical (default, False) or horizontal (True)\n kwargs : additional arguments\n Passed to myboxplot function to specify colors etc.' if (axh is None): axh = plt.gca() if (cols is None): cols = df.columns if (colLabels is None): colLabels = cols elif (len(colLabels) < cols): colLabels += cols[len(colLabels):] for (x, c) in enumerate(cols): myboxplot(df[c].dropna(), x=x, axh=axh, **kwargs) if (not (vRange is None)): plt.ylim(vRange) yl = plt.ylim() annotationKwargs = dict(xytext=(0, (- 10)), textcoords='offset points', ha='center', va='top', size='medium') for (x, c) in enumerate(cols): tmp = df[c].dropna() if (annotation == 'N'): plt.annotate(('%d' % len(tmp)), xy=(x, yl[1]), **annotationKwargs) elif (annotation == 'pctpos'): pass plt.xlim(((- 1), (x + 1))) plt.xticks(np.arange((x + 1))) xlabelsL = axh.set_xticklabels(colLabels, fontsize='large', rotation=xRot, fontname='Consolas')<|docstring|>Series of boxplots along x-axis (or flipped horizontally along y-axis [NOT IMPLEMENTED]) WORK IN PROGRESS Optionally add annotation for each boxplot with: (1) "N" (2) "pctpos" (response rate, by additionally specifying responders) NOT YET IMPLEMENTED Parameters ---------- df : pd.DataFrame cols : list Column names to be plotted axh : matplotlib figure 
handle If None then use plt.gca() colLabels : list Column labels (optional) annotation : str or None Specifies what the annotation should be: "N" or "pctpos" horizontal : bool Specifies whether boxplots should be vertical (default, False) or horizontal (True) kwargs : additional arguments Passed to myboxplot function to specify colors etc.<|endoftext|>
3a972a58d6c40dc0ac59bd6a1c57c20f91ba8f0b87908dd695a0b6767e1a31ea
def swarmbox(x, y, data, hue=None, palette=None, order=None, hue_order=None, connect=False, connect_on=[], legend_loc=0, legend_bbox=None, swarm_alpha=1, swarm_size=5, box_alpha=1, box_edgecolor='k', box_facewhite=False): 'Based on seaborn boxplots and swarmplots.\n Adds the option to connect dots by joining on an identifier columns' if ((palette is None) and (not (hue is None))): palette = sns.color_palette('Set2', n_colors=data[hue].unique().shape[0]) if ((hue_order is None) and (not (hue is None))): hue_order = sorted(data[hue].unique()) if (order is None): order = sorted(data[x].unique()) params = dict(data=data, x=x, y=y, hue=hue, order=order, hue_order=hue_order) box_axh = sns.boxplot(**params, fliersize=0, linewidth=1.5, palette=palette) for patch in box_axh.artists: patch.set_edgecolor((0, 0, 0, 1)) (r, g, b, a) = patch.get_facecolor() if box_facewhite: patch.set_facecolor((1, 1, 1, 1)) else: patch.set_facecolor((r, g, b, box_alpha)) for line in box_axh.lines: line.set_color(box_edgecolor) swarm = sns.swarmplot(**params, linewidth=0.5, edgecolor='black', dodge=True, alpha=swarm_alpha, size=swarm_size, palette=palette) if (connect and (not (hue is None))): for i in range((len(hue_order) - 1)): 'Loop over pairs of hues (i.e. grouped boxes)' curHues = hue_order[i:(i + 2)] 'Pull out just the swarm collections that are needed' zipper = ([order] + [swarm.collections[i::len(hue_order)], swarm.collections[(i + 1)::len(hue_order)]]) for (curx, cA, cB) in zip(*zipper): 'Loop over the x positions (i.e. 
outer groups)' indA = ((data[x] == curx) & (data[hue] == curHues[0])) indB = ((data[x] == curx) & (data[hue] == curHues[1])) 'Locate the data and match it up with the points plotted for each hue' tmpA = data[([x, hue, y] + connect_on)].loc[indA].dropna() tmpB = data[([x, hue, y] + connect_on)].loc[indB].dropna() plottedA = cA.get_offsets() plottedB = cB.get_offsets() 'Merge the data from each hue, including the new detangled x coords,\n based on what was plotted' tmpA.loc[(:, '_untangi')] = untangle(tmpA[y].values, plottedA[(:, 1)]) tmpB.loc[(:, '_untangi')] = untangle(tmpB[y].values, plottedB[(:, 1)]) tmpA.loc[(:, '_newx')] = plottedA[(:, 0)][tmpA['_untangi'].values] tmpB.loc[(:, '_newx')] = plottedB[(:, 0)][tmpB['_untangi'].values] "Using 'inner' drops the data points that are in one hue grouping and not the other" tmp = pd.merge(tmpA, tmpB, left_on=connect_on, right_on=connect_on, suffixes=('_A', '_B'), how='inner') 'Plot them one by one' for (rind, r) in tmp.iterrows(): plt.plot(r[['_newx_A', '_newx_B']], r[[(y + '_A'), (y + '_B')]], '-', color='gray', linewidth=0.5) if ((not (hue is None)) and (not (legend_loc is None))): plt.legend([plt.Circle(1, color=c, alpha=1) for c in palette], hue_order, title=hue, loc=legend_loc, bbox_to_anchor=legend_bbox) if (legend_loc is None): plt.gca().legend_.remove()
Based on seaborn boxplots and swarmplots. Adds the option to connect dots by joining on an identifier columns
myboxplot.py
swarmbox
big0tim1/Cycluster
0
python
def swarmbox(x, y, data, hue=None, palette=None, order=None, hue_order=None, connect=False, connect_on=[], legend_loc=0, legend_bbox=None, swarm_alpha=1, swarm_size=5, box_alpha=1, box_edgecolor='k', box_facewhite=False): 'Based on seaborn boxplots and swarmplots.\n Adds the option to connect dots by joining on an identifier columns' if ((palette is None) and (not (hue is None))): palette = sns.color_palette('Set2', n_colors=data[hue].unique().shape[0]) if ((hue_order is None) and (not (hue is None))): hue_order = sorted(data[hue].unique()) if (order is None): order = sorted(data[x].unique()) params = dict(data=data, x=x, y=y, hue=hue, order=order, hue_order=hue_order) box_axh = sns.boxplot(**params, fliersize=0, linewidth=1.5, palette=palette) for patch in box_axh.artists: patch.set_edgecolor((0, 0, 0, 1)) (r, g, b, a) = patch.get_facecolor() if box_facewhite: patch.set_facecolor((1, 1, 1, 1)) else: patch.set_facecolor((r, g, b, box_alpha)) for line in box_axh.lines: line.set_color(box_edgecolor) swarm = sns.swarmplot(**params, linewidth=0.5, edgecolor='black', dodge=True, alpha=swarm_alpha, size=swarm_size, palette=palette) if (connect and (not (hue is None))): for i in range((len(hue_order) - 1)): 'Loop over pairs of hues (i.e. grouped boxes)' curHues = hue_order[i:(i + 2)] 'Pull out just the swarm collections that are needed' zipper = ([order] + [swarm.collections[i::len(hue_order)], swarm.collections[(i + 1)::len(hue_order)]]) for (curx, cA, cB) in zip(*zipper): 'Loop over the x positions (i.e. 
outer groups)' indA = ((data[x] == curx) & (data[hue] == curHues[0])) indB = ((data[x] == curx) & (data[hue] == curHues[1])) 'Locate the data and match it up with the points plotted for each hue' tmpA = data[([x, hue, y] + connect_on)].loc[indA].dropna() tmpB = data[([x, hue, y] + connect_on)].loc[indB].dropna() plottedA = cA.get_offsets() plottedB = cB.get_offsets() 'Merge the data from each hue, including the new detangled x coords,\n based on what was plotted' tmpA.loc[(:, '_untangi')] = untangle(tmpA[y].values, plottedA[(:, 1)]) tmpB.loc[(:, '_untangi')] = untangle(tmpB[y].values, plottedB[(:, 1)]) tmpA.loc[(:, '_newx')] = plottedA[(:, 0)][tmpA['_untangi'].values] tmpB.loc[(:, '_newx')] = plottedB[(:, 0)][tmpB['_untangi'].values] "Using 'inner' drops the data points that are in one hue grouping and not the other" tmp = pd.merge(tmpA, tmpB, left_on=connect_on, right_on=connect_on, suffixes=('_A', '_B'), how='inner') 'Plot them one by one' for (rind, r) in tmp.iterrows(): plt.plot(r[['_newx_A', '_newx_B']], r[[(y + '_A'), (y + '_B')]], '-', color='gray', linewidth=0.5) if ((not (hue is None)) and (not (legend_loc is None))): plt.legend([plt.Circle(1, color=c, alpha=1) for c in palette], hue_order, title=hue, loc=legend_loc, bbox_to_anchor=legend_bbox) if (legend_loc is None): plt.gca().legend_.remove()
def swarmbox(x, y, data, hue=None, palette=None, order=None, hue_order=None, connect=False, connect_on=[], legend_loc=0, legend_bbox=None, swarm_alpha=1, swarm_size=5, box_alpha=1, box_edgecolor='k', box_facewhite=False): 'Based on seaborn boxplots and swarmplots.\n Adds the option to connect dots by joining on an identifier columns' if ((palette is None) and (not (hue is None))): palette = sns.color_palette('Set2', n_colors=data[hue].unique().shape[0]) if ((hue_order is None) and (not (hue is None))): hue_order = sorted(data[hue].unique()) if (order is None): order = sorted(data[x].unique()) params = dict(data=data, x=x, y=y, hue=hue, order=order, hue_order=hue_order) box_axh = sns.boxplot(**params, fliersize=0, linewidth=1.5, palette=palette) for patch in box_axh.artists: patch.set_edgecolor((0, 0, 0, 1)) (r, g, b, a) = patch.get_facecolor() if box_facewhite: patch.set_facecolor((1, 1, 1, 1)) else: patch.set_facecolor((r, g, b, box_alpha)) for line in box_axh.lines: line.set_color(box_edgecolor) swarm = sns.swarmplot(**params, linewidth=0.5, edgecolor='black', dodge=True, alpha=swarm_alpha, size=swarm_size, palette=palette) if (connect and (not (hue is None))): for i in range((len(hue_order) - 1)): 'Loop over pairs of hues (i.e. grouped boxes)' curHues = hue_order[i:(i + 2)] 'Pull out just the swarm collections that are needed' zipper = ([order] + [swarm.collections[i::len(hue_order)], swarm.collections[(i + 1)::len(hue_order)]]) for (curx, cA, cB) in zip(*zipper): 'Loop over the x positions (i.e. 
outer groups)' indA = ((data[x] == curx) & (data[hue] == curHues[0])) indB = ((data[x] == curx) & (data[hue] == curHues[1])) 'Locate the data and match it up with the points plotted for each hue' tmpA = data[([x, hue, y] + connect_on)].loc[indA].dropna() tmpB = data[([x, hue, y] + connect_on)].loc[indB].dropna() plottedA = cA.get_offsets() plottedB = cB.get_offsets() 'Merge the data from each hue, including the new detangled x coords,\n based on what was plotted' tmpA.loc[(:, '_untangi')] = untangle(tmpA[y].values, plottedA[(:, 1)]) tmpB.loc[(:, '_untangi')] = untangle(tmpB[y].values, plottedB[(:, 1)]) tmpA.loc[(:, '_newx')] = plottedA[(:, 0)][tmpA['_untangi'].values] tmpB.loc[(:, '_newx')] = plottedB[(:, 0)][tmpB['_untangi'].values] "Using 'inner' drops the data points that are in one hue grouping and not the other" tmp = pd.merge(tmpA, tmpB, left_on=connect_on, right_on=connect_on, suffixes=('_A', '_B'), how='inner') 'Plot them one by one' for (rind, r) in tmp.iterrows(): plt.plot(r[['_newx_A', '_newx_B']], r[[(y + '_A'), (y + '_B')]], '-', color='gray', linewidth=0.5) if ((not (hue is None)) and (not (legend_loc is None))): plt.legend([plt.Circle(1, color=c, alpha=1) for c in palette], hue_order, title=hue, loc=legend_loc, bbox_to_anchor=legend_bbox) if (legend_loc is None): plt.gca().legend_.remove()<|docstring|>Based on seaborn boxplots and swarmplots. Adds the option to connect dots by joining on an identifier columns<|endoftext|>
fe142ef327a9175817991b61998b3f219044773569bf787d0bd7172630d7d234
def parse_isolation_level(isolation_lvl: Optional[str]) -> IsolationLevel: '\n Convert textual description to an isolation level\n ' if ((isolation_lvl is None) or (len(isolation_lvl) < 2)): return IsolationLevel.PL0 isolation_lvl = isolation_lvl.strip().upper() if (isolation_lvl[:2] == 'PL'): suffix: str = isolation_lvl[2:] if ('SS' in suffix): return IsolationLevel.PLSS elif ('3U' in suffix): return IsolationLevel.PL3U elif ('99' in suffix): return IsolationLevel.PL299 elif ('SI' in suffix): return IsolationLevel.PLSI elif ('FCV' in suffix): return IsolationLevel.PLFCV elif (('+' in suffix) or ('PLUS' in suffix)): return IsolationLevel.PL2plus elif ('MSR' in suffix): return IsolationLevel.PLMSR elif ('2L' in suffix): return IsolationLevel.PL2L elif ('3' == suffix[(- 1)]): return IsolationLevel.PL3 elif ('2' == suffix[(- 1)]): return IsolationLevel.PL2 elif ('1' == suffix[(- 1)]): return IsolationLevel.PL1 elif ('0' == suffix[(- 1)]): return IsolationLevel.PL0 else: raise ValueError('Unknown PL isolation level: {}'.format(isolation_lvl)) else: if (('CURSOR' in isolation_lvl) and ('STABILITY' in isolation_lvl)): return IsolationLevel.PLCS elif (('MONOTONIC' in isolation_lvl) and ('VIEW' in isolation_lvl)): return IsolationLevel.PL2L elif (('MONOTONIC' in isolation_lvl) and ('SNAPSHOT' in isolation_lvl) and ('READS' in isolation_lvl)): return IsolationLevel.PLMSR elif (('CONSISTENT' in isolation_lvl) and ('VIEW' in isolation_lvl)): return (IsolationLevel.PLFCV if ('FORWARD' in isolation_lvl) else IsolationLevel.PL2plus) elif (('SNAPSHOT' in isolation_lvl) and ('ISOLATION' in isolation_lvl)): return IsolationLevel.PLSI elif (('REPEATABLE' in isolation_lvl) and ('READ' in isolation_lvl)): return IsolationLevel.PL299 elif (('SERIALIZIBILITY' in isolation_lvl) or ('SERIALIZABLE' in isolation_lvl)): if ('UPDATE' in isolation_lvl): return IsolationLevel.PL3U elif ('STRICT' in isolation_lvl): return IsolationLevel.PLSS else: return IsolationLevel.PL3 elif ('READ' in 
isolation_lvl): if ('UNCOMMITTED' in isolation_lvl): return IsolationLevel.PL1 elif ('COMMITTED' in isolation_lvl): return IsolationLevel.PL2 raise ValueError('Unknown isolation level: {}\nKnown Isolation Levels:\n{}'.format(isolation_lvl, '\n'.join((repr(a) for a in IsolationLevel))))
Convert textual description to an isolation level
frodo/checker.py
parse_isolation_level
memsql/frodo
4
python
def parse_isolation_level(isolation_lvl: Optional[str]) -> IsolationLevel: '\n \n ' if ((isolation_lvl is None) or (len(isolation_lvl) < 2)): return IsolationLevel.PL0 isolation_lvl = isolation_lvl.strip().upper() if (isolation_lvl[:2] == 'PL'): suffix: str = isolation_lvl[2:] if ('SS' in suffix): return IsolationLevel.PLSS elif ('3U' in suffix): return IsolationLevel.PL3U elif ('99' in suffix): return IsolationLevel.PL299 elif ('SI' in suffix): return IsolationLevel.PLSI elif ('FCV' in suffix): return IsolationLevel.PLFCV elif (('+' in suffix) or ('PLUS' in suffix)): return IsolationLevel.PL2plus elif ('MSR' in suffix): return IsolationLevel.PLMSR elif ('2L' in suffix): return IsolationLevel.PL2L elif ('3' == suffix[(- 1)]): return IsolationLevel.PL3 elif ('2' == suffix[(- 1)]): return IsolationLevel.PL2 elif ('1' == suffix[(- 1)]): return IsolationLevel.PL1 elif ('0' == suffix[(- 1)]): return IsolationLevel.PL0 else: raise ValueError('Unknown PL isolation level: {}'.format(isolation_lvl)) else: if (('CURSOR' in isolation_lvl) and ('STABILITY' in isolation_lvl)): return IsolationLevel.PLCS elif (('MONOTONIC' in isolation_lvl) and ('VIEW' in isolation_lvl)): return IsolationLevel.PL2L elif (('MONOTONIC' in isolation_lvl) and ('SNAPSHOT' in isolation_lvl) and ('READS' in isolation_lvl)): return IsolationLevel.PLMSR elif (('CONSISTENT' in isolation_lvl) and ('VIEW' in isolation_lvl)): return (IsolationLevel.PLFCV if ('FORWARD' in isolation_lvl) else IsolationLevel.PL2plus) elif (('SNAPSHOT' in isolation_lvl) and ('ISOLATION' in isolation_lvl)): return IsolationLevel.PLSI elif (('REPEATABLE' in isolation_lvl) and ('READ' in isolation_lvl)): return IsolationLevel.PL299 elif (('SERIALIZIBILITY' in isolation_lvl) or ('SERIALIZABLE' in isolation_lvl)): if ('UPDATE' in isolation_lvl): return IsolationLevel.PL3U elif ('STRICT' in isolation_lvl): return IsolationLevel.PLSS else: return IsolationLevel.PL3 elif ('READ' in isolation_lvl): if ('UNCOMMITTED' in isolation_lvl): 
return IsolationLevel.PL1 elif ('COMMITTED' in isolation_lvl): return IsolationLevel.PL2 raise ValueError('Unknown isolation level: {}\nKnown Isolation Levels:\n{}'.format(isolation_lvl, '\n'.join((repr(a) for a in IsolationLevel))))
def parse_isolation_level(isolation_lvl: Optional[str]) -> IsolationLevel: '\n \n ' if ((isolation_lvl is None) or (len(isolation_lvl) < 2)): return IsolationLevel.PL0 isolation_lvl = isolation_lvl.strip().upper() if (isolation_lvl[:2] == 'PL'): suffix: str = isolation_lvl[2:] if ('SS' in suffix): return IsolationLevel.PLSS elif ('3U' in suffix): return IsolationLevel.PL3U elif ('99' in suffix): return IsolationLevel.PL299 elif ('SI' in suffix): return IsolationLevel.PLSI elif ('FCV' in suffix): return IsolationLevel.PLFCV elif (('+' in suffix) or ('PLUS' in suffix)): return IsolationLevel.PL2plus elif ('MSR' in suffix): return IsolationLevel.PLMSR elif ('2L' in suffix): return IsolationLevel.PL2L elif ('3' == suffix[(- 1)]): return IsolationLevel.PL3 elif ('2' == suffix[(- 1)]): return IsolationLevel.PL2 elif ('1' == suffix[(- 1)]): return IsolationLevel.PL1 elif ('0' == suffix[(- 1)]): return IsolationLevel.PL0 else: raise ValueError('Unknown PL isolation level: {}'.format(isolation_lvl)) else: if (('CURSOR' in isolation_lvl) and ('STABILITY' in isolation_lvl)): return IsolationLevel.PLCS elif (('MONOTONIC' in isolation_lvl) and ('VIEW' in isolation_lvl)): return IsolationLevel.PL2L elif (('MONOTONIC' in isolation_lvl) and ('SNAPSHOT' in isolation_lvl) and ('READS' in isolation_lvl)): return IsolationLevel.PLMSR elif (('CONSISTENT' in isolation_lvl) and ('VIEW' in isolation_lvl)): return (IsolationLevel.PLFCV if ('FORWARD' in isolation_lvl) else IsolationLevel.PL2plus) elif (('SNAPSHOT' in isolation_lvl) and ('ISOLATION' in isolation_lvl)): return IsolationLevel.PLSI elif (('REPEATABLE' in isolation_lvl) and ('READ' in isolation_lvl)): return IsolationLevel.PL299 elif (('SERIALIZIBILITY' in isolation_lvl) or ('SERIALIZABLE' in isolation_lvl)): if ('UPDATE' in isolation_lvl): return IsolationLevel.PL3U elif ('STRICT' in isolation_lvl): return IsolationLevel.PLSS else: return IsolationLevel.PL3 elif ('READ' in isolation_lvl): if ('UNCOMMITTED' in isolation_lvl): 
return IsolationLevel.PL1 elif ('COMMITTED' in isolation_lvl): return IsolationLevel.PL2 raise ValueError('Unknown isolation level: {}\nKnown Isolation Levels:\n{}'.format(isolation_lvl, '\n'.join((repr(a) for a in IsolationLevel))))<|docstring|>Convert textual description to an isolation level<|endoftext|>
0e0142bab5cad3a82c2a7d59bad06d1fe54a5a36984f930b0a99974f5bea95ae
def proscribed_anomalies(isolation_lvl: IsolationLevel) -> List[Anomaly.Type]: '\n An isolation level is defined by proscribing certain anomalies\n\n This function encodes that information\n ' mapping: Dict[(IsolationLevel, List[Any])] = {IsolationLevel.PL0: [], IsolationLevel.PL1: [DSG.CyclicalAnomaly.G0], IsolationLevel.PL2: [Anomaly.G1], IsolationLevel.PLCS: [Anomaly.G1, DSG.CyclicalAnomaly.Gcursor], IsolationLevel.PL2L: [Anomaly.G1, DSG.CyclicalAnomaly.Gmonotonic], IsolationLevel.PLMSR: [Anomaly.G1, DSG.CyclicalAnomaly.GMSR], IsolationLevel.PL2plus: [Anomaly.G1, DSG.CyclicalAnomaly.Gsingle], IsolationLevel.PLFCV: [Anomaly.G1, DSG.CyclicalAnomaly.GSIB], IsolationLevel.PLSI: [Anomaly.G1, DSG.CyclicalAnomaly.GSI], IsolationLevel.PL299: [Anomaly.G1, DSG.CyclicalAnomaly.G2item], IsolationLevel.PL3U: [Anomaly.G1, DSG.CyclicalAnomaly.Gupdate], IsolationLevel.PL3: [Anomaly.G1, DSG.CyclicalAnomaly.G2], IsolationLevel.PL3: [Anomaly.G1, DSG.CyclicalAnomaly.G2]} return mapping[isolation_lvl]
An isolation level is defined by proscribing certain anomalies This function encodes that information
frodo/checker.py
proscribed_anomalies
memsql/frodo
4
python
def proscribed_anomalies(isolation_lvl: IsolationLevel) -> List[Anomaly.Type]: '\n An isolation level is defined by proscribing certain anomalies\n\n This function encodes that information\n ' mapping: Dict[(IsolationLevel, List[Any])] = {IsolationLevel.PL0: [], IsolationLevel.PL1: [DSG.CyclicalAnomaly.G0], IsolationLevel.PL2: [Anomaly.G1], IsolationLevel.PLCS: [Anomaly.G1, DSG.CyclicalAnomaly.Gcursor], IsolationLevel.PL2L: [Anomaly.G1, DSG.CyclicalAnomaly.Gmonotonic], IsolationLevel.PLMSR: [Anomaly.G1, DSG.CyclicalAnomaly.GMSR], IsolationLevel.PL2plus: [Anomaly.G1, DSG.CyclicalAnomaly.Gsingle], IsolationLevel.PLFCV: [Anomaly.G1, DSG.CyclicalAnomaly.GSIB], IsolationLevel.PLSI: [Anomaly.G1, DSG.CyclicalAnomaly.GSI], IsolationLevel.PL299: [Anomaly.G1, DSG.CyclicalAnomaly.G2item], IsolationLevel.PL3U: [Anomaly.G1, DSG.CyclicalAnomaly.Gupdate], IsolationLevel.PL3: [Anomaly.G1, DSG.CyclicalAnomaly.G2], IsolationLevel.PL3: [Anomaly.G1, DSG.CyclicalAnomaly.G2]} return mapping[isolation_lvl]
def proscribed_anomalies(isolation_lvl: IsolationLevel) -> List[Anomaly.Type]: '\n An isolation level is defined by proscribing certain anomalies\n\n This function encodes that information\n ' mapping: Dict[(IsolationLevel, List[Any])] = {IsolationLevel.PL0: [], IsolationLevel.PL1: [DSG.CyclicalAnomaly.G0], IsolationLevel.PL2: [Anomaly.G1], IsolationLevel.PLCS: [Anomaly.G1, DSG.CyclicalAnomaly.Gcursor], IsolationLevel.PL2L: [Anomaly.G1, DSG.CyclicalAnomaly.Gmonotonic], IsolationLevel.PLMSR: [Anomaly.G1, DSG.CyclicalAnomaly.GMSR], IsolationLevel.PL2plus: [Anomaly.G1, DSG.CyclicalAnomaly.Gsingle], IsolationLevel.PLFCV: [Anomaly.G1, DSG.CyclicalAnomaly.GSIB], IsolationLevel.PLSI: [Anomaly.G1, DSG.CyclicalAnomaly.GSI], IsolationLevel.PL299: [Anomaly.G1, DSG.CyclicalAnomaly.G2item], IsolationLevel.PL3U: [Anomaly.G1, DSG.CyclicalAnomaly.Gupdate], IsolationLevel.PL3: [Anomaly.G1, DSG.CyclicalAnomaly.G2], IsolationLevel.PL3: [Anomaly.G1, DSG.CyclicalAnomaly.G2]} return mapping[isolation_lvl]<|docstring|>An isolation level is defined by proscribing certain anomalies This function encodes that information<|endoftext|>
eed6d3931dbf496578978ee85173df43dbd2c29bfbdc3560c4449b038be7db23
def implies(anomaly_type: Any) -> List[Any]: '\n An anomaly can imply a list of other anomalies\n ' if (anomaly_type == NonCyclicalAnomaly.G1A): return [Anomaly.G1] if (anomaly_type == NonCyclicalAnomaly.G1B): return [Anomaly.G1] if (anomaly_type == Anomaly.G1): return [] return DSG.CyclicalAnomaly.cyclical_implies(anomaly_type)
An anomaly can imply a list of other anomalies
frodo/checker.py
implies
memsql/frodo
4
python
def implies(anomaly_type: Any) -> List[Any]: '\n \n ' if (anomaly_type == NonCyclicalAnomaly.G1A): return [Anomaly.G1] if (anomaly_type == NonCyclicalAnomaly.G1B): return [Anomaly.G1] if (anomaly_type == Anomaly.G1): return [] return DSG.CyclicalAnomaly.cyclical_implies(anomaly_type)
def implies(anomaly_type: Any) -> List[Any]: '\n \n ' if (anomaly_type == NonCyclicalAnomaly.G1A): return [Anomaly.G1] if (anomaly_type == NonCyclicalAnomaly.G1B): return [Anomaly.G1] if (anomaly_type == Anomaly.G1): return [] return DSG.CyclicalAnomaly.cyclical_implies(anomaly_type)<|docstring|>An anomaly can imply a list of other anomalies<|endoftext|>
53d40e0a026fbe8a3bd0816ad3bebfd5630c29ec6b38d6f890189b1ab771a976
def closure(anomaly_type: Any) -> List[Any]: '\n Transitive closure of the `implies` relationship\n ' def aux(l: List[Any], additions: List[Any]) -> List[Any]: if (len(additions) == 0): return l return aux((l + additions), sum(map((lambda y: implies(y)), additions), [])) return aux(list(), [anomaly_type])
Transitive closure of the `implies` relationship
frodo/checker.py
closure
memsql/frodo
4
python
def closure(anomaly_type: Any) -> List[Any]: '\n \n ' def aux(l: List[Any], additions: List[Any]) -> List[Any]: if (len(additions) == 0): return l return aux((l + additions), sum(map((lambda y: implies(y)), additions), [])) return aux(list(), [anomaly_type])
def closure(anomaly_type: Any) -> List[Any]: '\n \n ' def aux(l: List[Any], additions: List[Any]) -> List[Any]: if (len(additions) == 0): return l return aux((l + additions), sum(map((lambda y: implies(y)), additions), [])) return aux(list(), [anomaly_type])<|docstring|>Transitive closure of the `implies` relationship<|endoftext|>
f5b083b97c1c9669cfe407db78d5f2fecbeaab93f3d26c2ad711dbd6f080363a
def output_dot(dsg: DSG, anomaly_types: List[Any], graph_filename: Optional[str]=None, full_graph: bool=False, separate_cycles: bool=False) -> None: '\n Output a DSG as a DOT graph\n\n If the filename is not present, do nothing.\n If <full_graph> is true, output the full DSG, not just the transactions involved in anomalies\n If <separate_cycles> is true, also ouput separate DOT files with each node cycle\n ' if (graph_filename is not None): if separate_cycles: for (n, dot) in enumerate(dsg.dump_dots(anomaly_types)): with open('{}_{}'.format(n, graph_filename), 'w') as f: f.write(dot) with open(graph_filename, 'w') as f: f.write(dsg.dump_dot(anomaly_types, full_graph))
Output a DSG as a DOT graph If the filename is not present, do nothing. If <full_graph> is true, output the full DSG, not just the transactions involved in anomalies If <separate_cycles> is true, also ouput separate DOT files with each node cycle
frodo/checker.py
output_dot
memsql/frodo
4
python
def output_dot(dsg: DSG, anomaly_types: List[Any], graph_filename: Optional[str]=None, full_graph: bool=False, separate_cycles: bool=False) -> None: '\n Output a DSG as a DOT graph\n\n If the filename is not present, do nothing.\n If <full_graph> is true, output the full DSG, not just the transactions involved in anomalies\n If <separate_cycles> is true, also ouput separate DOT files with each node cycle\n ' if (graph_filename is not None): if separate_cycles: for (n, dot) in enumerate(dsg.dump_dots(anomaly_types)): with open('{}_{}'.format(n, graph_filename), 'w') as f: f.write(dot) with open(graph_filename, 'w') as f: f.write(dsg.dump_dot(anomaly_types, full_graph))
def output_dot(dsg: DSG, anomaly_types: List[Any], graph_filename: Optional[str]=None, full_graph: bool=False, separate_cycles: bool=False) -> None: '\n Output a DSG as a DOT graph\n\n If the filename is not present, do nothing.\n If <full_graph> is true, output the full DSG, not just the transactions involved in anomalies\n If <separate_cycles> is true, also ouput separate DOT files with each node cycle\n ' if (graph_filename is not None): if separate_cycles: for (n, dot) in enumerate(dsg.dump_dots(anomaly_types)): with open('{}_{}'.format(n, graph_filename), 'w') as f: f.write(dot) with open(graph_filename, 'w') as f: f.write(dsg.dump_dot(anomaly_types, full_graph))<|docstring|>Output a DSG as a DOT graph If the filename is not present, do nothing. If <full_graph> is true, output the full DSG, not just the transactions involved in anomalies If <separate_cycles> is true, also ouput separate DOT files with each node cycle<|endoftext|>
1212667dc1c508acd204b70d9e6494d4251c5eb18fabf6e6f2e6df04f6ef7d90
def check_history(hist: History, isolation_level: IsolationLevel, limit: Optional[int]=None, graph_filename: Optional[str]=None, full_graph: bool=False, separate_cycles: bool=False) -> List[Anomaly]: '\n Verify that a history is valid under some isolation level\n\n Using <limit> the number of found anomalies can be tuned, since\n sometimes a lot of them are found, which can be quite noisy.\n\n Check output_dot() for the semantics of the other arguments\n ' anomaly_types: List[Any] = proscribed_anomalies(isolation_level) cyclical_anomaly_types: List[Any] = list(filter((lambda anom_type: issubclass(anom_type, DSG.CyclicalAnomaly.CyclicalAnomalyType)), anomaly_types)) dsg: DSG = DSG(hist) anomalies: List[Anomaly] = list() if (Anomaly.G1 in anomaly_types): anomalies += (find_g1a(hist) + find_g1b(hist)) for a in dsg.find_anomalies(cyclical_anomaly_types): anomalies.append(a) if ((limit is not None) and (len(anomalies) >= limit)): break output_dot(dsg, cyclical_anomaly_types, graph_filename, full_graph, separate_cycles) return anomalies
Verify that a history is valid under some isolation level Using <limit> the number of found anomalies can be tuned, since sometimes a lot of them are found, which can be quite noisy. Check output_dot() for the semantics of the other arguments
frodo/checker.py
check_history
memsql/frodo
4
python
def check_history(hist: History, isolation_level: IsolationLevel, limit: Optional[int]=None, graph_filename: Optional[str]=None, full_graph: bool=False, separate_cycles: bool=False) -> List[Anomaly]: '\n Verify that a history is valid under some isolation level\n\n Using <limit> the number of found anomalies can be tuned, since\n sometimes a lot of them are found, which can be quite noisy.\n\n Check output_dot() for the semantics of the other arguments\n ' anomaly_types: List[Any] = proscribed_anomalies(isolation_level) cyclical_anomaly_types: List[Any] = list(filter((lambda anom_type: issubclass(anom_type, DSG.CyclicalAnomaly.CyclicalAnomalyType)), anomaly_types)) dsg: DSG = DSG(hist) anomalies: List[Anomaly] = list() if (Anomaly.G1 in anomaly_types): anomalies += (find_g1a(hist) + find_g1b(hist)) for a in dsg.find_anomalies(cyclical_anomaly_types): anomalies.append(a) if ((limit is not None) and (len(anomalies) >= limit)): break output_dot(dsg, cyclical_anomaly_types, graph_filename, full_graph, separate_cycles) return anomalies
def check_history(hist: History, isolation_level: IsolationLevel, limit: Optional[int]=None, graph_filename: Optional[str]=None, full_graph: bool=False, separate_cycles: bool=False) -> List[Anomaly]: '\n Verify that a history is valid under some isolation level\n\n Using <limit> the number of found anomalies can be tuned, since\n sometimes a lot of them are found, which can be quite noisy.\n\n Check output_dot() for the semantics of the other arguments\n ' anomaly_types: List[Any] = proscribed_anomalies(isolation_level) cyclical_anomaly_types: List[Any] = list(filter((lambda anom_type: issubclass(anom_type, DSG.CyclicalAnomaly.CyclicalAnomalyType)), anomaly_types)) dsg: DSG = DSG(hist) anomalies: List[Anomaly] = list() if (Anomaly.G1 in anomaly_types): anomalies += (find_g1a(hist) + find_g1b(hist)) for a in dsg.find_anomalies(cyclical_anomaly_types): anomalies.append(a) if ((limit is not None) and (len(anomalies) >= limit)): break output_dot(dsg, cyclical_anomaly_types, graph_filename, full_graph, separate_cycles) return anomalies<|docstring|>Verify that a history is valid under some isolation level Using <limit> the number of found anomalies can be tuned, since sometimes a lot of them are found, which can be quite noisy. Check output_dot() for the semantics of the other arguments<|endoftext|>
090e4df688a5278f68e8b4dcbd1bdf5bd9f5e7eadcc5d8ae07aee9df1af226c7
@manage.command def test(): ' Run the unit tests.' import unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests)
Run the unit tests.
manage.py
test
Linyameng/alphadata-dev
0
python
@manage.command def test(): ' ' import unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests)
@manage.command def test(): ' ' import unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests)<|docstring|>Run the unit tests.<|endoftext|>
30b62040a1625a143714c6450332232002997577a911ee7970f209ec0f66d683
def __init__(self, error_ratio: float, response_time: Number, exceptions: Iterable[Type[Exception]]=(Exception,), recovery_time: Number=None, broken_time: Number=None, passing_time: Number=None, exception_inspector: ExceptionInspectorType=None, statistic_name: Optional[str]=None): "\n Circuit Breaker pattern implementation. The class instance collects\n call statistics through the ``call`` or ``call async`` methods.\n\n The state machine has three states:\n * ``CircuitBreakerStates.PASSING``\n * ``CircuitBreakerStates.BROKEN``\n * ``CircuitBreakerStates.RECOVERING``\n\n In passing mode all results or exceptions will be returned as is.\n Statistic collects for each call.\n\n In broken mode returns exception ``CircuitBroken`` for each call.\n Statistic doesn't collecting.\n\n In recovering mode the part of calls is real function calls and\n remainings raises ``CircuitBroken``. The count of real calls grows\n exponentially in this case but when 20% (by default) will be failed\n the state returns to broken state.\n\n :param error_ratio: Failed to success calls ratio. 
The state might be\n changed if ratio will reach given value within\n ``response time`` (in seconds).\n Value between 0.0 and 1.0.\n :param response_time: Time window to collect statistics (seconds)\n :param exceptions: Only this exceptions will affect ratio.\n Base class ``Exception`` used by default.\n :param recovery_time: minimal time in recovery state (seconds)\n :param broken_time: minimal time in broken state (seconds)\n :param passing_time: minimum time in passing state (seconds)\n " if (response_time <= 0): raise ValueError('Response time must be greater then zero') if (0.0 > error_ratio >= 1.0): raise ValueError(('Error ratio must be between 0 and 1 not %r' % error_ratio)) self._statistic = deque(maxlen=self.BUCKET_COUNT) self._lock = threading.RLock() self._loop = asyncio.get_event_loop() self._error_ratio = error_ratio self._state = CircuitBreakerStates.PASSING self._response_time = response_time self._stuck_until = 0 self._recovery_at = 0 self._exceptions = tuple(frozenset(exceptions)) self._exception_inspector = exception_inspector self._passing_time = (passing_time or self._response_time) self._broken_time = (broken_time or self._response_time) self._recovery_time = (recovery_time or self._response_time) self._last_exception = None self._counters = CircuitBreakerStatistic(statistic_name) self._counters.error_ratio_threshold = error_ratio
Circuit Breaker pattern implementation. The class instance collects call statistics through the ``call`` or ``call async`` methods. The state machine has three states: * ``CircuitBreakerStates.PASSING`` * ``CircuitBreakerStates.BROKEN`` * ``CircuitBreakerStates.RECOVERING`` In passing mode all results or exceptions will be returned as is. Statistic collects for each call. In broken mode returns exception ``CircuitBroken`` for each call. Statistic doesn't collecting. In recovering mode the part of calls is real function calls and remainings raises ``CircuitBroken``. The count of real calls grows exponentially in this case but when 20% (by default) will be failed the state returns to broken state. :param error_ratio: Failed to success calls ratio. The state might be changed if ratio will reach given value within ``response time`` (in seconds). Value between 0.0 and 1.0. :param response_time: Time window to collect statistics (seconds) :param exceptions: Only this exceptions will affect ratio. Base class ``Exception`` used by default. :param recovery_time: minimal time in recovery state (seconds) :param broken_time: minimal time in broken state (seconds) :param passing_time: minimum time in passing state (seconds)
aiomisc/circuit_breaker.py
__init__
Alviner/aiomisc
232
python
def __init__(self, error_ratio: float, response_time: Number, exceptions: Iterable[Type[Exception]]=(Exception,), recovery_time: Number=None, broken_time: Number=None, passing_time: Number=None, exception_inspector: ExceptionInspectorType=None, statistic_name: Optional[str]=None): "\n Circuit Breaker pattern implementation. The class instance collects\n call statistics through the ``call`` or ``call async`` methods.\n\n The state machine has three states:\n * ``CircuitBreakerStates.PASSING``\n * ``CircuitBreakerStates.BROKEN``\n * ``CircuitBreakerStates.RECOVERING``\n\n In passing mode all results or exceptions will be returned as is.\n Statistic collects for each call.\n\n In broken mode returns exception ``CircuitBroken`` for each call.\n Statistic doesn't collecting.\n\n In recovering mode the part of calls is real function calls and\n remainings raises ``CircuitBroken``. The count of real calls grows\n exponentially in this case but when 20% (by default) will be failed\n the state returns to broken state.\n\n :param error_ratio: Failed to success calls ratio. 
The state might be\n changed if ratio will reach given value within\n ``response time`` (in seconds).\n Value between 0.0 and 1.0.\n :param response_time: Time window to collect statistics (seconds)\n :param exceptions: Only this exceptions will affect ratio.\n Base class ``Exception`` used by default.\n :param recovery_time: minimal time in recovery state (seconds)\n :param broken_time: minimal time in broken state (seconds)\n :param passing_time: minimum time in passing state (seconds)\n " if (response_time <= 0): raise ValueError('Response time must be greater then zero') if (0.0 > error_ratio >= 1.0): raise ValueError(('Error ratio must be between 0 and 1 not %r' % error_ratio)) self._statistic = deque(maxlen=self.BUCKET_COUNT) self._lock = threading.RLock() self._loop = asyncio.get_event_loop() self._error_ratio = error_ratio self._state = CircuitBreakerStates.PASSING self._response_time = response_time self._stuck_until = 0 self._recovery_at = 0 self._exceptions = tuple(frozenset(exceptions)) self._exception_inspector = exception_inspector self._passing_time = (passing_time or self._response_time) self._broken_time = (broken_time or self._response_time) self._recovery_time = (recovery_time or self._response_time) self._last_exception = None self._counters = CircuitBreakerStatistic(statistic_name) self._counters.error_ratio_threshold = error_ratio
def __init__(self, error_ratio: float, response_time: Number, exceptions: Iterable[Type[Exception]]=(Exception,), recovery_time: Number=None, broken_time: Number=None, passing_time: Number=None, exception_inspector: ExceptionInspectorType=None, statistic_name: Optional[str]=None): "\n Circuit Breaker pattern implementation. The class instance collects\n call statistics through the ``call`` or ``call async`` methods.\n\n The state machine has three states:\n * ``CircuitBreakerStates.PASSING``\n * ``CircuitBreakerStates.BROKEN``\n * ``CircuitBreakerStates.RECOVERING``\n\n In passing mode all results or exceptions will be returned as is.\n Statistic collects for each call.\n\n In broken mode returns exception ``CircuitBroken`` for each call.\n Statistic doesn't collecting.\n\n In recovering mode the part of calls is real function calls and\n remainings raises ``CircuitBroken``. The count of real calls grows\n exponentially in this case but when 20% (by default) will be failed\n the state returns to broken state.\n\n :param error_ratio: Failed to success calls ratio. 
The state might be\n changed if ratio will reach given value within\n ``response time`` (in seconds).\n Value between 0.0 and 1.0.\n :param response_time: Time window to collect statistics (seconds)\n :param exceptions: Only this exceptions will affect ratio.\n Base class ``Exception`` used by default.\n :param recovery_time: minimal time in recovery state (seconds)\n :param broken_time: minimal time in broken state (seconds)\n :param passing_time: minimum time in passing state (seconds)\n " if (response_time <= 0): raise ValueError('Response time must be greater then zero') if (0.0 > error_ratio >= 1.0): raise ValueError(('Error ratio must be between 0 and 1 not %r' % error_ratio)) self._statistic = deque(maxlen=self.BUCKET_COUNT) self._lock = threading.RLock() self._loop = asyncio.get_event_loop() self._error_ratio = error_ratio self._state = CircuitBreakerStates.PASSING self._response_time = response_time self._stuck_until = 0 self._recovery_at = 0 self._exceptions = tuple(frozenset(exceptions)) self._exception_inspector = exception_inspector self._passing_time = (passing_time or self._response_time) self._broken_time = (broken_time or self._response_time) self._recovery_time = (recovery_time or self._response_time) self._last_exception = None self._counters = CircuitBreakerStatistic(statistic_name) self._counters.error_ratio_threshold = error_ratio<|docstring|>Circuit Breaker pattern implementation. The class instance collects call statistics through the ``call`` or ``call async`` methods. The state machine has three states: * ``CircuitBreakerStates.PASSING`` * ``CircuitBreakerStates.BROKEN`` * ``CircuitBreakerStates.RECOVERING`` In passing mode all results or exceptions will be returned as is. Statistic collects for each call. In broken mode returns exception ``CircuitBroken`` for each call. Statistic doesn't collecting. In recovering mode the part of calls is real function calls and remainings raises ``CircuitBroken``. 
The count of real calls grows exponentially in this case but when 20% (by default) will be failed the state returns to broken state. :param error_ratio: Failed to success calls ratio. The state might be changed if ratio will reach given value within ``response time`` (in seconds). Value between 0.0 and 1.0. :param response_time: Time window to collect statistics (seconds) :param exceptions: Only this exceptions will affect ratio. Base class ``Exception`` used by default. :param recovery_time: minimal time in recovery state (seconds) :param broken_time: minimal time in broken state (seconds) :param passing_time: minimum time in passing state (seconds)<|endoftext|>
6d27f4cad550c381bddd107f30ebc9deae540be52472f320073814bf4c5a541f
def __gen_statistic(self) -> Generator[(Counter, None, None)]: '\n Generator which returns only buckets Counters not before current_time\n ' not_before = (self.bucket() - (self._response_time * self.BUCKET_COUNT)) for idx in range((len(self._statistic) - 1), (- 1), (- 1)): (bucket, counter) = self._statistic[idx] if (bucket < not_before): break (yield counter)
Generator which returns only buckets Counters not before current_time
aiomisc/circuit_breaker.py
__gen_statistic
Alviner/aiomisc
232
python
def __gen_statistic(self) -> Generator[(Counter, None, None)]: '\n \n ' not_before = (self.bucket() - (self._response_time * self.BUCKET_COUNT)) for idx in range((len(self._statistic) - 1), (- 1), (- 1)): (bucket, counter) = self._statistic[idx] if (bucket < not_before): break (yield counter)
def __gen_statistic(self) -> Generator[(Counter, None, None)]: '\n \n ' not_before = (self.bucket() - (self._response_time * self.BUCKET_COUNT)) for idx in range((len(self._statistic) - 1), (- 1), (- 1)): (bucket, counter) = self._statistic[idx] if (bucket < not_before): break (yield counter)<|docstring|>Generator which returns only buckets Counters not before current_time<|endoftext|>
f6973dff68cd065b8473bb7e1b96479c2a3009bafda20c0b10ce4899f33e66f7
def post_save(self, obj): ' Add groups ' if ('group' in self.form.cleaned_data): for group in self.form.cleaned_data['group']: try: obj.groups.add(group) except ValueError: (new_group, _) = AnimalGroup.objects.get_or_create(name=string.capwords(group)) if (new_group and (new_group not in obj.groups.all())): obj.groups.add(new_group) return obj
Add groups
farmguru/animals/views.py
post_save
savioabuga/farmguru
0
python
def post_save(self, obj): ' ' if ('group' in self.form.cleaned_data): for group in self.form.cleaned_data['group']: try: obj.groups.add(group) except ValueError: (new_group, _) = AnimalGroup.objects.get_or_create(name=string.capwords(group)) if (new_group and (new_group not in obj.groups.all())): obj.groups.add(new_group) return obj
def post_save(self, obj): ' ' if ('group' in self.form.cleaned_data): for group in self.form.cleaned_data['group']: try: obj.groups.add(group) except ValueError: (new_group, _) = AnimalGroup.objects.get_or_create(name=string.capwords(group)) if (new_group and (new_group not in obj.groups.all())): obj.groups.add(new_group) return obj<|docstring|>Add groups<|endoftext|>
2db78b1cf9d8145e1c9c9230badd93adcad438af1a68757f418da1c22c5b7b54
def test_version(self): ' test version overrides min_version and max_version ' version = VersionedDependency(name='tensorflow', version='0.3.0', min_version='0.1.0', max_version='0.4.0') self.assertTrue((version.min_version == '0.3.0')) self.assertTrue((version.max_version == '0.3.0')) self.assertTrue(version.has_versions()) self.assertTrue((version.name == 'tensorflow'))
test version overrides min_version and max_version
sdk/python/tests/compiler/component_builder_test.py
test_version
adrian555/pipelines
0
python
def test_version(self): ' ' version = VersionedDependency(name='tensorflow', version='0.3.0', min_version='0.1.0', max_version='0.4.0') self.assertTrue((version.min_version == '0.3.0')) self.assertTrue((version.max_version == '0.3.0')) self.assertTrue(version.has_versions()) self.assertTrue((version.name == 'tensorflow'))
def test_version(self): ' ' version = VersionedDependency(name='tensorflow', version='0.3.0', min_version='0.1.0', max_version='0.4.0') self.assertTrue((version.min_version == '0.3.0')) self.assertTrue((version.max_version == '0.3.0')) self.assertTrue(version.has_versions()) self.assertTrue((version.name == 'tensorflow'))<|docstring|>test version overrides min_version and max_version<|endoftext|>
3cf1a4f3ca0dc7e82e7142e2c90cd91a8358fb27f4ef94454a292aede14d9be1
def test_minmax_version(self): ' test if min_version and max_version are configured when version is not given ' version = VersionedDependency(name='tensorflow', min_version='0.1.0', max_version='0.4.0') self.assertTrue((version.min_version == '0.1.0')) self.assertTrue((version.max_version == '0.4.0')) self.assertTrue(version.has_versions())
test if min_version and max_version are configured when version is not given
sdk/python/tests/compiler/component_builder_test.py
test_minmax_version
adrian555/pipelines
0
python
def test_minmax_version(self): ' ' version = VersionedDependency(name='tensorflow', min_version='0.1.0', max_version='0.4.0') self.assertTrue((version.min_version == '0.1.0')) self.assertTrue((version.max_version == '0.4.0')) self.assertTrue(version.has_versions())
def test_minmax_version(self): ' ' version = VersionedDependency(name='tensorflow', min_version='0.1.0', max_version='0.4.0') self.assertTrue((version.min_version == '0.1.0')) self.assertTrue((version.max_version == '0.4.0')) self.assertTrue(version.has_versions())<|docstring|>test if min_version and max_version are configured when version is not given<|endoftext|>
c6c9063495ef08cb0463d83b4a93f177225e988e93993514099564c0eb689e21
def test_min_or_max_version(self): ' test if min_version and max_version are configured when version is not given ' version = VersionedDependency(name='tensorflow', min_version='0.1.0') self.assertTrue((version.min_version == '0.1.0')) self.assertTrue(version.has_versions()) version = VersionedDependency(name='tensorflow', max_version='0.3.0') self.assertTrue((version.max_version == '0.3.0')) self.assertTrue(version.has_versions())
test if min_version and max_version are configured when version is not given
sdk/python/tests/compiler/component_builder_test.py
test_min_or_max_version
adrian555/pipelines
0
python
def test_min_or_max_version(self): ' ' version = VersionedDependency(name='tensorflow', min_version='0.1.0') self.assertTrue((version.min_version == '0.1.0')) self.assertTrue(version.has_versions()) version = VersionedDependency(name='tensorflow', max_version='0.3.0') self.assertTrue((version.max_version == '0.3.0')) self.assertTrue(version.has_versions())
def test_min_or_max_version(self): ' ' version = VersionedDependency(name='tensorflow', min_version='0.1.0') self.assertTrue((version.min_version == '0.1.0')) self.assertTrue(version.has_versions()) version = VersionedDependency(name='tensorflow', max_version='0.3.0') self.assertTrue((version.max_version == '0.3.0')) self.assertTrue(version.has_versions())<|docstring|>test if min_version and max_version are configured when version is not given<|endoftext|>
4935ab9f2d7418bc60c1d80f57cff209fc5af439980354464f64776e6a1e7c2d
def test_no_version(self): ' test the no version scenario ' version = VersionedDependency(name='tensorflow') self.assertFalse(version.has_min_version()) self.assertFalse(version.has_max_version()) self.assertFalse(version.has_versions())
test the no version scenario
sdk/python/tests/compiler/component_builder_test.py
test_no_version
adrian555/pipelines
0
python
def test_no_version(self): ' ' version = VersionedDependency(name='tensorflow') self.assertFalse(version.has_min_version()) self.assertFalse(version.has_max_version()) self.assertFalse(version.has_versions())
def test_no_version(self): ' ' version = VersionedDependency(name='tensorflow') self.assertFalse(version.has_min_version()) self.assertFalse(version.has_max_version()) self.assertFalse(version.has_versions())<|docstring|>test the no version scenario<|endoftext|>
0c67cf776a1e60be435a24e322b15cd6d15db729f528add8c92bc98c5fc33ed1
def test_generate_requirement(self): ' Test generating requirement file ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') temp_file = os.path.join(test_data_dir, 'test_requirements.tmp') dependency_helper = DependencyHelper() dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.10.0', max_version='0.11.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.6.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='pytorch', max_version='0.3.0')) dependency_helper.generate_pip_requirements(temp_file) golden_requirement_payload = 'tensorflow >= 0.10.0, <= 0.11.0\nkubernetes >= 0.6.0\npytorch <= 0.3.0\n' with open(temp_file, 'r') as f: target_requirement_payload = f.read() self.assertEqual(target_requirement_payload, golden_requirement_payload) os.remove(temp_file)
Test generating requirement file
sdk/python/tests/compiler/component_builder_test.py
test_generate_requirement
adrian555/pipelines
0
python
def test_generate_requirement(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') temp_file = os.path.join(test_data_dir, 'test_requirements.tmp') dependency_helper = DependencyHelper() dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.10.0', max_version='0.11.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.6.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='pytorch', max_version='0.3.0')) dependency_helper.generate_pip_requirements(temp_file) golden_requirement_payload = 'tensorflow >= 0.10.0, <= 0.11.0\nkubernetes >= 0.6.0\npytorch <= 0.3.0\n' with open(temp_file, 'r') as f: target_requirement_payload = f.read() self.assertEqual(target_requirement_payload, golden_requirement_payload) os.remove(temp_file)
def test_generate_requirement(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') temp_file = os.path.join(test_data_dir, 'test_requirements.tmp') dependency_helper = DependencyHelper() dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.10.0', max_version='0.11.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.6.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='pytorch', max_version='0.3.0')) dependency_helper.generate_pip_requirements(temp_file) golden_requirement_payload = 'tensorflow >= 0.10.0, <= 0.11.0\nkubernetes >= 0.6.0\npytorch <= 0.3.0\n' with open(temp_file, 'r') as f: target_requirement_payload = f.read() self.assertEqual(target_requirement_payload, golden_requirement_payload) os.remove(temp_file)<|docstring|>Test generating requirement file<|endoftext|>
db1b397dd78fde8dc87deaaeea3b37b637d0f21236a10cb72a4fb32c44aa8d7b
def test_add_python_package(self): ' Test add_python_package ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') temp_file = os.path.join(test_data_dir, 'test_requirements.tmp') dependency_helper = DependencyHelper() dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.10.0', max_version='0.11.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.6.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.12.0'), override=True) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.8.0'), override=False) dependency_helper.add_python_package(dependency=VersionedDependency(name='pytorch', version='0.3.0')) dependency_helper.generate_pip_requirements(temp_file) golden_requirement_payload = 'tensorflow >= 0.12.0\nkubernetes >= 0.6.0\npytorch >= 0.3.0, <= 0.3.0\n' with open(temp_file, 'r') as f: target_requirement_payload = f.read() self.assertEqual(target_requirement_payload, golden_requirement_payload) os.remove(temp_file)
Test add_python_package
sdk/python/tests/compiler/component_builder_test.py
test_add_python_package
adrian555/pipelines
0
python
def test_add_python_package(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') temp_file = os.path.join(test_data_dir, 'test_requirements.tmp') dependency_helper = DependencyHelper() dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.10.0', max_version='0.11.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.6.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.12.0'), override=True) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.8.0'), override=False) dependency_helper.add_python_package(dependency=VersionedDependency(name='pytorch', version='0.3.0')) dependency_helper.generate_pip_requirements(temp_file) golden_requirement_payload = 'tensorflow >= 0.12.0\nkubernetes >= 0.6.0\npytorch >= 0.3.0, <= 0.3.0\n' with open(temp_file, 'r') as f: target_requirement_payload = f.read() self.assertEqual(target_requirement_payload, golden_requirement_payload) os.remove(temp_file)
def test_add_python_package(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') temp_file = os.path.join(test_data_dir, 'test_requirements.tmp') dependency_helper = DependencyHelper() dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.10.0', max_version='0.11.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.6.0')) dependency_helper.add_python_package(dependency=VersionedDependency(name='tensorflow', min_version='0.12.0'), override=True) dependency_helper.add_python_package(dependency=VersionedDependency(name='kubernetes', min_version='0.8.0'), override=False) dependency_helper.add_python_package(dependency=VersionedDependency(name='pytorch', version='0.3.0')) dependency_helper.generate_pip_requirements(temp_file) golden_requirement_payload = 'tensorflow >= 0.12.0\nkubernetes >= 0.6.0\npytorch >= 0.3.0, <= 0.3.0\n' with open(temp_file, 'r') as f: target_requirement_payload = f.read() self.assertEqual(target_requirement_payload, golden_requirement_payload) os.remove(temp_file)<|docstring|>Test add_python_package<|endoftext|>
720cb172be3f725aed36ae8bcad227702fd212e25a892aa7060c1201a75d4283
def test_generate_dockerfile(self): ' Test generate dockerfile ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') target_dockerfile = os.path.join(test_data_dir, 'component.temp.dockerfile') golden_dockerfile_payload_one = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\nADD main.py /ml/main.py\nENTRYPOINT ["python3", "-u", "/ml/main.py"]' golden_dockerfile_payload_two = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\nADD requirements.txt /ml/requirements.txt\nRUN pip3 install -r /ml/requirements.txt\nADD main.py /ml/main.py\nENTRYPOINT ["python3", "-u", "/ml/main.py"]' golden_dockerfile_payload_three = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python python-pip python-setuptools\nADD requirements.txt /ml/requirements.txt\nRUN pip install -r /ml/requirements.txt\nADD main.py /ml/main.py\nENTRYPOINT ["python", "-u", "/ml/main.py"]' _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python3') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_one) _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python3', requirement_filename='requirements.txt') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_two) _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', 
entrypoint_filename='main.py', python_version='python2', requirement_filename='requirements.txt') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_three) self.assertRaises(ValueError, _generate_dockerfile, filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python4', requirement_filename='requirements.txt') os.remove(target_dockerfile)
Test generate dockerfile
sdk/python/tests/compiler/component_builder_test.py
test_generate_dockerfile
adrian555/pipelines
0
python
def test_generate_dockerfile(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') target_dockerfile = os.path.join(test_data_dir, 'component.temp.dockerfile') golden_dockerfile_payload_one = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\nADD main.py /ml/main.py\nENTRYPOINT ["python3", "-u", "/ml/main.py"]' golden_dockerfile_payload_two = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\nADD requirements.txt /ml/requirements.txt\nRUN pip3 install -r /ml/requirements.txt\nADD main.py /ml/main.py\nENTRYPOINT ["python3", "-u", "/ml/main.py"]' golden_dockerfile_payload_three = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python python-pip python-setuptools\nADD requirements.txt /ml/requirements.txt\nRUN pip install -r /ml/requirements.txt\nADD main.py /ml/main.py\nENTRYPOINT ["python", "-u", "/ml/main.py"]' _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python3') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_one) _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python3', requirement_filename='requirements.txt') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_two) _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', 
python_version='python2', requirement_filename='requirements.txt') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_three) self.assertRaises(ValueError, _generate_dockerfile, filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python4', requirement_filename='requirements.txt') os.remove(target_dockerfile)
def test_generate_dockerfile(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') target_dockerfile = os.path.join(test_data_dir, 'component.temp.dockerfile') golden_dockerfile_payload_one = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\nADD main.py /ml/main.py\nENTRYPOINT ["python3", "-u", "/ml/main.py"]' golden_dockerfile_payload_two = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\nADD requirements.txt /ml/requirements.txt\nRUN pip3 install -r /ml/requirements.txt\nADD main.py /ml/main.py\nENTRYPOINT ["python3", "-u", "/ml/main.py"]' golden_dockerfile_payload_three = 'FROM gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0\nRUN apt-get update -y && apt-get install --no-install-recommends -y -q python python-pip python-setuptools\nADD requirements.txt /ml/requirements.txt\nRUN pip install -r /ml/requirements.txt\nADD main.py /ml/main.py\nENTRYPOINT ["python", "-u", "/ml/main.py"]' _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python3') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_one) _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python3', requirement_filename='requirements.txt') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_two) _generate_dockerfile(filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', 
python_version='python2', requirement_filename='requirements.txt') with open(target_dockerfile, 'r') as f: target_dockerfile_payload = f.read() self.assertEqual(target_dockerfile_payload, golden_dockerfile_payload_three) self.assertRaises(ValueError, _generate_dockerfile, filename=target_dockerfile, base_image='gcr.io/ngao-mlpipeline-testing/tensorflow:1.10.0', entrypoint_filename='main.py', python_version='python4', requirement_filename='requirements.txt') os.remove(target_dockerfile)<|docstring|>Test generate dockerfile<|endoftext|>
8986113b74aa94cdd7ae07ec8a2e35cfd470e13e3c195ed07ffc238617cdd16f
def test_generate_entrypoint(self): ' Test entrypoint generation ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') generated_codes = _func_to_entrypoint(component_func=sample_component_func) golden = 'def sample_component_func(a: str, b: int) -> float:\n result = 3.45\n if a == "succ":\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func(a,b,_output_file):\n output = sample_component_func(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func(**args)\n' self.assertEqual(golden, generated_codes) generated_codes = _func_to_entrypoint(component_func=sample_component_func_two) golden = 'def sample_component_func_two(a: str, b: int) -> float:\n result = 3.45\n if a == \'succ\':\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func_two(a,b,_output_file):\n output = sample_component_func_two(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_two(**args)\n' self.assertEqual(golden, generated_codes) generated_codes = _func_to_entrypoint(component_func=sample_component_func_three) golden = 'def sample_component_func_three() -> float:\n return 1.0\n\ndef wrapper_sample_component_func_three(_output_file):\n output = sample_component_func_three()\n import 
os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_three(**args)\n' self.assertEqual(golden, generated_codes)
Test entrypoint generation
sdk/python/tests/compiler/component_builder_test.py
test_generate_entrypoint
adrian555/pipelines
0
python
def test_generate_entrypoint(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') generated_codes = _func_to_entrypoint(component_func=sample_component_func) golden = 'def sample_component_func(a: str, b: int) -> float:\n result = 3.45\n if a == "succ":\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func(a,b,_output_file):\n output = sample_component_func(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func(**args)\n' self.assertEqual(golden, generated_codes) generated_codes = _func_to_entrypoint(component_func=sample_component_func_two) golden = 'def sample_component_func_two(a: str, b: int) -> float:\n result = 3.45\n if a == \'succ\':\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func_two(a,b,_output_file):\n output = sample_component_func_two(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_two(**args)\n' self.assertEqual(golden, generated_codes) generated_codes = _func_to_entrypoint(component_func=sample_component_func_three) golden = 'def sample_component_func_three() -> float:\n return 1.0\n\ndef wrapper_sample_component_func_three(_output_file):\n output = sample_component_func_three()\n import os\n 
os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_three(**args)\n' self.assertEqual(golden, generated_codes)
def test_generate_entrypoint(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') generated_codes = _func_to_entrypoint(component_func=sample_component_func) golden = 'def sample_component_func(a: str, b: int) -> float:\n result = 3.45\n if a == "succ":\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func(a,b,_output_file):\n output = sample_component_func(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func(**args)\n' self.assertEqual(golden, generated_codes) generated_codes = _func_to_entrypoint(component_func=sample_component_func_two) golden = 'def sample_component_func_two(a: str, b: int) -> float:\n result = 3.45\n if a == \'succ\':\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func_two(a,b,_output_file):\n output = sample_component_func_two(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_two(**args)\n' self.assertEqual(golden, generated_codes) generated_codes = _func_to_entrypoint(component_func=sample_component_func_three) golden = 'def sample_component_func_three() -> float:\n return 1.0\n\ndef wrapper_sample_component_func_three(_output_file):\n output = sample_component_func_three()\n import os\n 
os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_three(**args)\n' self.assertEqual(golden, generated_codes)<|docstring|>Test entrypoint generation<|endoftext|>
cfc64eee0a8443fb38ccfc4a99f94cb6134235c78a9cffe6e00f3c1d99e02cad
def test_generate_entrypoint_python2(self): ' Test entrypoint generation for python2' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') generated_codes = _func_to_entrypoint(component_func=sample_component_func_two, python_version='python2') golden = 'def sample_component_func_two(a, b):\n result = 3.45\n if a == \'succ\':\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func_two(a,b,_output_file):\n output = sample_component_func_two(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_two(**args)\n' self.assertEqual(golden, generated_codes)
Test entrypoint generation for python2
sdk/python/tests/compiler/component_builder_test.py
test_generate_entrypoint_python2
adrian555/pipelines
0
python
def test_generate_entrypoint_python2(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') generated_codes = _func_to_entrypoint(component_func=sample_component_func_two, python_version='python2') golden = 'def sample_component_func_two(a, b):\n result = 3.45\n if a == \'succ\':\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func_two(a,b,_output_file):\n output = sample_component_func_two(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_two(**args)\n' self.assertEqual(golden, generated_codes)
def test_generate_entrypoint_python2(self): ' ' test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') generated_codes = _func_to_entrypoint(component_func=sample_component_func_two, python_version='python2') golden = 'def sample_component_func_two(a, b):\n result = 3.45\n if a == \'succ\':\n result = float(b + 5)\n return result\n\ndef wrapper_sample_component_func_two(a,b,_output_file):\n output = sample_component_func_two(str(a),int(b))\n import os\n os.makedirs(os.path.dirname(_output_file))\n with open(_output_file, "w") as data:\n data.write(str(output))\n\nimport argparse\nparser = argparse.ArgumentParser(description="Parsing arguments")\nparser.add_argument("a", type=str)\nparser.add_argument("b", type=int)\nparser.add_argument("_output_file", type=str)\nargs = vars(parser.parse_args())\n\nif __name__ == "__main__":\n wrapper_sample_component_func_two(**args)\n' self.assertEqual(golden, generated_codes)<|docstring|>Test entrypoint generation for python2<|endoftext|>
feb4145905bd3621e9e27c499a637b15501a23dde6591a72edc27db450159136
def test_codegen(self): ' Test code generator a function' codegen = CodeGenerator(indentation=' ') codegen.begin() codegen.writeline('def hello():') codegen.indent() codegen.writeline('print("hello")') generated_codes = codegen.end() self.assertEqual(generated_codes, inspect.getsource(hello))
Test code generator a function
sdk/python/tests/compiler/component_builder_test.py
test_codegen
adrian555/pipelines
0
python
def test_codegen(self): ' ' codegen = CodeGenerator(indentation=' ') codegen.begin() codegen.writeline('def hello():') codegen.indent() codegen.writeline('print("hello")') generated_codes = codegen.end() self.assertEqual(generated_codes, inspect.getsource(hello))
def test_codegen(self): ' ' codegen = CodeGenerator(indentation=' ') codegen.begin() codegen.writeline('def hello():') codegen.indent() codegen.writeline('print("hello")') generated_codes = codegen.end() self.assertEqual(generated_codes, inspect.getsource(hello))<|docstring|>Test code generator a function<|endoftext|>
6fe59239e54a71c4ec68ec43d3db2614af2b26634cde0e6541574bee915ff9d2
@click.group() @click.version_option(message='%(version)s', package_name='tokenlists') def cli(): '\n Utility for working with the `py-tokenlists` installed token lists\n '
Utility for working with the `py-tokenlists` installed token lists
tokenlists/_cli.py
cli
unparalleled-js/py-tokenlists
14
python
@click.group() @click.version_option(message='%(version)s', package_name='tokenlists') def cli(): '\n \n '
@click.group() @click.version_option(message='%(version)s', package_name='tokenlists') def cli(): '\n \n '<|docstring|>Utility for working with the `py-tokenlists` installed token lists<|endoftext|>
0d476811f93f6f4b9cde022c3c0f905bb251976d877e80bfde9567e9a84045c4
def get_taddol_selections(universe, univ_in_dict=True): 'Returns a dict of AtomSelections from the given universe' d_out = dict() if univ_in_dict: d_out['universe'] = universe d_out['phenrtt'] = universe.select_atoms('bynum 92 94') d_out['phenrtb'] = universe.select_atoms('bynum 82 87') d_out['phenrbt'] = universe.select_atoms('bynum 69 71') d_out['phenrbb'] = universe.select_atoms('bynum 59 64') d_out['phenltt'] = universe.select_atoms('bynum 115 117') d_out['phenltb'] = universe.select_atoms('bynum 105 110') d_out['phenlbt'] = universe.select_atoms('bynum 36 41') d_out['phenlbb'] = universe.select_atoms('bynum 46 48') d_out['quatl'] = universe.select_atoms('bynum 6') d_out['quatr'] = universe.select_atoms('bynum 1') d_out['chirl'] = universe.select_atoms('bynum 4') d_out['chirr'] = universe.select_atoms('bynum 2') d_out['cyclon'] = universe.select_atoms('bynum 13') d_out['cyclof'] = universe.select_atoms('bynum 22') d_out['aoxl'] = universe.select_atoms('bynum 9') d_out['aoxr'] = universe.select_atoms('bynum 7') return d_out
Returns a dict of AtomSelections from the given universe
paratemp/coordinate_analysis.py
get_taddol_selections
theavey/ParaTemp
12
python
def get_taddol_selections(universe, univ_in_dict=True): d_out = dict() if univ_in_dict: d_out['universe'] = universe d_out['phenrtt'] = universe.select_atoms('bynum 92 94') d_out['phenrtb'] = universe.select_atoms('bynum 82 87') d_out['phenrbt'] = universe.select_atoms('bynum 69 71') d_out['phenrbb'] = universe.select_atoms('bynum 59 64') d_out['phenltt'] = universe.select_atoms('bynum 115 117') d_out['phenltb'] = universe.select_atoms('bynum 105 110') d_out['phenlbt'] = universe.select_atoms('bynum 36 41') d_out['phenlbb'] = universe.select_atoms('bynum 46 48') d_out['quatl'] = universe.select_atoms('bynum 6') d_out['quatr'] = universe.select_atoms('bynum 1') d_out['chirl'] = universe.select_atoms('bynum 4') d_out['chirr'] = universe.select_atoms('bynum 2') d_out['cyclon'] = universe.select_atoms('bynum 13') d_out['cyclof'] = universe.select_atoms('bynum 22') d_out['aoxl'] = universe.select_atoms('bynum 9') d_out['aoxr'] = universe.select_atoms('bynum 7') return d_out
def get_taddol_selections(universe, univ_in_dict=True): d_out = dict() if univ_in_dict: d_out['universe'] = universe d_out['phenrtt'] = universe.select_atoms('bynum 92 94') d_out['phenrtb'] = universe.select_atoms('bynum 82 87') d_out['phenrbt'] = universe.select_atoms('bynum 69 71') d_out['phenrbb'] = universe.select_atoms('bynum 59 64') d_out['phenltt'] = universe.select_atoms('bynum 115 117') d_out['phenltb'] = universe.select_atoms('bynum 105 110') d_out['phenlbt'] = universe.select_atoms('bynum 36 41') d_out['phenlbb'] = universe.select_atoms('bynum 46 48') d_out['quatl'] = universe.select_atoms('bynum 6') d_out['quatr'] = universe.select_atoms('bynum 1') d_out['chirl'] = universe.select_atoms('bynum 4') d_out['chirr'] = universe.select_atoms('bynum 2') d_out['cyclon'] = universe.select_atoms('bynum 13') d_out['cyclof'] = universe.select_atoms('bynum 22') d_out['aoxl'] = universe.select_atoms('bynum 9') d_out['aoxr'] = universe.select_atoms('bynum 7') return d_out<|docstring|>Returns a dict of AtomSelections from the given universe<|endoftext|>
d2fcd1d0bbd9756629f27edbbb53321e62ef02f3ad8f8eae07a10c119e787f7f
def get_dist(a, b, box=None): 'Calculate the distance between AtomGroups a and b.\n\n If a box is provided, this will use the builtin MDAnalysis function to\n account for periodic boundary conditions.' warn('get_dist will soon be deprecated. Use Universe.calculate_distances', DeprecationWarning) if (box is not None): coordinates = (np.array([atom.centroid()]) for atom in (a, b)) return MDa.lib.distances.calc_bonds(*coordinates, box=box) else: return np.linalg.norm((a.centroid() - b.centroid()))
Calculate the distance between AtomGroups a and b. If a box is provided, this will use the builtin MDAnalysis function to account for periodic boundary conditions.
paratemp/coordinate_analysis.py
get_dist
theavey/ParaTemp
12
python
def get_dist(a, b, box=None): 'Calculate the distance between AtomGroups a and b.\n\n If a box is provided, this will use the builtin MDAnalysis function to\n account for periodic boundary conditions.' warn('get_dist will soon be deprecated. Use Universe.calculate_distances', DeprecationWarning) if (box is not None): coordinates = (np.array([atom.centroid()]) for atom in (a, b)) return MDa.lib.distances.calc_bonds(*coordinates, box=box) else: return np.linalg.norm((a.centroid() - b.centroid()))
def get_dist(a, b, box=None): 'Calculate the distance between AtomGroups a and b.\n\n If a box is provided, this will use the builtin MDAnalysis function to\n account for periodic boundary conditions.' warn('get_dist will soon be deprecated. Use Universe.calculate_distances', DeprecationWarning) if (box is not None): coordinates = (np.array([atom.centroid()]) for atom in (a, b)) return MDa.lib.distances.calc_bonds(*coordinates, box=box) else: return np.linalg.norm((a.centroid() - b.centroid()))<|docstring|>Calculate the distance between AtomGroups a and b. If a box is provided, this will use the builtin MDAnalysis function to account for periodic boundary conditions.<|endoftext|>
83455d5a4e78356d8215c91a9bc18bdc8db9fea9402dc2a1845d5b68da30cee8
def get_dist_dict(dictionary, a, b, box=None): 'Calculate distance using dict of AtomSelections' warn('get_dist_dict will soon be deprecated. Use Universe.calculate_distances', DeprecationWarning) return get_dist(dictionary[a], dictionary[b], box=box)
Calculate distance using dict of AtomSelections
paratemp/coordinate_analysis.py
get_dist_dict
theavey/ParaTemp
12
python
def get_dist_dict(dictionary, a, b, box=None): warn('get_dist_dict will soon be deprecated. Use Universe.calculate_distances', DeprecationWarning) return get_dist(dictionary[a], dictionary[b], box=box)
def get_dist_dict(dictionary, a, b, box=None): warn('get_dist_dict will soon be deprecated. Use Universe.calculate_distances', DeprecationWarning) return get_dist(dictionary[a], dictionary[b], box=box)<|docstring|>Calculate distance using dict of AtomSelections<|endoftext|>
1a8c12905c43ee185b98ec1a7bc94f413b3ffdd705f71af1da4deb8d2ff26f10
def get_angle(a, b, c, units='rad'): 'Calculate the angle between ba and bc for AtomGroups a, b, c' warn('get_angle will soon be deprecated. Implement Universe.calculate_angles', DeprecationWarning) b_center = b.centroid() ba = (a.centroid() - b_center) bc = (c.centroid() - b_center) angle = np.arccos((np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc)))) if (units == 'rad'): return angle elif (units == 'deg'): return np.rad2deg(angle) else: raise InputError(units, 'Unrecognized units: the two recognized units are rad and deg.')
Calculate the angle between ba and bc for AtomGroups a, b, c
paratemp/coordinate_analysis.py
get_angle
theavey/ParaTemp
12
python
def get_angle(a, b, c, units='rad'): warn('get_angle will soon be deprecated. Implement Universe.calculate_angles', DeprecationWarning) b_center = b.centroid() ba = (a.centroid() - b_center) bc = (c.centroid() - b_center) angle = np.arccos((np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc)))) if (units == 'rad'): return angle elif (units == 'deg'): return np.rad2deg(angle) else: raise InputError(units, 'Unrecognized units: the two recognized units are rad and deg.')
def get_angle(a, b, c, units='rad'): warn('get_angle will soon be deprecated. Implement Universe.calculate_angles', DeprecationWarning) b_center = b.centroid() ba = (a.centroid() - b_center) bc = (c.centroid() - b_center) angle = np.arccos((np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc)))) if (units == 'rad'): return angle elif (units == 'deg'): return np.rad2deg(angle) else: raise InputError(units, 'Unrecognized units: the two recognized units are rad and deg.')<|docstring|>Calculate the angle between ba and bc for AtomGroups a, b, c<|endoftext|>
abf40cfc3a172430b15a19b663ae675de65092ed0b1e70fe4630ec328364bf0a
def get_angle_dict(dictionary, a, b, c, units='rad'): 'Calculate angle using dict of AtomSelections' warn('get_angle_dict will soon be deprecated. Implement Universe.calculate_angles', DeprecationWarning) return get_angle(dictionary[a], dictionary[b], dictionary[c], units=units)
Calculate angle using dict of AtomSelections
paratemp/coordinate_analysis.py
get_angle_dict
theavey/ParaTemp
12
python
def get_angle_dict(dictionary, a, b, c, units='rad'): warn('get_angle_dict will soon be deprecated. Implement Universe.calculate_angles', DeprecationWarning) return get_angle(dictionary[a], dictionary[b], dictionary[c], units=units)
def get_angle_dict(dictionary, a, b, c, units='rad'): warn('get_angle_dict will soon be deprecated. Implement Universe.calculate_angles', DeprecationWarning) return get_angle(dictionary[a], dictionary[b], dictionary[c], units=units)<|docstring|>Calculate angle using dict of AtomSelections<|endoftext|>
84e16fdf897dc85543bf49876cd39f72fb5508f0029f8acae2fc9e7a3f15f4b4
def get_dihedral(a, b, c, d, units='rad'): 'Calculate the angle between abc and bcd for AtomGroups a,b,c,d\n\n Based on formula given in\n https://en.wikipedia.org/wiki/Dihedral_angle' warn('get_dihedral will soon be deprecated. Use Universe.calculate_dihedrals', DeprecationWarning) ba = (a.centroid() - b.centroid()) bc = (b.centroid() - c.centroid()) dc = (d.centroid() - c.centroid()) angle = np.arctan2((np.dot(np.cross(np.cross(ba, bc), np.cross(bc, dc)), bc) / np.linalg.norm(bc)), np.dot(np.cross(ba, bc), np.cross(bc, dc))) if (units == 'rad'): return angle elif (units == 'deg'): return np.rad2deg(angle) else: raise InputError(units, 'Unrecognized units: the two recognized units are rad and deg.')
Calculate the angle between abc and bcd for AtomGroups a,b,c,d Based on formula given in https://en.wikipedia.org/wiki/Dihedral_angle
paratemp/coordinate_analysis.py
get_dihedral
theavey/ParaTemp
12
python
def get_dihedral(a, b, c, d, units='rad'): 'Calculate the angle between abc and bcd for AtomGroups a,b,c,d\n\n Based on formula given in\n https://en.wikipedia.org/wiki/Dihedral_angle' warn('get_dihedral will soon be deprecated. Use Universe.calculate_dihedrals', DeprecationWarning) ba = (a.centroid() - b.centroid()) bc = (b.centroid() - c.centroid()) dc = (d.centroid() - c.centroid()) angle = np.arctan2((np.dot(np.cross(np.cross(ba, bc), np.cross(bc, dc)), bc) / np.linalg.norm(bc)), np.dot(np.cross(ba, bc), np.cross(bc, dc))) if (units == 'rad'): return angle elif (units == 'deg'): return np.rad2deg(angle) else: raise InputError(units, 'Unrecognized units: the two recognized units are rad and deg.')
def get_dihedral(a, b, c, d, units='rad'): 'Calculate the angle between abc and bcd for AtomGroups a,b,c,d\n\n Based on formula given in\n https://en.wikipedia.org/wiki/Dihedral_angle' warn('get_dihedral will soon be deprecated. Use Universe.calculate_dihedrals', DeprecationWarning) ba = (a.centroid() - b.centroid()) bc = (b.centroid() - c.centroid()) dc = (d.centroid() - c.centroid()) angle = np.arctan2((np.dot(np.cross(np.cross(ba, bc), np.cross(bc, dc)), bc) / np.linalg.norm(bc)), np.dot(np.cross(ba, bc), np.cross(bc, dc))) if (units == 'rad'): return angle elif (units == 'deg'): return np.rad2deg(angle) else: raise InputError(units, 'Unrecognized units: the two recognized units are rad and deg.')<|docstring|>Calculate the angle between abc and bcd for AtomGroups a,b,c,d Based on formula given in https://en.wikipedia.org/wiki/Dihedral_angle<|endoftext|>
1ff30a580420f6624e1f708f1951b8d84b94e6b6803462617eba89fe824a0437
def get_dihedral_dict(dictionary, a, b, c, d, units='rad'): 'Calculate dihedral using dict of AtomSelections' warn('get_dihedral_dict will soon be deprecated. Use Universe.calculate_dihedrals', DeprecationWarning) return get_dihedral(dictionary[a], dictionary[b], dictionary[c], dictionary[d], units=units)
Calculate dihedral using dict of AtomSelections
paratemp/coordinate_analysis.py
get_dihedral_dict
theavey/ParaTemp
12
python
def get_dihedral_dict(dictionary, a, b, c, d, units='rad'): warn('get_dihedral_dict will soon be deprecated. Use Universe.calculate_dihedrals', DeprecationWarning) return get_dihedral(dictionary[a], dictionary[b], dictionary[c], dictionary[d], units=units)
def get_dihedral_dict(dictionary, a, b, c, d, units='rad'): warn('get_dihedral_dict will soon be deprecated. Use Universe.calculate_dihedrals', DeprecationWarning) return get_dihedral(dictionary[a], dictionary[b], dictionary[c], dictionary[d], units=units)<|docstring|>Calculate dihedral using dict of AtomSelections<|endoftext|>