max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
skyportal/plot.py | dannygoldstein/skyportal | 0 | 12700 | import numpy as np
import pandas as pd
from bokeh.core.json_encoder import serialize_json
from bokeh.core.properties import List, String
from bokeh.document import Document
from bokeh.layouts import row, column
from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button
from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs
from bokeh.palettes import viridis
from bokeh.plotting import figure, ColumnDataSource
from bokeh.util.compiler import bundle_all_models
from bokeh.util.serialization import make_id
from matplotlib import cm
from matplotlib.colors import rgb2hex
import os
from skyportal.models import (
DBSession,
Obj,
Photometry,
Group,
Instrument,
Telescope,
PHOT_ZP,
)
import sncosmo
DETECT_THRESH = 5  # sigma; minimum S/N for a point to count as a detection
# Spectral lines that can be overlaid on the spectroscopy plot:
# species -> (list of rest-frame wavelengths, hex display color).
# The wavelengths are plotted against an axis labeled in Angstroms
# (see spectroscopy_plot), redshifted by the object's z.
SPEC_LINES = {
    'H': ([3970, 4102, 4341, 4861, 6563], '#ff0000'),
    'He': ([3886, 4472, 5876, 6678, 7065], '#002157'),
    'He II': ([3203, 4686], '#003b99'),
    'C II': ([3919, 4267, 6580, 7234, 9234], '#570199'),
    'C III': ([4650, 5696], '#a30198'),
    'C IV': ([5801], '#ff0073'),
    'O': ([7772, 7774, 7775, 8447, 9266], '#007236'),
    'O II': ([3727], '#00a64d'),
    'O III': ([4959, 5007], '#00bf59'),
    'Na': ([5890, 5896, 8183, 8195], '#aba000'),
    'Mg': ([2780, 2852, 3829, 3832, 3838, 4571, 5167, 5173, 5184], '#8c6239'),
    'Mg II': ([2791, 2796, 2803, 4481], '#bf874e'),
    'Si II': ([3856, 5041, 5056, 5670, 6347, 6371], '#5674b9'),
    'S II': ([5433, 5454, 5606, 5640, 5647, 6715], '#a38409'),
    'Ca II': ([3934, 3969, 7292, 7324, 8498, 8542, 8662], '#005050'),
    'Fe II': ([5018, 5169], '#f26c4f'),
    'Fe III': ([4397, 4421, 4432, 5129, 5158], '#f9917b'),
}
# TODO add groups
# Galaxy lines
#
# 'H': '4341, 4861, 6563;
# 'N II': '6548, 6583;
# 'O I': '6300;'
# 'O II': '3727;
# 'O III': '4959, 5007;
# 'Mg II': '2798;
# 'S II': '6717, 6731'
# 'H': '3970, 4102, 4341, 4861, 6563'
# 'Na': '5890, 5896, 8183, 8195'
# 'He': '3886, 4472, 5876, 6678, 7065'
# 'Mg': '2780, 2852, 3829, 3832, 3838, 4571, 5167, 5173, 5184'
# 'He II': '3203, 4686'
# 'Mg II': '2791, 2796, 2803, 4481'
# 'O': '7772, 7774, 7775, 8447, 9266'
# 'Si II': '3856, 5041, 5056, 5670 6347, 6371'
# 'O II': '3727'
# 'Ca II': '3934, 3969, 7292, 7324, 8498, 8542, 8662'
# 'O III': '4959, 5007'
# 'Fe II': '5018, 5169'
# 'S II': '5433, 5454, 5606, 5640, 5647, 6715'
# 'Fe III': '4397, 4421, 4432, 5129, 5158'
#
# Other
#
# 'Tel: 6867-6884, 7594-7621'
# 'Tel': '#b7b7b7',
# 'H: 4341, 4861, 6563;
# 'N II': 6548, 6583;
# 'O I': 6300;
# 'O II': 3727;
# 'O III': 4959, 5007;
# 'Mg II': 2798;
# 'S II': 6717, 6731'
class CheckboxWithLegendGroup(CheckboxGroup):
    """CheckboxGroup whose labels carry a colored legend swatch.

    The browser-side view is supplied as CoffeeScript in
    ``__implementation__`` and compiled by bokeh at bundle time.
    NOTE(review): the source dump stripped indentation inside this string;
    the CoffeeScript block structure below was reconstructed -- confirm
    against the upstream file, since CoffeeScript is indentation-sensitive.
    """
    # One color per label; rendered as a colored left border on the label.
    colors = List(String, help="List of legend colors")
    __implementation__ = """
import {empty, input, label, div} from "core/dom"
import * as p from "core/properties"
import {CheckboxGroup, CheckboxGroupView} from "models/widgets/checkbox_group"

export class CheckboxWithLegendGroupView extends CheckboxGroupView
  render: () ->
    super()
    empty(@el)
    active = @model.active
    colors = @model.colors
    for text, i in @model.labels
      inputEl = input({type: "checkbox", value: "#{i}"})
      inputEl.addEventListener("change", () => @change_input())
      if @model.disabled then inputEl.disabled = true
      if i in active then inputEl.checked = true
      attrs = {
        style: "border-left: 12px solid #{colors[i]}; padding-left: 0.3em;"
      }
      labelEl = label(attrs, inputEl, text)
      if @model.inline
        labelEl.classList.add("bk-bs-checkbox-inline")
        @el.appendChild(labelEl)
      else
        divEl = div({class: "bk-bs-checkbox"}, labelEl)
        @el.appendChild(divEl)
    return @

export class CheckboxWithLegendGroup extends CheckboxGroup
  type: "CheckboxWithLegendGroup"
  default_view: CheckboxWithLegendGroupView
  @define {
    colors: [ p.Array, [] ]
  }
"""
# TODO replace with (script, div) method
def _plot_to_json(plot):
    """Convert plot to JSON objects necessary for rendering with `bokehJS`.
    Parameters
    ----------
    plot : bokeh.plotting.figure.Figure
        Bokeh plot object to be rendered.
    Returns
    -------
    (str, str, str)
        Returns (docs_json, render_items, custom_model_js) for the desired
        plot: the serialized document, the render-item descriptors, and the
        bundled JS for any custom bokeh models.
    """
    render_items = [{'docid': plot._id, 'elementid': make_id()}]
    # Wrap the plot in a throwaway Document so bokeh can serialize it.
    doc = Document()
    doc.add_root(plot)
    docs_json_inner = doc.to_json()
    docs_json = {render_items[0]['docid']: docs_json_inner}
    docs_json = serialize_json(docs_json)
    render_items = serialize_json(render_items)
    # JS for custom models such as CheckboxWithLegendGroup.
    custom_model_js = bundle_all_models()
    return docs_json, render_items, custom_model_js
# Columns shown in the hover tooltip on photometry points.
tooltip_format = [
    ('mjd', '@mjd{0.000000}'),
    ('flux', '@flux'),
    ('filter', '@filter'),
    ('fluxerr', '@fluxerr'),
    ('mag', '@mag'),
    ('magerr', '@magerr'),
    ('lim_mag', '@lim_mag'),
    ('instrument', '@instrument'),
    ('stacked', '@stacked'),
]
# Reversed jet colormap, used by get_color to map a bandpass's effective
# wavelength onto a display color.
cmap = cm.get_cmap('jet_r')
def get_color(bandpass_name, cmap_limits=(3000.0, 10000.0)):
    """Return a plotting color for a photometric bandpass.

    ZTF bandpasses get fixed, conventional color names; any other bandpass
    is looked up via sncosmo and its effective wavelength is mapped through
    the module-level colormap onto a hex color.
    """
    ztf_colors = {'ztfg': 'green', 'ztfi': 'orange', 'ztfr': 'red'}
    if bandpass_name.startswith('ztf'):
        return ztf_colors[bandpass_name]
    wave_eff = sncosmo.get_bandpass(bandpass_name).wave_eff
    lo, hi = cmap_limits
    # Fraction runs from 0 (red end of jet_r) at hi to 1 at lo.
    fraction = (hi - wave_eff) / (hi - lo)
    return rgb2hex(cmap(fraction)[:3])
# TODO make async so that thread isn't blocked
def photometry_plot(obj_id, user, width=600, height=300):
    """Create scatter plot of photometry for object.
    Parameters
    ----------
    obj_id : str
        ID of Obj to be plotted.
    user : User
        Requesting user; only photometry visible to one of the user's
        accessible groups is plotted.
    width : int, optional
        Plot width in pixels.
    height : int, optional
        Plot height in pixels.
    Returns
    -------
    (str, str, str)
        Returns (docs_json, render_items, custom_model_js) json for the
        desired plot, or (None, None, None) when no photometry is visible.
    """
    # Pull photometry joined with telescope/instrument names, restricted to
    # the user's accessible groups.
    data = pd.read_sql(
        DBSession()
        .query(
            Photometry,
            Telescope.nickname.label("telescope"),
            Instrument.name.label("instrument"),
        )
        .join(Instrument, Instrument.id == Photometry.instrument_id)
        .join(Telescope, Telescope.id == Instrument.telescope_id)
        .filter(Photometry.obj_id == obj_id)
        .filter(
            Photometry.groups.any(Group.id.in_([g.id for g in user.accessible_groups]))
        )
        .statement,
        DBSession().bind,
    )
    if data.empty:
        return None, None, None
    data['color'] = [get_color(f) for f in data['filter']]
    data['label'] = [
        f'{i} {f}-band' for i, f in zip(data['instrument'], data['filter'])
    ]
    data['zp'] = PHOT_ZP
    data['magsys'] = 'ab'
    data['alpha'] = 1.0
    # Limiting magnitude corresponding to a DETECT_THRESH-sigma flux error.
    data['lim_mag'] = -2.5 * np.log10(data['fluxerr'] * DETECT_THRESH) + data['zp']
    # Passing a dictionary to a bokeh datasource causes the frontend to die,
    # deleting the dictionary column fixes that
    del data['original_user_data']
    # keep track of things that are only upper limits
    data['hasflux'] = ~data['flux'].isna()
    # calculate the magnitudes - a photometry point is considered "significant"
    # or "detected" (and thus can be represented by a magnitude) if its snr
    # is above DETECT_THRESH
    obsind = data['hasflux'] & (
        data['flux'].fillna(0.0) / data['fluxerr'] >= DETECT_THRESH
    )
    data.loc[~obsind, 'mag'] = None
    data.loc[obsind, 'mag'] = -2.5 * np.log10(data[obsind]['flux']) + PHOT_ZP
    # calculate the magnitude errors using standard error propagation formulae
    # https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulae
    data.loc[~obsind, 'magerr'] = None
    coeff = 2.5 / np.log(10)
    magerrs = np.abs(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])
    data.loc[obsind, 'magerr'] = magerrs
    data['obs'] = obsind
    data['stacked'] = False
    # One group per instrument/filter combination; each group gets its own
    # set of renderers keyed into model_dict for the client-side callbacks.
    split = data.groupby('label', sort=False)
    finite = np.isfinite(data['flux'])
    fdata = data[finite]
    lower = np.min(fdata['flux']) * 0.95
    upper = np.max(fdata['flux']) * 1.05
    plot = figure(
        plot_width=width,
        plot_height=height,
        active_drag='box_zoom',
        tools='box_zoom,wheel_zoom,pan,reset,save',
        y_range=(lower, upper),
    )
    imhover = HoverTool(tooltips=tooltip_format)
    plot.add_tools(imhover)
    model_dict = {}
    for i, (label, sdf) in enumerate(split):
        # for the flux plot, we only show things that have a flux value
        df = sdf[sdf['hasflux']]
        key = f'obs{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='flux',
            color='color',
            marker='circle',
            fill_color='color',
            alpha='alpha',
            source=ColumnDataSource(df),
        )
        imhover.renderers.append(model_dict[key])
        # Empty source filled client-side (stackf.js) when the user picks a
        # nonzero bin size.
        key = f'bin{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='flux',
            color='color',
            marker='circle',
            fill_color='color',
            source=ColumnDataSource(
                data=dict(
                    mjd=[],
                    flux=[],
                    fluxerr=[],
                    filter=[],
                    color=[],
                    lim_mag=[],
                    mag=[],
                    magerr=[],
                    stacked=[],
                    instrument=[],
                )
            ),
        )
        imhover.renderers.append(model_dict[key])
        # Vertical error bars drawn as two-vertex segments per point.
        key = 'obserr' + str(i)
        y_err_x = []
        y_err_y = []
        for d, ro in df.iterrows():
            px = ro['mjd']
            py = ro['flux']
            err = ro['fluxerr']
            y_err_x.append((px, px))
            y_err_y.append((py - err, py + err))
        model_dict[key] = plot.multi_line(
            xs='xs',
            ys='ys',
            color='color',
            alpha='alpha',
            source=ColumnDataSource(
                data=dict(
                    xs=y_err_x, ys=y_err_y, color=df['color'], alpha=[1.0] * len(df)
                )
            ),
        )
        key = f'binerr{i}'
        model_dict[key] = plot.multi_line(
            xs='xs',
            ys='ys',
            color='color',
            source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
        )
    plot.xaxis.axis_label = 'MJD'
    plot.yaxis.axis_label = 'Flux (μJy)'
    plot.toolbar.logo = None
    toggle = CheckboxWithLegendGroup(
        labels=list(data.label.unique()),
        active=list(range(len(data.label.unique()))),
        colors=list(data.color.unique()),
    )
    # TODO replace `eval` with Namespaces
    # https://github.com/bokeh/bokeh/pull/6340
    toggle.callback = CustomJS(
        args={'toggle': toggle, **model_dict},
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'togglef.js')
        ).read(),
    )
    slider = Slider(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')
    callback = CustomJS(
        args={'slider': slider, 'toggle': toggle, **model_dict},
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'stackf.js')
        )
        .read()
        .replace('default_zp', str(PHOT_ZP))
        .replace('detect_thresh', str(DETECT_THRESH)),
    )
    slider.js_on_change('value', callback)
    # Mark the first and last detections
    detection_dates = data[data['hasflux']]['mjd']
    if len(detection_dates) > 0:
        first = round(detection_dates.min(), 6)
        last = round(detection_dates.max(), 6)
        first_color = "#34b4eb"
        last_color = "#8992f5"
        # Extend the marker lines well past the visible y-range so they still
        # span the plot after the user zooms.
        midpoint = (upper + lower) / 2
        line_top = 5 * upper - 4 * midpoint
        line_bottom = 5 * lower - 4 * midpoint
        first_x = np.full(5000, first)
        last_x = np.full(5000, last)
        y = np.linspace(line_bottom, line_top, num=5000)
        first_r = plot.line(
            x=first_x, y=y, line_alpha=0.5, line_color=first_color, line_width=2,
        )
        plot.add_tools(
            HoverTool(tooltips=[("First detection", f'{first}')], renderers=[first_r],)
        )
        last_r = plot.line(
            x=last_x, y=y, line_alpha=0.5, line_color=last_color, line_width=2
        )
        plot.add_tools(
            HoverTool(tooltips=[("Last detection", f'{last}')], renderers=[last_r],)
        )
    layout = row(plot, toggle)
    layout = column(slider, layout)
    p1 = Panel(child=layout, title='Flux')
    # now make the mag light curve
    ymax = np.nanmax(data['mag']) + 0.1
    ymin = np.nanmin(data['mag']) - 0.1
    plot = figure(
        plot_width=width,
        plot_height=height,
        active_drag='box_zoom',
        tools='box_zoom,wheel_zoom,pan,reset,save',
        y_range=(ymax, ymin),
        toolbar_location='above',
    )
    # Mark the first and last detections again
    if len(detection_dates) > 0:
        midpoint = (ymax + ymin) / 2
        line_top = 5 * ymax - 4 * midpoint
        line_bottom = 5 * ymin - 4 * midpoint
        y = np.linspace(line_bottom, line_top, num=5000)
        first_r = plot.line(
            x=first_x, y=y, line_alpha=0.5, line_color=first_color, line_width=2,
        )
        plot.add_tools(
            HoverTool(tooltips=[("First detection", f'{first}')], renderers=[first_r],)
        )
        last_r = plot.line(
            x=last_x, y=y, line_alpha=0.5, line_color=last_color, line_width=2
        )
        plot.add_tools(
            HoverTool(
                tooltips=[("Last detection", f'{last}')],
                renderers=[last_r],
                point_policy='follow_mouse',
            )
        )
    imhover = HoverTool(tooltips=tooltip_format)
    plot.add_tools(imhover)
    model_dict = {}
    for i, (label, df) in enumerate(split):
        key = f'obs{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='mag',
            color='color',
            marker='circle',
            fill_color='color',
            alpha='alpha',
            source=ColumnDataSource(df[df['obs']]),
        )
        imhover.renderers.append(model_dict[key])
        # Non-detections are shown as open inverted triangles at the
        # limiting magnitude.
        unobs_source = df[~df['obs']].copy()
        unobs_source.loc[:, 'alpha'] = 0.8
        key = f'unobs{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='lim_mag',
            color='color',
            marker='inverted_triangle',
            fill_color='white',
            line_color='color',
            alpha='alpha',
            source=ColumnDataSource(unobs_source),
        )
        imhover.renderers.append(model_dict[key])
        key = f'bin{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='mag',
            color='color',
            marker='circle',
            fill_color='color',
            source=ColumnDataSource(
                data=dict(
                    mjd=[],
                    flux=[],
                    fluxerr=[],
                    filter=[],
                    color=[],
                    lim_mag=[],
                    mag=[],
                    magerr=[],
                    instrument=[],
                    stacked=[],
                )
            ),
        )
        imhover.renderers.append(model_dict[key])
        key = 'obserr' + str(i)
        y_err_x = []
        y_err_y = []
        for d, ro in df[df['obs']].iterrows():
            px = ro['mjd']
            py = ro['mag']
            err = ro['magerr']
            y_err_x.append((px, px))
            y_err_y.append((py - err, py + err))
        model_dict[key] = plot.multi_line(
            xs='xs',
            ys='ys',
            color='color',
            alpha='alpha',
            source=ColumnDataSource(
                data=dict(
                    xs=y_err_x,
                    ys=y_err_y,
                    color=df[df['obs']]['color'],
                    alpha=[1.0] * len(df[df['obs']]),
                )
            ),
        )
        key = f'binerr{i}'
        model_dict[key] = plot.multi_line(
            xs='xs',
            ys='ys',
            color='color',
            source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
        )
        key = f'unobsbin{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='lim_mag',
            color='color',
            marker='inverted_triangle',
            fill_color='white',
            line_color='color',
            alpha=0.8,
            source=ColumnDataSource(
                data=dict(
                    mjd=[],
                    flux=[],
                    fluxerr=[],
                    filter=[],
                    color=[],
                    lim_mag=[],
                    mag=[],
                    magerr=[],
                    instrument=[],
                    stacked=[],
                )
            ),
        )
        imhover.renderers.append(model_dict[key])
        key = f'all{i}'
        model_dict[key] = ColumnDataSource(df)
        # Columns exported by the client-side CSV download button.
        key = f'bold{i}'
        model_dict[key] = ColumnDataSource(
            df[
                [
                    'mjd',
                    'flux',
                    'fluxerr',
                    'mag',
                    'magerr',
                    'filter',
                    'zp',
                    'magsys',
                    'lim_mag',
                    'stacked',
                ]
            ]
        )
    plot.xaxis.axis_label = 'MJD'
    plot.yaxis.axis_label = 'AB mag'
    plot.toolbar.logo = None
    toggle = CheckboxWithLegendGroup(
        labels=list(data.label.unique()),
        active=list(range(len(data.label.unique()))),
        colors=list(data.color.unique()),
    )
    # TODO replace `eval` with Namespaces
    # https://github.com/bokeh/bokeh/pull/6340
    toggle.callback = CustomJS(
        args={'toggle': toggle, **model_dict},
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'togglem.js')
        ).read(),
    )
    slider = Slider(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')
    button = Button(label="Export Bold Light Curve to CSV")
    button.callback = CustomJS(
        args={'slider': slider, 'toggle': toggle, **model_dict},
        code=open(
            os.path.join(
                os.path.dirname(__file__), '../static/js/plotjs', "download.js"
            )
        )
        .read()
        .replace('objname', obj_id)
        .replace('default_zp', str(PHOT_ZP)),
    )
    toplay = row(slider, button)
    callback = CustomJS(
        args={'slider': slider, 'toggle': toggle, **model_dict},
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'stackm.js')
        )
        .read()
        .replace('default_zp', str(PHOT_ZP))
        .replace('detect_thresh', str(DETECT_THRESH)),
    )
    slider.js_on_change('value', callback)
    layout = row(plot, toggle)
    layout = column(toplay, layout)
    p2 = Panel(child=layout, title='Mag')
    tabs = Tabs(tabs=[p2, p1])
    return _plot_to_json(tabs)
# TODO make async so that thread isn't blocked
def spectroscopy_plot(obj_id, spec_id=None):
    """Build the interactive spectroscopy plot for an object.

    One bokeh line per spectrum (colored by the viridis palette), with
    checkbox toggles for spectra and for overlaying redshifted SPEC_LINES.
    Returns (docs_json, render_items, custom_model_js), or
    (None, None, None) when the object has no matching spectra.
    TODO normalization? should this be handled at data ingestion or plot-time?
    """
    obj = Obj.query.get(obj_id)
    spectra = Obj.query.get(obj_id).spectra
    if spec_id is not None:
        spectra = [spec for spec in spectra if spec.id == int(spec_id)]
    if len(spectra) == 0:
        return None, None, None
    # One distinct color per spectrum id.
    color_map = dict(zip([s.id for s in spectra], viridis(len(spectra))))
    data = pd.concat(
        [
            pd.DataFrame(
                {
                    'wavelength': s.wavelengths,
                    'flux': s.fluxes,
                    'id': s.id,
                    'instrument': s.instrument.telescope.nickname,
                }
            )
            for i, s in enumerate(spectra)
        ]
    )
    split = data.groupby('id')
    hover = HoverTool(
        tooltips=[('wavelength', '$x'), ('flux', '$y'), ('instrument', '@instrument')]
    )
    plot = figure(
        plot_width=600,
        plot_height=300,
        sizing_mode='scale_both',
        tools='box_zoom,wheel_zoom,pan,reset',
        active_drag='box_zoom',
    )
    plot.add_tools(hover)
    model_dict = {}
    for i, (key, df) in enumerate(split):
        model_dict['s' + str(i)] = plot.line(
            x='wavelength', y='flux', color=color_map[key], source=ColumnDataSource(df)
        )
    plot.xaxis.axis_label = 'Wavelength (Å)'
    plot.yaxis.axis_label = 'Flux'
    plot.toolbar.logo = None
    # TODO how to choose a good default?
    plot.y_range = Range1d(0, 1.03 * data.flux.max())
    toggle = CheckboxWithLegendGroup(
        labels=[s.instrument.telescope.nickname for s in spectra],
        active=list(range(len(spectra))),
        width=100,
        colors=[color_map[k] for k, df in split],
    )
    # Client-side: show/hide spectrum lines by index via eval'd names.
    toggle.callback = CustomJS(
        args={'toggle': toggle, **model_dict},
        code="""
        for (let i = 0; i < toggle.labels.length; i++) {
            eval("s" + i).visible = (toggle.active.includes(i))
        }
        """,
    )
    elements = CheckboxWithLegendGroup(
        labels=list(SPEC_LINES.keys()),
        active=[],
        width=80,
        colors=[c for w, c in SPEC_LINES.values()],
    )
    z = TextInput(value=str(obj.redshift), title="z:")
    v_exp = TextInput(value='0', title="v_exp:")
    # One (initially hidden) segment renderer per species, redshifted to the
    # object's cataloged z; the callback below recomputes x for user z/v_exp.
    for i, (wavelengths, color) in enumerate(SPEC_LINES.values()):
        el_data = pd.DataFrame({'wavelength': wavelengths})
        el_data['x'] = el_data['wavelength'] * (1 + obj.redshift)
        model_dict[f'el{i}'] = plot.segment(
            x0='x',
            x1='x',
            # TODO change limits
            y0=0,
            y1=1e-13,
            color=color,
            source=ColumnDataSource(el_data),
        )
        model_dict[f'el{i}'].visible = False
    # TODO callback policy: don't require submit for text changes?
    elements.callback = CustomJS(
        args={'elements': elements, 'z': z, 'v_exp': v_exp, **model_dict},
        code="""
        let c = 299792.458; // speed of light in km / s
        for (let i = 0; i < elements.labels.length; i++) {
            let el = eval("el" + i);
            el.visible = (elements.active.includes(i))
            el.data_source.data.x = el.data_source.data.wavelength.map(
                x_i => (x_i * (1 + parseFloat(z.value)) /
                        (1 + parseFloat(v_exp.value) / c))
            );
            el.data_source.change.emit();
        }
        """,
    )
    z.callback = elements.callback
    v_exp.callback = elements.callback
    layout = row(plot, toggle, elements, column(z, v_exp))
    return _plot_to_json(layout)
| 1.734375 | 2 |
lib/session.py | Hiteshsuhas/err-stackstorm | 15 | 12701 | # coding:utf-8
import uuid
import string
import hashlib
import logging
from lib.errors import SessionExpiredError, SessionConsumedError
from datetime import datetime as dt
from random import SystemRandom
LOG = logging.getLogger("errbot.plugin.st2.session")
def generate_password(length=8):
    """Return a cryptographically random string of hexadecimal digits.

    The length is capped at 255 characters; non-positive lengths yield an
    empty string.
    """
    if length > 255:
        length = 255
    rng = SystemRandom()
    return "".join(rng.choice(string.hexdigits) for _ in range(length))
class Session(object):
    """A single-use ("sealed"), time-limited login session for a chat user."""

    def __init__(self, user_id, user_secret, session_ttl=3600):
        # Lazily generated on the first hash_secret() call.
        self.bot_secret = None
        self.user_id = user_id
        # Sealed == not yet consumed; see unseal()/is_sealed().
        self._is_sealed = True
        self.session_id = uuid.uuid4()
        self.create_date = int(dt.now().timestamp())
        self.modified_date = self.create_date
        self.ttl_in_seconds = session_ttl
        self._hashed_secret = self.hash_secret(user_secret)
        # Drop the local reference to the plaintext secret immediately.
        del user_secret

    def is_expired(self):
        """
        Return False while the session is still valid; raise
        SessionExpiredError once modified_date + ttl has passed.
        (Only the modified timestamp is checked, since it is bumped on every
        ttl update and starts equal to create_date.)
        """
        now = int(dt.now().timestamp())
        modified_expiry = self.modified_date + self.ttl_in_seconds
        if modified_expiry < now:
            raise SessionExpiredError
        return False

    def attributes(self):
        """Return the session's public state as a dictionary."""
        return {
            "UserID": self.user_id,
            "IsSealed": self._is_sealed,
            "SessionID": self.session_id,
            "CreationDate": str(dt.fromtimestamp(self.create_date)),
            "ModifiedDate": str(dt.fromtimestamp(self.modified_date)),
            "ExpiryDate": str(dt.fromtimestamp(self.modified_date + self.ttl_in_seconds)),
        }

    def __repr__(self):
        return " ".join(
            [
                "UserID: {},".format(str(self.user_id)),
                "Is Sealed: {},".format(str(self._is_sealed)),
                "SessionID: {},".format(str(self.session_id)),
                "Creation Date: {},".format(str(dt.fromtimestamp(self.create_date))),
                "Modified Date: {},".format(str(dt.fromtimestamp(self.modified_date))),
                "Expiry Date: {}".format(
                    str(dt.fromtimestamp(self.modified_date + self.ttl_in_seconds))
                ),
            ]
        )

    def unseal(self):
        """
        Mark the session as being consumed. Returns true if the session was available to be
        consumed or raises SessionConsumedError if the session has already been marked as consumed.
        May also raise SessionExpiredError (from is_expired).
        """
        self.is_expired()
        if self._is_sealed:
            self._is_sealed = False
        else:
            raise SessionConsumedError
        return True

    def is_sealed(self):
        """
        Query the state of the one time use flag.
        Returns True if the session has not been consumed or False if the session has already been
        consumed.
        """
        self.is_expired()
        return self._is_sealed

    def id(self):
        """
        Return the UUID for the session (as a string).
        """
        return str(self.session_id)

    def ttl(self, ttl=None):
        """
        Get/Set the time to live for the session.
        param: ttl[int] The number of seconds the session should remain valid since creation or
        modification.
        Returns the number of seconds the ttl has been set to if no argument is provided; otherwise
        the ttl is set to the number of seconds provided to the ttl argument (and the modified
        timestamp is refreshed).
        """
        self.is_expired()
        if ttl is None:
            return self.ttl_in_seconds
        if isinstance(ttl, int):
            self.ttl_in_seconds = ttl
            self.modified_date = int(dt.now().timestamp())
        else:
            LOG.warning("session ttl must be an integer type, got '{}'".format(ttl))

    def hash_secret(self, user_secret):
        """
        Generate a unique token by hashing a random bot secret with the user secrets.
        param: user_secret[string] - The users secret provided in the chat backend.
        """
        self.is_expired()
        if self.bot_secret is None:
            self.bot_secret = generate_password(8)
        h = hashlib.sha256()
        h.update(bytes(user_secret, "utf-8"))
        del user_secret
        h.update(bytes(self.bot_secret, "utf-8"))
        return h.hexdigest()

    def match_secret(self, user_secret):
        """
        Compare a secret with the session's hashed secret.
        param: user_secret[string] the secret to compare.
        Return True if the user_secret hash has matches the session hash or False if it does not.
        """
        self.is_expired()
        return self._hashed_secret == self.hash_secret(user_secret)
| 2.5 | 2 |
junopy/entities/bill.py | robertons/junopy | 3 | 12702 | <filename>junopy/entities/bill.py
# -*- coding: utf-8 -*-
from .lib import *
class Bill(JunoEntity):
    """Juno bill-payment entity, mapped to the ``/bill-payments`` API route.

    Field descriptors (String, ObjList, DateTime, Float) come from the
    library's declarative DSL imported via ``from .lib import *``.
    NOTE(review): ``cls`` is used as the *instance* parameter name here --
    this is an ordinary instance ``__init__`` despite the naming.
    """
    def __init__(cls, **kw):
        cls.__route__ = '/bill-payments'
        cls.__metadata__ = {}
        # FIELDS
        cls.id = String(max=80)
        cls.digitalAccountId = String(max=100)
        cls.billType = ObjList(context=cls, key='status', name='str')
        cls.numericalBarCode = String(max=100)
        cls.paymentDescription = String(max=100)
        cls.beneficiaryDocument = String(max=100)
        cls.dueDate = DateTime(format="%Y-%m-%d")
        cls.paymentDate = DateTime(format="%Y-%m-%d")
        cls.billAmount = Float()
        cls.paidAmount =Float()
        cls.createdOn = DateTime(format="iso")
        cls.status = ObjList(context=cls, key='status', name='str')
        super().__init__(**kw)
| 2.171875 | 2 |
accounts/signals.py | julesc00/challenge | 0 | 12703 | <gh_stars>0
from django.db.models.signals import post_save
from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.dispatch import receiver
from .models import Usuario, LoginLog
def user_profile(sender, instance, created, **kwargs):
    """post_save handler for User: on first save, add the new user to the
    "usuarios" group and create a matching Usuario profile row.

    NOTE(review): raises Group.DoesNotExist if the "usuarios" group does not
    exist -- confirm it is guaranteed by a migration/fixture.
    """
    if created:
        group = Group.objects.get(name="usuarios")
        instance.groups.add(group)
        Usuario.objects.create(
            user=instance,
            name=instance.username
        )
        print("Profile created")


# Register the handler for every User save.
post_save.connect(user_profile, sender=User)
@receiver(user_logged_in)
def log_user_login(sender, request, user, **kwargs):
    """user_logged_in handler: persist the login timestamp in LoginLog."""
    print(f"User {user.username} logged in on {user.last_login}")
    log = user.last_login
    LoginLog.objects.create(
        login_log=log
    )
| 2.203125 | 2 |
Python Scripting/Python - POC-3/DvdApp.py | vaibhavkrishna-bhosle/Trendnxt-Projects | 0 | 12704 | <reponame>vaibhavkrishna-bhosle/Trendnxt-Projects<filename>Python Scripting/Python - POC-3/DvdApp.py
import mysql.connector
from mysql.connector.errors import ProgrammingError
from mysql.connector import Error
from DvdOperations import DvdStore
database = "db4"
def CreateDatabase(database):
    """Create the MySQL database named *database*, then call Function1 to
    (re)connect and create the DVDSTORE table inside it.

    NOTE(review): connection credentials are hard-coded (the password
    appears anonymized as <PASSWORD> in this copy) -- move to config.
    """
    mydb = mysql.connector.connect(
        host="localhost",
        user="Vaibhav",
        passwd="<PASSWORD>",
    )
    mycursor = mydb.cursor()
    # The database name comes from the module-level constant, not user input.
    mycursor.execute("CREATE DATABASE "+database)
    mydb.close()
    print("Database is created ")
    Function1()
def Function1():
    """Connect to the configured database and ensure the DVDSTORE table
    exists; if the database itself is missing, create it via CreateDatabase
    (which calls back into this function)."""
    try:
        mydb1 = mysql.connector.connect(
            host="localhost",
            user="Vaibhav",
            passwd="<PASSWORD>",
            database=database
        )
    except mysql.connector.errors.ProgrammingError as error1:
        # Typically "Unknown database": create it and retry via recursion.
        print("error occurred because : {}".format(error1))
        CreateDatabase(database=database)
    except mysql.connector.Error as error2:
        print("error occured because : {}".format(error2))
        # NOTE(review): bare `exit` does nothing -- should be exit() or raise.
        exit
    else:
        mycursor = mydb1.cursor()
        s1 = "CREATE TABLE IF NOT EXISTS DVDSTORE (id INT AUTO_INCREMENT PRIMARY KEY, title VARCHAR(255), star_name VARCHAR(255), year_of_release INT, genre VARCHAR(255))"
        mycursor.execute(s1)
        mydb1.commit()
def Function2():
    """Interactive menu loop for the DVD store.

    Ensures the database/table exist (Function1), shows the menu, and
    dispatches to the DvdStore operations until the user chooses Exit.

    The original implementation recursed into itself after every action,
    which would eventually exhaust the recursion limit in a long session,
    and crashed with ValueError on non-numeric input; this version loops
    and validates the input while producing the same prompts and messages.
    """
    while True:
        # Re-run the setup and banner each cycle, matching the original
        # recursive behavior's output.
        Function1()
        print("\nWELCOME TO DVD STORE ")
        print("1. Add a DVD\n2. Search\n3. Modify a DVD\n4. Delete a DVD\n5. Exit")
        try:
            ch = int(input("Enter your choice : "))
        except ValueError:
            print("\nInvalid Choice !!! Enter Choice Again\n")
            continue
        if ch == 1:
            DvdStore.AddDvd()
        elif ch == 2:
            DvdStore.SearchDvd()
        elif ch == 3:
            DvdStore.ModifyDvd()
        elif ch == 4:
            DvdStore.DeleteDvd()
        elif ch == 5:
            print("\nThank You !!! Visit Again")
            break
        else:
            print("\nInvalid Choice !!! Enter Choice Again\n")
def PrintTable():
    """Print every row of the DVDSTORE table to stdout, one tuple per line."""
    mydb1 = mysql.connector.connect(
        host="localhost",
        user="Vaibhav",
        passwd="<PASSWORD>",
        database=database
    )
    mycursor = mydb1.cursor()
    mycursor.execute("SELECT * FROM DVDSTORE")
    myresult = mycursor.fetchall()
    for i in myresult:
        print(i)
Function2() | 3.078125 | 3 |
cloudshell/rest/api.py | QualiSystems/cloudshell-rest-api | 1 | 12705 | <reponame>QualiSystems/cloudshell-rest-api
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import json
try:
import urllib2
except:
import urllib.request as urllib2
from requests import delete, get, post, put
from cloudshell.rest.exceptions import ShellNotFoundException, FeatureUnavailable
class PackagingRestApiClient(object):
    """Client for CloudShell's packaging REST API: authentication, shell
    management, installed standards, and package import/export."""

    def __init__(self, ip, port, username, password, domain):
        """
        Logs into CloudShell using REST API
        :param ip: CloudShell server IP or host name
        :param port: port, usually 9000
        :param username: CloudShell username
        :param password: CloudShell password
        :param domain: CloudShell domain, usually Global
        """
        self.ip = ip
        self.port = port
        opener = urllib2.build_opener(urllib2.HTTPHandler)
        url = "http://{0}:{1}/API/Auth/Login".format(ip, port)
        data = "username={0}&password={1}&domain={2}" \
            .format(username, PackagingRestApiClient._urlencode(password), domain).encode()
        request = urllib2.Request(url=url, data=data)
        request.add_header("Content-Type", "application/x-www-form-urlencoded")
        # urllib2 has no native PUT support; temporarily override get_method.
        backup = request.get_method
        request.get_method = lambda: "PUT"
        url = opener.open(request)
        self.token = url.read()
        if isinstance(self.token, bytes):
            self.token = self.token.decode()
        # The server wraps the token in double quotes; strip them.
        self.token = self.token.strip("\"")
        request.get_method = backup

    def add_shell(self, shell_path):
        """
        Adds a new Shell Entity to CloudShell
        If the shell exists, exception will be thrown
        :param shell_path: path to the shell package file
        :return:
        """
        url = "http://{0}:{1}/API/Shells".format(self.ip, self.port)
        response = post(url,
                        files={os.path.basename(shell_path): open(shell_path, "rb")},
                        headers={"Authorization": "Basic " + self.token})
        if response.status_code != 201:
            raise Exception(response.text)

    def update_shell(self, shell_path, shell_name=None):
        """
        Updates an existing Shell Entity in CloudShell
        :param shell_path: The path to the shell file
        :param shell_name: The shell name. if not supplied the shell name is derived from the shell path
        :return:
        """
        filename = os.path.basename(shell_path)
        shell_name = shell_name or self._get_shell_name_from_filename(filename)
        url = "http://{0}:{1}/API/Shells/{2}".format(self.ip, self.port, shell_name)
        response = put(url,
                       files={filename: open(shell_path, "rb")},
                       headers={"Authorization": "Basic " + self.token})
        if response.status_code == 404:  # Not Found
            raise ShellNotFoundException()
        if response.status_code != 200:  # Ok
            raise Exception(response.text)

    def get_installed_standards(self):
        """
        Gets all standards installed on CloudShell
        :return: parsed JSON list of installed standards
        """
        url = "http://{0}:{1}/API/Standards".format(self.ip, self.port)
        response = get(url,
                       headers={"Authorization": "Basic " + self.token})
        if response.status_code == 404:  # Feature unavailable (probably due to cloudshell version below 8.1)
            raise FeatureUnavailable()
        if response.status_code != 200:  # Ok
            raise Exception(response.text)
        return response.json()

    def get_shell(self, shell_name):
        """Return the shell's metadata as parsed JSON."""
        url = "http://{0}:{1}/API/Shells/{2}".format(self.ip, self.port, shell_name)
        response = get(url,
                       headers={"Authorization": "Basic " + self.token})
        if response.status_code == 404 or response.status_code == 405:  # Feature unavailable (probably due to cloudshell version below 8.2)
            raise FeatureUnavailable()
        if response.status_code == 400:  # means shell not found
            raise ShellNotFoundException()
        if response.status_code != 200:
            raise Exception(response.text)
        return response.json()

    def delete_shell(self, shell_name):
        """Delete the named shell from CloudShell."""
        url = "http://{0}:{1}/API/Shells/{2}".format(self.ip, self.port, shell_name)
        response = delete(url,
                          headers={"Authorization": "Basic " + self.token})
        if response.status_code == 404 or response.status_code == 405:  # Feature unavailable (probably due to cloudshell version below 9.2)
            raise FeatureUnavailable()
        if response.status_code == 400:  # means shell not found
            raise ShellNotFoundException()
        if response.status_code != 200:
            raise Exception(response.text)

    def export_package(self, topologies):
        """Export a package with the topologies from the CloudShell
        :type topologies: list[str]
        :rtype: bytes
        :return: package content
        """
        url = "http://{0.ip}:{0.port}/API/Package/ExportPackage".format(self)
        response = post(
            url,
            headers={"Authorization": "Basic " + self.token,
                     "Content-type": "application/json"},
            json={"TopologyNames": topologies},
        )
        if response.status_code in (404, 405):
            raise FeatureUnavailable()
        if not response.ok:
            raise Exception(response.text)
        return response.content

    def import_package(self, package_path):
        """Import the package to the CloudShell
        :type package_path: str
        """
        url = "http://{0.ip}:{0.port}/API/Package/ImportPackage".format(self)
        with open(package_path, "rb") as fo:
            response = post(
                url,
                headers={"Authorization": "Basic " + self.token},
                files={"file": fo},
            )
        if response.status_code in (404, 405):
            raise FeatureUnavailable()
        if not response.ok:
            raise Exception(response.text)
        # The endpoint reports failures inside a 200 response body.
        if not response.json().get("Success"):
            error_msg = response.json().get("ErrorMessage", "Problem with importing the package")
            raise Exception(error_msg)

    @staticmethod
    def _urlencode(s):
        # Minimal escaping of the characters the login endpoint is known to
        # mangle; not a general-purpose URL encoder.
        return s.replace("+", "%2B").replace("/", "%2F").replace("=", "%3D")

    @staticmethod
    def _get_shell_name_from_filename(filename):
        # "my_shell.zip" -> "my_shell"
        return os.path.splitext(filename)[0]

    def upload_environment_zip_file(self, zipfilename):
        """Read a package zip from disk and upload it (see upload_environment_zip_data)."""
        with open(zipfilename, "rb") as g:
            zipdata = g.read()
        self.upload_environment_zip_data(zipdata)

    def upload_environment_zip_data(self, zipdata):
        """Upload raw zip bytes to the ImportPackage endpoint using a
        hand-built multipart/form-data body (urllib2, no requests)."""
        boundary = b'''------------------------652c70c071862fc2'''
        fd = b'''--''' + boundary + \
            b'''\r\nContent-Disposition: form-data; name="file"; filename="my_zip.zip"''' + \
            b'''\r\nContent-Type: application/octet-stream\r\n\r\n''' + zipdata + \
            b'''\r\n--''' + boundary + b'''--\r\n'''

        class FakeReader(object):
            # Minimal file-like wrapper so urllib2 can stream the body.
            def __init__(self, k):
                self.k = k
                self.offset = 0

            def read(self, blocksize):
                if self.offset >= len(self.k):
                    return None
                if self.offset + blocksize >= len(self.k):
                    rv = self.k[self.offset:]
                    self.offset = len(self.k)
                else:
                    rv = self.k[self.offset:self.offset+blocksize]
                    self.offset += blocksize
                return rv

        fdreader = FakeReader(fd)
        request = urllib2.Request("http://{}:{}/API/Package/ImportPackage".format(self.ip, str(self.port)),
                                  data=fdreader)
        backup = request.get_method
        request.get_method = lambda: "POST"
        request.add_header("Authorization", "Basic " + self.token)
        request.add_header("Content-Type", "multipart/form-data; boundary=" + boundary)
        request.add_header("Accept", "*/*")
        request.add_header("Content-Length", str(len(fd)))
        request.get_method = backup
        opener = urllib2.build_opener(urllib2.HTTPHandler)
        url = opener.open(request)
        try:
            s = url.read()
            if isinstance(s, bytes):
                s = s.decode()
            o = json.loads(s)
            if "Success" not in o:
                raise Exception("'Success' value not found in Quali API response: " + str(o))
        except Exception as ue:
            raise Exception("Error extracting Quali API zip import result: " + str(ue))
        if not o["Success"]:
            raise Exception("Error uploading Quali API zip package: "+o["ErrorMessage"])
| 2.59375 | 3 |
data/scripts/classes/team_row.py | matt-waite/lol-reference | 1 | 12706 | <reponame>matt-waite/lol-reference
from classes import oracles_headers
class TeamRow:
    """Accessor for one row of team-level match data.

    Column positions are looked up through the shared ``oracles_headers``
    mapping; each ``Get*`` method returns one (optionally typed) field of
    the wrapped row.
    """

    COLUMNS = oracles_headers.oracles_columns

    def __init__(self, row):
        self.ROW = row

    def GetCell(self, name):
        """Return the raw cell value for the column called *name*."""
        return self.ROW[self.COLUMNS[name]]

    def GetDatabaseObject(self):
        """Return this row as a dict shaped for database insertion."""
        return {
            "gameId": self.GameId(),
            "isComplete": self.IsComplete(),
            "league": self.League(),
            "year": self.Year(),
            "split": self.Split(),
            "date": self.Date(),
            "patch": self.Patch(),
            "side": self.Side(),
            "team": self.Team(),
            "bans": self.Bans(),
            "gameLength": self.GameLength(),
            "result": self.Result(),
            "kills": self.Kills(),
            "deaths": self.Deaths(),
            "assists": self.Assists(),
        }

    def GameId(self):
        return self.GetCell('GameId')

    def IsComplete(self):
        return self.GetCell('IsComplete')

    def League(self):
        return self.GetCell('League')

    def Year(self):
        return int(self.GetCell('Year'))

    def Split(self):
        return self.GetCell('Split')

    def Date(self):
        return self.GetCell('Date')

    def Patch(self):
        return self.GetCell('Patch')

    def Side(self):
        return self.GetCell('Side')

    def Team(self):
        return self.GetCell('Team')

    def Bans(self):
        # Columns Ban1 .. Ban5.
        return [self.GetCell("Ban" + str(i)) for i in (1, 2, 3, 4, 5)]

    def GameLength(self):
        return self.GetCell('GameLength')

    def Result(self):
        # The sheet encodes a loss as the string "0"; anything else is a win.
        return self.GetCell('Result') != "0"

    def Kills(self):
        return int(self.GetCell('Kills'))

    def Deaths(self):
        return int(self.GetCell('Deaths'))

    def Assists(self):
        return int(self.GetCell('Assists'))
| 3.1875 | 3 |
startup/97-standard-plans.py | MikeHart85/SIX_profile_collection | 0 | 12707 | def pol_V(offset=None):
yield from mv(m1_simple_fbk,0)
cur_mono_e = pgm.en.user_readback.value
yield from mv(epu1.table,6) # 4 = 3rd harmonic; 6 = "testing V" 1st harmonic
if offset is not None:
yield from mv(epu1.offset,offset)
yield from mv(epu1.phase,28.5)
yield from mv(pgm.en,cur_mono_e+1) #TODO this is dirty trick. figure out how to process epu.table.input
yield from mv(pgm.en,cur_mono_e)
yield from mv(m1_simple_fbk,1)
print('\nFinished moving the polarization to vertical.\n\tNote that the offset for epu calibration is {}eV.\n\n'.format(offset))
def pol_H(offset=None):
    """Move the beamline polarization to horizontal (EPU phase 0).

    offset : float or None
        Optional EPU calibration offset in eV; applied only when given.
    """
    # Disable the M1 simple feedback while the undulator/mono are moved.
    yield from mv(m1_simple_fbk,0)
    cur_mono_e = pgm.en.user_readback.value
    yield from mv(epu1.table,5) # 2 = 3rd harmonic; 5 = "testing H" 1st harmonic
    if offset is not None:
        yield from mv(epu1.offset,offset)
    yield from mv(epu1.phase,0)
    # Bounce the mono energy by 1 eV and back so the new EPU table takes effect.
    yield from mv(pgm.en,cur_mono_e+1) #TODO this is dirty trick. figure out how to process epu.table.input
    yield from mv(pgm.en,cur_mono_e)
    yield from mv(m1_simple_fbk,1)
    print('\nFinished moving the polarization to horizontal.\n\tNote that the offset for epu calibration is {}eV.\n\n'.format(offset))
def m3_check():
    """Optimize M3 pitch via a short pitch scan, then re-arm the M3 simple feedback.

    Saves and restores the exit-slit gaps and the gas-cell diagnostic
    position; the piezo shutter is temporarily disabled if it was enabled.
    """
    yield from mv(m3_simple_fbk,0)
    sclr_enable()
    # Track whether the piezo shutter must be re-enabled at the end.
    # Defaulting to 0 also covers unexpected readback values, which previously
    # left `flag` unbound and crashed with a NameError at the checks below.
    flag = 0
    if pzshutter.value == 0:
        print('Piezo Shutter is disabled')
    elif pzshutter.value == 2:
        print('Piezo Shutter is enabled: going to be disabled')
        yield from pzshutter_disable()
        flag = 1
    # Remember slit and diagnostic positions so they can be restored.
    temp_extslt_vg = extslt.vg.user_readback.value
    temp_extslt_hg = extslt.hg.user_readback.value
    temp_gcdiag = gcdiag.y.user_readback.value
    yield from mv(sclr.preset_time, 1)
    yield from mv(extslt.hg,10)
    yield from mv(extslt.vg,30)
    #yield from gcdiag.grid # RE-COMMENT THIS LINE 5/7/2019
    # Scan M3 pitch around the current position and move to the fitted centre.
    yield from rel_scan([sclr],m3.pit,-0.0005,0.0005,31, md = {'reason':'checking m3'})
    yield from mv(m3.pit,peaks['cen']['sclr_channels_chan8'])
    # Restore slits and diagnostics.
    yield from mv(extslt.hg,temp_extslt_hg)
    yield from mv(extslt.vg,temp_extslt_vg)
    yield from mv(gcdiag.y,temp_gcdiag)
    yield from sleep(20)
    # Re-target the simple feedback at the current slit-camera centroid, then enable it.
    yield from mv(m3_simple_fbk_target,extslt_cam.stats1.centroid.x.value)
    yield from mv(m3_simple_fbk,1)
    if flag == 0:
        print('Piezo Shutter remains disabled')
    else:
        print('Piezo Shutter is going to renabled')
        yield from pzshutter_enable()
def m1_align_fine2():
    """Scan m1.x at five M1 pitch values bracketing the current pitch, then restore both axes."""
    x_initial = m1.x.user_readback.value
    pit_initial = m1.pit.user_readback.value
    pit_step = 50
    # Pitches visited: pit_initial - step, pit_initial, ... pit_initial + 3*step.
    for step_index in range(5):
        yield from mv(m1.pit, pit_initial + (step_index - 1) * pit_step)
        yield from scan([qem05], m1.x, -3, 3.8, 35)
    # Return M1 to where it started.
    yield from mv(m1.pit, pit_initial)
    yield from mv(m1.x, x_initial)
def alignM3x():
    """Scan m3.x at three horizontal exit-slit centre positions to map the M3 footprint.

    Exit-slit gaps and centre are saved first and restored afterwards;
    diagnostics are moved out of the beam and the gas-cell diode in.
    """
    # Remember slit setpoints so they can be restored at the end.
    vg_init = extslt.vg.user_setpoint.value
    hg_init = extslt.hg.user_setpoint.value
    hc_init = extslt.hc.user_setpoint.value
    print('Saving exit slit positions for later')
    yield from m3diag.out        # move M3 diagnostics out of the way
    yield from gcdiag.grid       # read the gas cell diode
    detectors = [qem07]          # gas cell diagnostics qem
    yield from mv(extslt.vg, 30)     # vertical gap large enough for signal
    yield from mv(extslt.hg, 9000)   # horizontal slit fully open
    # Step the horizontal slit centre and scan m3.x at each position.
    for hc_position in (-9, -3, 3):
        yield from mv(extslt.hc, hc_position)
        yield from relative_scan(detectors, m3.x, -6, 6, 61)
    print('Returning exit slit positions to the inital values')
    yield from mv(extslt.hc, hc_init)
    yield from mv(extslt.vg, vg_init, extslt.hg, hg_init)
def beamline_align():
    """Align the beamline: optimize M1 pitch, verify M3, then re-arm the M1 camera feedback."""
    # Disable camera-based M1 feedback while alignment moves are made.
    yield from mv(m1_fbk,0)
    yield from align.m1pit
    yield from sleep(5)
    yield from m3_check()
    #yield from mv(m1_fbk_cam_time,0.002)
    #yield from mv(m1_fbk_th,1500)
    yield from sleep(5)
    # Re-target the feedback setpoint at the current slit-camera centroid, then enable it.
    yield from mv(m1_fbk_sp,extslt_cam.stats1.centroid.x.value)
    yield from mv(m1_fbk,1)
def beamline_align_v2():
    """Align via the 'simple' feedback loops: optimize M1 pitch, lock the M1
    ratio feedback onto the optimized reading, then verify M3."""
    # Disable all feedback loops before moving anything.
    yield from mv(m1_simple_fbk,0)
    yield from mv(m3_simple_fbk,0)
    yield from mv(m1_fbk,0)
    yield from align.m1pit
    yield from sleep(5)
    # Lock the target ratio to whatever the optimized geometry now produces.
    yield from mv(m1_simple_fbk_target_ratio,m1_simple_fbk_ratio.value)
    yield from mv(m1_simple_fbk,1)
    yield from sleep(5)
    yield from m3_check()
def xas(dets, motor, start_en, stop_en, num_points, sec_per_point):
    """Run a simple XAS energy scan and return the centre-of-mass and peak energies.

    Parameters
    ----------
    dets : list
        Detectors recorded at each energy point.
    motor :
        Unused; kept for signature compatibility (the scan always moves pgm.en).
    start_en, stop_en : float
        Energy scan limits (eV).
    num_points : int
        Number of energy points.
    sec_per_point : float
        Scaler counting time per point (previous value restored afterwards).

    Returns
    -------
    (E_com, E_max)
        Centre-of-mass and peak energies from the live statistics of
        scaler channel 2.
    """
    sclr_enable()
    sclr_set_time = sclr.preset_time.value
    # Track whether the piezo shutter must be re-enabled afterwards.
    # Defaulting to 0 also covers unexpected readback values, which previously
    # left `flag` unbound and crashed with a NameError at the checks below.
    flag = 0
    if pzshutter.value == 0:
        print('Piezo Shutter is disabled')
    elif pzshutter.value == 2:
        print('Piezo Shutter is enabled: going to be disabled')
        yield from pzshutter_disable()
        flag = 1
    yield from mv(sclr.preset_time, sec_per_point)
    yield from scan(dets, pgm.en, start_en, stop_en, num_points)
    E_max = peaks['max']['sclr_channels_chan2'][0]
    E_com = peaks['com']['sclr_channels_chan2']
    if flag == 0:
        print('Piezo Shutter remains disabled')
    else:
        print('Piezo Shutter is going to renabled')
        yield from pzshutter_enable()
    yield from mv(sclr.preset_time, sclr_set_time)
    return E_com, E_max
#TODO put this inside of rixscam
def rixscam_get_threshold(Ei=None):
    """Return the (min, max) RIXSCAM single-photon-counting thresholds for LS mode.

    Ei : float, optional
        Incident energy in eV; defaults to the beamline's current energy.
    """
    if Ei is None:
        Ei = pgm.en.user_readback.value
    # Empirical linear calibrations of the threshold window vs. energy.
    lower = 0.7987 * Ei - 97.964
    upper = 1.4907 * Ei + 38.249
    print('\n\n\tMinimum value for RIXSCAM threshold (LS mode):\t{}'.format(lower))
    print('\tMaximum value for RIXSCAM threshold (LS mode):\t{}'.format(upper))
    print('\tFor Beamline Energy:\t\t\t\t{}'.format(Ei))
    return lower, upper
#TODO put this insdie of rixscam
def rixscam_set_threshold(Ei=None):
    """Configure the RIXSCAM XIP plugin for single-photon centroiding (LS mode).

    Computes the threshold window for *Ei* (default: current beamline
    energy) and writes it, together with the energy, to the plugin.
    """
    if Ei is None:
        Ei = pgm.en.user_readback.value
    lower, upper = rixscam_get_threshold(Ei)
    yield from mv(rixscam.xip.beamline_energy, Ei,
                  rixscam.xip.sum_3x3_threshold_min, lower,
                  rixscam.xip.sum_3x3_threshold_max, upper)
#TODO make official so that there is a m1_fbk device like m1fbk.setpoint
# EPICS handles for the camera-based M1 feedback loop: enable flag, PID
# setpoint, and the slit-camera statistics it feeds on.
m1_fbk = EpicsSignal('XF:02IDA-OP{FBck}Sts:FB-Sel', name = 'm1_fbk')
m1_fbk_sp = EpicsSignal('XF:02IDA-OP{FBck}PID-SP', name = 'm1_fbk_sp')
m1_fbk_th = extslt_cam.stats1.centroid_threshold
#m1_fbk_pix_x = extslt_cam.stats1.centroid.x.value
m1_fbk_cam_time = extslt_cam.cam.acquire_time
#(mv(m1_fbk_th,1500)
# "Simple" feedback PVs: M1 regulates to a target intensity ratio,
# M3 regulates to a target camera-centroid position.
m1_simple_fbk = EpicsSignal('XF:02IDA-OP{M1_simp_feed}FB-Ena', name = 'm1_simple_fbk')
m1_simple_fbk_target_ratio = EpicsSignal('XF:02IDA-OP{M1_simp_feed}FB-TarRat', name = 'm1_simple_fbk_target_ratio')
m1_simple_fbk_ratio = EpicsSignal('XF:02IDA-OP{M1_simp_feed}FB-Ratio', name = 'm1_simple_fbk_ratio')
m3_simple_fbk = EpicsSignal('XF:02IDA-OP{M3_simp_feed}FB-Ena', name = 'm3_simple_fbk')
m3_simple_fbk_target = EpicsSignal('XF:02IDA-OP{M3_simp_feed}FB-Targ', name = 'm3_simple_fbk_target')
m3_simple_fbk_cen = EpicsSignal('XF:02IDA-OP{M3_simp_feed}FB_inpbuf', name = 'm3_simple_fbk_cen')
| 2.375 | 2 |
speechpro/cloud/speech/synthesis/rest/cloud_client/api/__init__.py | speechpro/cloud-python | 15 | 12708 | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.session_api
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.synthesize_api
| 1.101563 | 1 |
learn_pyqt5/checkable_bar.py | liusong-cn/python | 1 | 12709 | # _*_ coding:utf-8 _*_
# author:ls
# time:2020/3/19 0019
import sys
from PyQt5.QtWidgets import QApplication,QAction,QMainWindow
from PyQt5.QtGui import QIcon
class Example(QMainWindow):
    """Main window demonstrating a checkable menu action that toggles the status bar."""

    def __init__(self):
        super().__init__()
        self.setui()

    def setui(self):
        """Build the status bar, the checkable menu action and the window chrome."""
        self.statusbar = self.statusBar()
        self.statusbar.showMessage('default show')
        # checkable=True already makes this a toggle action; the previous
        # duplicate act.setCheckable(True) call was redundant and is removed.
        act = QAction('check', self, checkable=True)
        act.setStatusTip('view changed')
        # `triggered` fires when the user activates the action; for a
        # checkable action Qt passes the new checked state, which arrives
        # in toggle() as its boolean argument.
        act.triggered.connect(self.toggle)
        menubar = self.menuBar()
        menu = menubar.addMenu('checkable')
        menu.addAction(act)
        self.setGeometry(300, 300, 400, 150)
        self.setWindowTitle('this is a checkable menu')
        self.show()

    def toggle(self, state):
        """Show or hide the status bar according to the action's checked state."""
        if state:
            self.statusbar.show()
        else:
            self.statusbar.hide()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, build the window,
    # and hand control to the event loop until the window closes.
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
service/test.py | ksiomelo/cubix | 3 | 12710 | <reponame>ksiomelo/cubix<gh_stars>1-10
#!/usr/bin/env python
import pika
import time
import json
import StringIO
#from fca.concept import Concept
from casa import Casa
#from fca.readwrite import cxt
def read_cxt_string(data):
    """Parse a Burmeister .cxt formal-context string into a Casa object.

    Expected layout: a "B" header line, a blank line, the object count,
    the attribute count, another blank line, the object names, the
    attribute names, and finally one incidence row per object where 'X'
    marks a relation.  NOTE: Python 2 code (StringIO module, xrange, and
    map() returning a list).
    """
    input_file = StringIO.StringIO(data)
    assert input_file.readline().strip() == "B",\
        "File is not valid cxt"
    input_file.readline() # Empty line
    number_of_objects = int(input_file.readline().strip())
    number_of_attributes = int(input_file.readline().strip())
    input_file.readline() # Empty line
    objects = [input_file.readline().strip() for i in xrange(number_of_objects)]
    attributes = [input_file.readline().strip() for i in xrange(number_of_attributes)]
    table = []
    for i in xrange(number_of_objects):
        # 'X' in column j means object i has attribute j.
        line = map(lambda c: c=="X", input_file.readline().strip())
        table.append(line)
    input_file.close()
    return Casa("sample", objects, attributes, table)
def get_a_context():
    """Build a small hard-coded 4x4 formal context for testing."""
    objects = [1, 2, 3, 4]
    attributes = ['a', 'b', 'c', 'd']
    incidence = [
        [True, False, False, True],
        [True, False, True, False],
        [False, True, True, False],
        [False, True, True, True],
    ]
    return Casa("sample context", objects, attributes, incidence)
def on_queue_declared(queue):
    """Bind the 'test' queue on the default exchange to the customer routing key.

    NOTE(review): appears unused in this script (the declare call that
    referenced it is commented out below); relies on the module-level
    `channel`.
    """
    channel.queue_bind(queue='test',
                       exchange='',
                       routing_key='order.test.customer')
# Connect to a local RabbitMQ broker and declare the two work queues.
connection = pika.BlockingConnection(pika.ConnectionParameters(
    host='localhost'))
channel = connection.channel()
# Durable, non-exclusive queues: survive broker restarts and allow
# multiple consumers to attach.
channel.queue_declare(queue='task_queue', durable=True, exclusive=False)
channel.queue_declare(queue='msg_queue', durable=True, exclusive=False)

#channel.exchange_declare(exchange='',
#             type="topic",
#                         durable=True,
#                         auto_delete=False)

#channel.queue_declare(queue="task_queue",
#                      durable=True,
#                      exclusive=False,
#                      auto_delete=False,
#                      callback=on_queue_declared)

print ' [*] Waiting for messages. To exit press CTRL+C'
def msg_callback(ch, method, props, body):
print " [x] Received %r" % (body,)
response = body + " MODIFIED"
#response = get_a_concept()
print " [x] Done"
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body= str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
def callback(ch, method, props, body):
print " [x] Received %r" % (body,)
response = body + " MODIFIED"
context = read_cxt_string(body)
print context.to_dict(False)
#response = get_a_concept()
print " [x] Done"
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body= json.dumps(context.to_dict(False)))#str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
# Fair dispatch: deliver at most one unacknowledged message per consumer.
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback,
                      queue='task_queue')
channel.basic_consume(msg_callback,
                      queue='msg_queue')
# Blocks forever, dispatching deliveries to the callbacks above.
channel.start_consuming()
src/nia/selections/rank.py | salar-shdk/nia | 8 | 12711 | from .selection import Selection
import numpy as np
class Rank(Selection):
    """Rank selection: keep the `size` individuals with the lowest fitness."""

    @Selection.initializer
    def __init__(self, size=20):
        pass

    def select(self, population, fitness):
        """Return the `size` best individuals and their fitness, ascending by fitness."""
        order = np.argsort(fitness)[:self.size]
        return population[order], fitness[order]
| 2.984375 | 3 |
app.py | juergenpointinger/status-dashboard | 0 | 12712 | # Standard library imports
import logging
import os
# Third party imports
import dash
import dash_bootstrap_components as dbc
from flask_caching import Cache
import plotly.io as pio
# Local application imports
from modules.gitlab import GitLab
import settings
# Initialize logging mechanism
logging.basicConfig(level=settings.LOGLEVEL, format=settings.LOGFORMAT)
logger = logging.getLogger(__name__)

# Connect to GitLab once at startup and report the server version.
gl = GitLab()
logger.info("Current GitLab version: {}".format(GitLab.version))

# App instance
# suppress_callback_exceptions: callbacks may target components that only
# exist after dynamic page/tab content is rendered.
app = dash.Dash(__name__,
                suppress_callback_exceptions=True,
                external_stylesheets=[dbc.themes.BOOTSTRAP])
app.title = settings.APP_NAME

# App caching
# CACHE_CONFIG = {
#     # Note that filesystem cache doesn't work on systems with ephemeral
#     # filesystems like Heroku.
#     'CACHE_TYPE': 'filesystem',
#     'CACHE_DIR': 'cache-directory',

#     # should be equal to maximum number of users on the app at a single time
#     # higher numbers will store more data in the filesystem / redis cache
#     'CACHE_THRESHOLD': 200
# }

CACHE_CONFIG = {
    # try 'filesystem' if you don't want to setup redis
    'CACHE_TYPE': 'redis',
    'CACHE_REDIS_URL': settings.REDIS_URL
}
cache = Cache()
cache.init_app(app.server, config=CACHE_CONFIG)

# Default every Plotly figure to the dark theme.
pio.templates.default = "plotly_dark"
tests/scanner/test_data/fake_retention_scanner_data.py | ogreface/forseti-security | 0 | 12713 | <reponame>ogreface/forseti-security<filename>tests/scanner/test_data/fake_retention_scanner_data.py
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake Retention scanner data."""
import json
from datetime import datetime, timedelta
import collections
from google.cloud.forseti.common.gcp_type import organization
from google.cloud.forseti.common.gcp_type import project
from google.cloud.forseti.common.gcp_type import bucket
from google.cloud.forseti.scanner.audit import retention_rules_engine as rre
# Shared fake GCP resource hierarchy for retention-scanner tests:
# one organization parenting four projects; test buckets hang off these.
ORGANIZATION = organization.Organization(
    '123456',
    display_name='Default Organization',
    full_name='organization/123456/',
    data='fake_org_data_123456',
)

PROJECT1 = project.Project(
    'def-project-1',
    project_number=11223344,
    display_name='default project 1',
    parent=ORGANIZATION,
    full_name='organization/123456/project/def-project-1/',
    data='fake_project_data_11223344',
)

PROJECT2 = project.Project(
    'def-project-2',
    project_number=55667788,
    display_name='default project 2',
    parent=ORGANIZATION,
    full_name='organization/123456/project/def-project-2/',
    data='fake_project_data_55667788',
)

PROJECT3 = project.Project(
    'def-project-3',
    project_number=12121212,
    display_name='default project 3',
    parent=ORGANIZATION,
    full_name='organization/123456/project/def-project-3/',
    data='fake_project_data_12121212',
)

PROJECT4 = project.Project(
    'def-project-4',
    project_number=34343434,
    display_name='default project 4',
    parent=ORGANIZATION,
    full_name='organization/123456/project/def-project-4/',
    data='fake_project_data_34343434',
)
def build_bucket_violations(bucket, rule_name):
    """Return a one-element list holding a retention RuleViolation for *bucket*.

    The bucket's lifecycle rules are serialized (key-sorted JSON) into the
    violation's data field so tests can compare them deterministically.
    """
    lifecycle_json = json.dumps(bucket.get_lifecycle_rule(), sort_keys=True)
    violation = rre.RuleViolation(
        resource_name='buckets/' + bucket.id,
        resource_id=bucket.id,
        resource_type=bucket.type,
        full_name=bucket.full_name,
        rule_index=0,
        rule_name=rule_name,
        violation_type='RETENTION_VIOLATION',
        violation_data=lifecycle_json,
        resource_data=bucket.data,
    )
    return [violation]
class FakeBucketDataCreater():
    """Builder for fake GCS Bucket resources with configurable lifecycle rules.

    Used by retention-scanner tests: accumulate lifecycle rules with
    AddLifecycleDict(), then materialize a bucket.Bucket via get_resource().
    """

    def __init__(self, id, project):
        # id: bucket id; project: parent project resource (supplies full_name).
        self._id = id
        self._parent = project
        self._data_lifecycle = None  # becomes {'rule': [...]} once a rule is added

    def SetLifecycleDict(self):
        """Initialize an empty lifecycle dict ({"rule": []})."""
        self._data_lifecycle = {"rule": []}

    def AddLifecycleDict(
            self,
            action=None,
            age=None,
            created_before=None,
            matches_storage_class=None,
            num_newer_versions=None,
            is_live=None):
        """Append one lifecycle rule and return it.

        Only conditions that are not None are included, so falsy-but-valid
        values (0, False) are preserved; `is not None` replaces the old
        `!= None` comparisons (same behavior, idiomatic form).  The dict
        follows the GCS lifecycle JSON shape.
        """
        if not self._data_lifecycle:
            self.SetLifecycleDict()
        result = {'action': {}, 'condition': {}}
        result['action']['type'] = action
        if age is not None:
            result['condition']['age'] = age
        if created_before is not None:
            result['condition']['createdBefore'] = created_before
        if matches_storage_class is not None:
            result['condition']['matchesStorageClass'] = matches_storage_class
        if num_newer_versions is not None:
            result['condition']['numNewerVersions'] = num_newer_versions
        if is_live is not None:
            result['condition']['isLive'] = is_live
        self._data_lifecycle['rule'].append(result)
        return result

    def get_resource(self):
        """Materialize the accumulated state as a bucket.Bucket resource."""
        data_dict = {'id': self._id, 'location': 'earth'}
        if self._data_lifecycle is not None:
            data_dict['lifecycle'] = self._data_lifecycle
        data = json.dumps(data_dict)
        return bucket.Bucket(bucket_id=self._id,
                             parent=self._parent,
                             full_name=self._parent.full_name+'bucket/'+self._id+'/',
                             data=data)
# Lightweight inputs for get_fake_bucket_resource():
#   FakeBucketDataInput - bucket id, parent project, list of LifecycleInput.
#   LifecycleInput      - lifecycle action string plus a dict of optional conditions.
FakeBucketDataInput = collections.namedtuple(
    'FakeBucketDataInput', ['id', 'project', 'lifecycles'])
LifecycleInput = collections.namedtuple(
    'LifecycleInput', ['action', 'conditions'])
def get_fake_bucket_resource(fake_bucket_data_input):
    """Build a fake Bucket resource from a FakeBucketDataInput tuple."""
    creater = FakeBucketDataCreater(
        fake_bucket_data_input.id, fake_bucket_data_input.project)
    for rule in fake_bucket_data_input.lifecycles:
        conditions = rule.conditions
        creater.AddLifecycleDict(
            action=rule.action,
            age=conditions.get('age'),
            created_before=conditions.get('created_before'),
            matches_storage_class=conditions.get('matches_storage_class'),
            num_newer_versions=conditions.get('num_newer_versions'),
            is_live=conditions.get('is_live'))
    return creater.get_resource()
| 1.6875 | 2 |
libsonyapi/camera.py | BugsForDays/libsonyapi | 13 | 12714 | import socket
import requests
import json
import xml.etree.ElementTree as ET
class Camera(object):
def __init__(self):
"""
create camera object
"""
self.xml_url = self.discover()
self.name, self.api_version, self.services = self.connect(self.xml_url)
self.camera_endpoint_url = self.services["camera"] + "/camera"
self.available_apis = self.do("getAvailableApiList")["result"]
# prepare camera for rec mode
if "startRecMode" in self.available_apis[0]:
self.do("startRecMode")
self.available_apis = self.do("getAvailableApiList")["result"]
self.connected = False
def discover(self):
"""
discover camera using upnp ssdp method, return url for device xml
"""
msg = (
"M-SEARCH * HTTP/1.1\r\n"
"HOST: 172.16.58.3:1900\r\n"
'MAN: "ssdp:discover" \r\n'
"MX: 2\r\n"
"ST: urn:schemas-sony-com:service:ScalarWebAPI:1\r\n"
"\r\n"
).encode()
# Set up UDP socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.settimeout(2)
s.sendto(msg, ("172.16.58.3", 1900))
try:
while True:
data, addr = s.recvfrom(65507)
decoded_data = data.decode()
# get xml url from ssdp response
for item in decoded_data.split("\n"):
if "LOCATION" in item:
return item.strip().split(" ")[
1
] # get location url from ssdp response
self.connected = True
except socket.timeout:
raise ConnectionError("you are not connected to the camera's wifi")
def connect(self, xml_url):
"""
returns name, api_version, api_service_urls on success
"""
device_xml_request = requests.get(xml_url)
xml_file = str(device_xml_request.content.decode())
xml = ET.fromstring(xml_file)
name = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-upnp-org:device-1-0}friendlyName"
).text
api_version = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-sony-com:av}X_ScalarWebAPI_DeviceInfo/{urn:schemas-sony-com:av}X_ScalarWebAPI_Version"
).text
service_list = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-sony-com:av}X_ScalarWebAPI_DeviceInfo/{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceList"
)
api_service_urls = {}
for service in service_list:
service_type = service.find(
"{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceType"
).text
action_url = service.find(
"{urn:schemas-sony-com:av}X_ScalarWebAPI_ActionList_URL"
).text
api_service_urls[service_type] = action_url
return name, api_version, api_service_urls
def info(self):
"""
returns camera info(name, api version, supported services, available apis) in a dictionary
"""
return {
"name": self.name,
"api version": self.api_version,
"supported services": list(self.services.keys()),
"available apis": self.available_apis,
}
def post_request(self, url, method, param=[]):
"""
sends post request to url with method and param as json
"""
if type(param) is not list:
param = [param]
json_request = {"method": method, "params": param, "id": 1, "version": "1.0"}
request = requests.post(url, json.dumps(json_request))
response = json.loads(request.content)
if "error" in list(response.keys()):
print("Error: ")
print(response)
else:
return response
def do(self, method, param=[]):
"""
this calls to camera service api, require method and param args
"""
# TODO: response handler, return result of do, etc
response = self.post_request(self.camera_endpoint_url, method, param)
return response
class ConnectionError(Exception):
pass
| 3 | 3 |
utility_functions_flu.py | neherlab/treetime_validation | 4 | 12715 | <reponame>neherlab/treetime_validation
#!/usr/bin/env python
"""
This module defines functions to facilitate operations with data specific
to Flu trees and alignments.
"""
import numpy as np
from Bio import AlignIO, Phylo
from Bio.Align import MultipleSeqAlignment
import random
import subprocess
import datetime
import os, copy
import matplotlib.pyplot as plt
from scipy.stats import linregress
from collections import Counter
import StringIO
import treetime
from utility_functions_general import remove_polytomies
from utility_functions_beast import run_beast, create_beast_xml, read_beast_log
import xml.etree.ElementTree as XML
from external_binaries import BEAST_BIN
def date_from_seq_name(name):
"""
Parse flu sequence name to the date in numeric format (YYYY.F)
Args:
- name(str): name of the flu sequence.
Returns:
- sequence sampling date if succeeded to parse. None otherwise.
"""
def str2date_time(instr):
"""
Convert input string to datetime object.
Args:
- instr (str): input string. Accepts one of the formats:
{MM.DD.YYYY, MM.YYYY, MM/DD/YYYY, MM/YYYY, YYYY}.
Returns:
- date (datetime.datetime): parsed date object. If the parsing failed,
None is returned
"""
instr = instr.replace('/', '.')
# import ipdb; ipdb.set_trace()
try:
date = datetime.datetime.strptime(instr, "%m.%d.%Y")
except ValueError:
date = None
if date is not None:
return date
try:
date = datetime.datetime.strptime(instr, "%m.%Y")
except ValueError:
date = None
if date is not None:
return date
try:
date = datetime.datetime.strptime(instr, "%Y")
except ValueError:
date = None
return date
try:
date = str2date_time(name.split('|')[3].strip())
return date.year + (date - datetime.datetime(date.year, 1, 1)).days / 365.25
except:
return None
def dates_from_flu_tree(tree):
"""
Iterate over the Flu tree, parse each leaf name and return dates for the
leaves as dictionary.
Args:
- tree(str or Biopython tree): Flu tree
Returns:
- dates(dict): dictionary of dates in format {seq_name: numdate}. Only the
entries which were parsed successfully are included.
"""
if isinstance(tree, str):
tree = Phylo.read(tree, 'newick')
dates = {k.name:date_from_seq_name(k.name) for k in tree.get_terminals()
if date_from_seq_name(k.name) is not None}
return dates
def subtree_with_same_root(tree, Nleaves, outfile, optimize=True):
"""
Sample subtree of the given tree so that the root of the subtree is that of
the original tree.
Args:
- tree(str or Biopython tree): initial tree
- Nleaves(int): number of leaves in the target subtree
- outfile(str): path to save the resulting subtree
optimize(bool): perform branch length optimization for the subtree?
Returns:
- tree(Biopython tree): the subtree
"""
if isinstance(tree, str):
treecopy = Phylo.read(tree, 'newick')
else:
treecopy = copy.deepcopy(tree)
remove_polytomies(treecopy)
assert(len(treecopy.root.clades) == 2)
tot_terminals = treecopy.count_terminals()
# sample to the left of the root
left = treecopy.root.clades[0]
n_left = left.count_terminals()
right = treecopy.root.clades[1]
n_right = right.count_terminals()
n_left_sampled = np.min((n_left, Nleaves * n_left / (n_left + n_right)))
n_left_sampled = np.max((n_left_sampled, 5)) # make sure we have at least one
left_terminals = left.get_terminals()
left_sample_idx = np.random.choice(np.arange(len(left_terminals)), size=n_left_sampled, replace=False)
left_sample = [left_terminals[i] for i in left_sample_idx]
# sample to the right of the root
n_right_sampled = np.min((n_right, Nleaves * n_right / (n_left + n_right)))
n_right_sampled = np.max((n_right_sampled, 5)) # make sure we have at least one
right_terminals = right.get_terminals()
right_sample_idx = np.random.choice(np.arange(len(right_terminals)), size=n_right_sampled, replace=False)
right_sample = [right_terminals[i] for i in right_sample_idx]
for leaf in treecopy.get_terminals():
if leaf not in right_sample and leaf not in left_sample:
treecopy.prune(leaf)
else:
pass
#print ("leaving leaf {} in the tree".format(leaf.name))
if optimize:
import treetime
dates = dates_from_flu_tree(treecopy)
aln = './resources/flu_H3N2/H3N2_HA_2011_2013.fasta'
tt = treetime.TreeAnc(tree=treecopy, aln=aln,gtr='Jukes-Cantor')
tt.optimize_seq_and_branch_len(prune_short=False)
Phylo.write(tt.tree, outfile, 'newick')
return tt.tree
else:
Phylo.write(treecopy, outfile, 'newick')
return treecopy
def subtree_year_vol(tree, N_per_year, outfile):
"""
Sample subtree of the given tree with equal number of samples per year.
Note:
- if there are not enough leaves sampled at a given year, all leaves for this
year will be included in the subtree.
Args:
- tree(str or Biopython object): Initial tree
- N_per_year(int): number of samples per year.
- outfile (str): path to save the subtree
Returns:
- tree(Biopython tree): the subtree
"""
if isinstance(tree, str):
treecopy = Phylo.read(tree, 'newick')
else:
treecopy = copy.deepcopy(tree)
remove_polytomies(treecopy)
dates = dates_from_flu_tree(treecopy)
sample = []
cntr = Counter(map (int, dates.values()))
years = cntr.keys()
min_year = np.min(years)
for year in years:
all_names = [k for k in dates if int(dates[k]) == year]
if len(all_names) <= N_per_year or year == min_year:
sample += all_names
else:
sample += list(np.random.choice(all_names, size=N_per_year, replace=False))
for leaf in treecopy.get_terminals():
if leaf.name not in sample:
treecopy.prune(leaf)
else:
pass
#print ("leaving leaf {} in the tree".format(leaf.name))
Phylo.write(treecopy, outfile, 'newick')
return treecopy
def create_LSD_dates_file_from_flu_tree(tree, outfile):
"""
Parse dates from the flu tree and write to the file in the LSD format.
Args:
- tree(str or Biopython object): Initial tree
- outfile(str): path to save the LSD dates file.
Returns:
- dates(dict): dates parsed from the tree as dictionary.
"""
dates = dates_from_flu_tree(tree)
with open(outfile, 'w') as df:
df.write(str(len(dates)) + "\n")
df.write("\n".join([str(k) + "\t" + str(dates[k]) for k in dates]))
return dates
def make_known_dates_dict(alnfile, dates_known_fraction=1.0):
"""
Read all the dates of the given flu sequences, and make the dates dictionary
for only a fraction of them. The sequences in the resulting dict are chosen
randomly.
"""
aln = AlignIO.read(alnfile, 'fasta')
dates = {k.name: date_from_seq_name(k.name) for k in aln}
# randomly choose the dates so that only the known_ratio number of dates is known
if dates_known_fraction != 1.0:
assert(dates_known_fraction > 0 and dates_known_fraction < 1.0)
knonw_keys = np.random.choice(dates.keys(), size=int (len(dates) * dates_known_fraction), replace=False)
dates = {k : dates[k] for k in knonw_keys}
return dates
def create_treetime_with_missing_dates(alnfile, treefile, dates_known_fraction=1.0):
"""dates = {k.name: date_from_seq_name(k.name) for k in aln}
Create TreeTime object with fraction of leaves having no sampling dates.
The leaves to earse sampling dates are chosen randomly.
Args:
- alnfile(str): path to the flu alignment
- treefiule(str): path to the Flu newixk tree
- dates_known_fraction(float): fraction of leaves, which should have
sampling date information.
"""
aln = AlignIO.read(alnfile, 'fasta')
tt = Phylo.read(treefile, 'newick')
dates = make_known_dates_dict(alnfile, dates_known_fraction)
myTree = treetime.TreeTime(gtr='Jukes-Cantor', tree = treefile,
aln = alnfile, verbose = 4, dates = dates, debug=False)
myTree.optimize_seq_and_branch_len(reuse_branch_len=True, prune_short=True, max_iter=5, infer_gtr=False)
return myTree
def create_subtree(tree, n_seqs, out_file, st_type='equal_sampling'):
"""
Args:
- tree(filename or Biopython tree): original tree
- n_seqs: number of leaves in the resulting subtree
- out_file: output locaton to store the resulting subtree
- st_type: type of the subtree generation algorithm. Available types:
- random: just choose n_leaves randomly
- equal_sampling: choose equal leaves each year (if possible)
- preserve_root: sample from right and left subtrees of the tree root.
The root of the resulting subtree is therefore the same as of the original tree
"""
if isinstance(tree, str):
tree = Phylo.read(tree, 'newick')
pass
def correct_beast_xml_for_missing_dates(config_xml):
def create_leafHeight(strain):
xml_leafHeightParam = XML.Element('parameter')
xml_leafHeightParam.attrib={'id': strain+".height"}
xml_leafHeight = XML.Element('leafHeight')
xml_leafHeight.attrib={"taxon": strain}
xml_leafHeight.append(xml_leafHeightParam)
return xml_leafHeight
def create_leafHeight_operator(strain, weight):
#<parameter idref="A/Yurimaguas/FLU4785/2006.height"/>
xml_param = XML.Element('parameter')
xml_param.attrib = {'idref': strain+'.height'}
#<uniformOperator weight="0.024154589371980676">
xml_operator = XML.Element('uniformOperator')
xml_operator.attrib = {'weight': str(weight)}
xml_operator.append(xml_param)
return xml_operator
def create_taxon_date():
xml_date = XML.Element('date')
xml_date.attrib={'value': '2011', 'direction':"forwards", 'units':"years", 'precision':'4.0'}
return xml_date
xml_treeModel = config_xml.find('treeModel')
xml_operators = config_xml.find('operators')
xml_taxa = config_xml.find('taxa').findall('taxon')
xml_filelog = config_xml.findall('mcmc')[0].findall('log')[np.argmax([k.attrib['id']=='filelog' for k in config_xml .findall('mcmc')[0].findall('log')])]
operator_weight = 1. / np.sum([k.find('date') is None for k in xml_taxa])
#import ipdb; ipdb.set_trace()
for t in xml_taxa:
if t.find('date') is None:
strain = t.attrib['id']
t.append(create_taxon_date())
xml_treeModel.append(create_leafHeight(strain))
xml_operators.append(create_leafHeight_operator(strain, operator_weight))
parameter = XML.Element("parameter")
parameter.attrib = {"idref" : strain+".height"}
xml_filelog.append(parameter)
return config_xml
def run_beast(tree_name, aln_name, dates, beast_prefix, log_post_process=None, template_file="./resources/beast/template_bedford_et_al_2015.xml"):
    """Generate a BEAST config from the template and run BEAST on it.

    Writes ``<beast_prefix>.config.xml``, invokes BEAST through java, and, if
    ``log_post_process`` is provided, calls it with the log file path.
    """
    xml_path = beast_prefix + ".config.xml"
    config = create_beast_xml(tree_name, aln_name, dates, beast_prefix, template_file)
    config = correct_beast_xml_for_missing_dates(config)
    config.write(xml_path)
    cmd = ["java", "-jar", BEAST_BIN, "-beagle_off", "-overwrite", xml_path]
    subprocess.call(cmd)
    if log_post_process is not None:
        log_post_process(beast_prefix + ".log.txt")
# No standalone CLI behaviour; this module is meant to be imported.
if __name__ == '__main__':
    pass
| 2.65625 | 3 |
class Solution:
    def smallestRange(self, nums: List[List[int]]) -> List[int]:
        """Smallest range covering at least one element from each of the k
        sorted lists (LeetCode 632). Shortest range wins; ties broken by the
        smaller start, which the min-heap order guarantees naturally.

        The original stub had no body at all; this implements the standard
        k-way-merge solution: keep a heap of the current head of each list,
        pop the global minimum, and track the running maximum.
        Time O(N log k), space O(k). Assumes each list is non-empty
        (guaranteed by the problem constraints).
        """
        # Heap entries: (value, list index, element index).
        heap = [(lst[0], i, 0) for i, lst in enumerate(nums)]
        heapq.heapify(heap)
        current_max = max(lst[0] for lst in nums)
        best = [-10 ** 9, 10 ** 9]
        while True:
            low, i, j = heapq.heappop(heap)
            if current_max - low < best[1] - best[0]:
                best = [low, current_max]
            if j + 1 == len(nums[i]):
                # One list is exhausted: no further range can cover all lists.
                return best
            nxt = nums[i][j + 1]
            current_max = max(current_max, nxt)
            heapq.heappush(heap, (nxt, i, j + 1))
| 2.296875 | 2 |
add_label.py | Mause/pull_requests | 0 | 12717 | from asyncio import get_event_loop
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
from aiohttp import ClientSession
from pydantic import BaseModel
from sgqlc.endpoint.base import BaseEndpoint
from sgqlc.operation import Operation
from sgqlc_schemas.github.schema import (
AddLabelsToLabelableInput,
AddLabelsToLabelablePayload,
MergePullRequestInput,
Mutation,
Query,
Repository,
)
class Shared(BaseModel):
    """Common pydantic base: permits fields of arbitrary (non-pydantic)
    types, such as sgqlc's generated Query/Mutation classes."""
    class Config:
        arbitrary_types_allowed = True
class Location(Shared):
    """Position of a GraphQL error within the query document."""
    column: int
    line: int
class Error(Shared):
    """One GraphQL error entry from a response's ``errors`` array."""
    locations: List[Location]
    message: str
    # Path to the response field that errored, when the server provides one.
    path: Optional[List[str]] = None
class DataWithErrors(Shared):
    """Parsed GraphQL response: interpreted data plus reported errors."""
    data: Union[Query, Mutation]
    errors: List[Error]
@dataclass
class AsyncHttpEndpoint(BaseEndpoint):
    """Asynchronous sgqlc GraphQL endpoint built on aiohttp.

    Attributes:
        url: GraphQL endpoint URL.
        headers: extra HTTP headers sent with every request (e.g. auth).
    """

    url: str
    headers: Dict[str, str] = field(default_factory=dict)

    async def __call__(self, query) -> DataWithErrors:
        """POST the sgqlc operation and return its interpreted result.

        Returns a DataWithErrors whose ``data`` is the operation applied to
        the response payload and whose ``errors`` is the (possibly empty)
        list of GraphQL errors.
        """
        async with ClientSession() as session:
            res = await session.post(
                self.url,
                headers={**self.headers, 'Content-Type': 'application/json'},
                json={'query': bytes(query).decode()},
            )
            try:
                data = await res.json()
            except Exception as e:
                # Bug fix: previously execution fell through with `data`
                # unbound, so the code below raised a NameError that masked
                # the real decoding failure. Log the raw body, then re-raise.
                self._log_json_error(await res.text(), e)
                raise
            data.setdefault('errors', [])
            if data['errors']:
                self._log_graphql_error(query, data)
            if not (data['errors'] or data.get('data')):
                # e.g. auth failures return only a top-level "message".
                data['errors'] = [{'message': data['message'], 'locations': []}]
            return DataWithErrors(data=query + data, errors=data['errors'])
async def add_labels_to_labelable(
    endpoint: BaseEndpoint, repository_id: str, labelable_id: str, label: str
) -> AddLabelsToLabelablePayload:
    """Attach the named label to an issue/PR, resolving the label's node id
    from the repository's first 50 labels."""
    lookup = Operation(Query)
    lookup.node(id=repository_id).__as__(Repository).labels(first=50).nodes().__fields__(
        'name', 'id'
    )
    response = await endpoint(lookup)
    name_to_id = {}
    for repo_label in response.node.labels.nodes:
        name_to_id[repo_label.name] = repo_label.id
    mutation = Operation(Mutation)
    mutation.add_labels_to_labelable(
        input=AddLabelsToLabelableInput(
            labelable_id=labelable_id, label_ids=[name_to_id[label]]
        )
    )
    return (await endpoint(mutation)).add_labels_to_labelable
async def build_endpoint(token: str) -> AsyncHttpEndpoint:
    """Create a GitHub GraphQL endpoint authenticated with *token*."""
    auth_headers = {'Authorization': 'Bearer ' + token}
    return AsyncHttpEndpoint('https://api.github.com/graphql', auth_headers)
async def main():
    """Demo driver: label the newest PR on Mause/media 'automerge', then merge it."""
    # Bug fix: read the token with a context manager (the bare
    # open(...).read() leaked the file handle).
    with open('token.txt') as token_file:
        endpoint = await build_endpoint(token_file.read())

    qu = Operation(Query)
    repo = qu.repository(owner='Mause', name='media')
    repo.id()
    repo.pull_requests(first=1).nodes().__fields__('title', 'id')
    res = (await endpoint(qu)).repository

    await add_labels_to_labelable(
        endpoint, res.id, res.pull_requests.nodes[0].id, 'automerge'
    )

    # Bug fix: a dead `op = Operation(Mutation)` assignment, immediately
    # overwritten by build_merge(), was removed here.
    op = build_merge([res.pull_requests.nodes[0].id])
    res = await endpoint(op)
    print(res)
def build_merge(ids: List[str]):
    """Build a single Mutation that merges every PR in *ids*.

    Each merge gets a distinct alias (merge_0, merge_1, ...) so the results
    can coexist in one GraphQL document.
    """
    mutation = Operation(Mutation)
    for index, pull_request_id in enumerate(ids):
        merge = mutation.merge_pull_request(
            input=MergePullRequestInput(pull_request_id=pull_request_id),
            __alias__=f'merge_{index}',
        )
        merge.pull_request.title()
    return mutation
if __name__ == "__main__":
    # Run the demo labelling/merge workflow to completion.
    get_event_loop().run_until_complete(main())
| 2.140625 | 2 |
DataEngineering/Chapter7/7.6/financialdata/financialdata/scheduler.py | yz830620/FinMindBook | 5 | 12718 | <gh_stars>1-10
import time
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from financialdata.producer import Update
from loguru import logger
def sent_crawler_task():
    """Dispatch today's crawl of Taiwan stock prices."""
    # Replace this body with the real task-dispatch code if needed.
    # logger.info(f"sent_crawler_task {dataset}")
    today_str = datetime.datetime.today().date().strftime("%Y-%m-%d")
    Update(dataset="taiwan_stock_price", start_date=today_str, end_date=today_str)
def main():
    """Register the crawler job and start the background scheduler."""
    scheduler = BackgroundScheduler(timezone="Asia/Taipei")
    # Cron-like schedule (hour/minute fields; "*" would mean any value):
    # fire at 15:00 on weekdays.
    scheduler.add_job(
        func=sent_crawler_task,
        id="sent_crawler_task",
        trigger="cron",
        hour="15",
        minute="0",
        day_of_week="mon-fri",
    )
    logger.info("sent_crawler_task")
    scheduler.start()
if __name__ == "__main__":
    main()
    # Block forever so the BackgroundScheduler thread keeps firing jobs.
    while True:
        time.sleep(600)
| 2.4375 | 2 |
openmc_plasma_source/plotting/plot_tokamak_source.py | mdfaisal98/openmc-plasma-source | 0 | 12719 | <reponame>mdfaisal98/openmc-plasma-source
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
def scatter_tokamak_source(source, quantity=None, **kwargs):
    """Create a 2D scatter plot of the tokamak source in the R-Z plane.

    Extra keyword arguments are forwarded to ``matplotlib.pyplot.scatter``.

    Args:
        source (ops.TokamakSource): the plasma source
        quantity (str, optional): quantity by which the points are coloured
            ("ion_temperature" or "neutron_source_density"). Defaults to None
            (uncoloured).

    Raises:
        ValueError: If the quantity is unknown
    """
    colour_lookup = {
        "ion_temperature": source.temperatures,
        "neutron_source_density": source.strengths
    }
    if quantity in colour_lookup:
        colours = colour_lookup[quantity]
    elif quantity is None:
        colours = None
    else:
        raise ValueError("Unknown quantity")
    # Equal aspect so the poloidal cross-section is not distorted.
    plt.gca().set_aspect("equal")
    return plt.scatter(source.RZ[0], source.RZ[1], c=colours, **kwargs)
def plot_tokamak_source_3D(source, quantity=None, angles=(0, 1/2*np.pi), colorbar="viridis", **kwargs):
    """Creates a 3D plot of the tokamak source.

    Extra keyword arguments are forwarded to ``matplotlib.pyplot.plot``.

    Args:
        source (ops.TokamakSource): the plasma source
        quantity (str, optional): quantity by which the lines are coloured
            ("ion_temperature" or "neutron_source_density"). Defaults to None.
        angles (tuple, optional): two floats defining the toroidal coverage.
            Defaults to (0, 1/2*np.pi). (Bug fix: the default used to be a
            mutable list.)
        colorbar (str, optional): colormap name used when quantity is given.
            Defaults to "viridis".

    Raises:
        ValueError: If the quantity is unknown
    """
    quantity_to_attribute = {
        "ion_temperature": source.temperatures,
        "neutron_source_density": source.strengths
    }
    if quantity in quantity_to_attribute:
        values = quantity_to_attribute[quantity]
    elif quantity is None:
        values = None
    else:
        raise ValueError("Unknown quantity")
    colorbar = cm.get_cmap(colorbar)
    axes = plt.axes(projection="3d")
    theta = np.linspace(*angles, 100)
    # Hoisted out of the loop: previously max(values) was recomputed for
    # every sampled point.
    max_value = max(values) if values is not None else None
    for i in range(source.sample_size):
        if values is not None:
            colour = colorbar(values[i] / max_value)
        else:
            colour = None
        # Sweep each sampled (R, Z) point around the torus axis.
        x = source.RZ[0][i] * np.sin(theta)
        y = source.RZ[0][i] * np.cos(theta)
        z = source.RZ[1][i]
        plt.plot(x, y, z, color=colour, **kwargs)
    axes.set_xlim(-source.major_radius, source.major_radius)
    axes.set_ylim(-source.major_radius, source.major_radius)
    axes.set_zlim(-source.major_radius, source.major_radius)
| 3.015625 | 3 |
# Exercise 046: New Year countdown with ANSI colours and emoji fireworks.
from time import sleep
import emoji
print('\033[32mCONTAGEM REGRESSIVA PARA O ANO NOVO:\033[m')
sleep(1)
# Count down from 10 to 0, pausing one second between numbers.
for second in range(10, -1, -1):
    print(second)
    sleep(1)
print(emoji.emojize("\033[31m:boom::boom::boom:KABUM:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[32m:boom::boom::boom:FOGUETE:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[33m:boom::boom::boom:FOGOS E MAIS FOGOS:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[34m:boom::boom::boom:GUANAGARA VIADO:boom::boom::boom:", use_aliases=True))
print('\033[32mxD')
bmds/bmds2/logic/rules.py | shapiromatron/bmds | 2 | 12721 | import abc
import math
from ... import constants
class Rule(abc.ABC):
    """Base class for BMDS model-recommendation checks.

    A rule inspects a (dataset, model-output) pair and either passes or
    places the model into ``failure_bin`` with an explanatory message.
    """

    def __init__(self, failure_bin, **kwargs):
        self.failure_bin = failure_bin
        self.enabled = kwargs.get("enabled", True)
        # NaN threshold means "no numeric threshold applies to this rule".
        self.threshold = kwargs.get("threshold", float("nan"))
        self.rule_name = kwargs.get("rule_name", self.default_rule_name)
        self.kwargs = kwargs

    def __unicode__(self):
        enabled = "✓" if self.enabled else "✕"
        threshold = "" if math.isnan(self.threshold) else ", threshold={}".format(self.threshold)
        return "{0} {1} [bin={2}{3}]".format(enabled, self.rule_name, self.binmoji, threshold)

    def __str__(self):
        # Bug fix: __unicode__ is a Python 2 protocol and is never invoked by
        # Python 3's str(); expose the same text through __str__.
        return self.__unicode__()

    def check(self, dataset, output):
        """Apply the rule if enabled; disabled rules pass unconditionally."""
        if self.enabled:
            return self.apply_rule(dataset, output)
        else:
            return self.return_pass()

    @property
    def binmoji(self):
        return constants.BIN_ICON[self.failure_bin]

    @property
    def bin_text(self):
        return constants.BIN_TEXT[self.failure_bin]

    def as_row(self):
        return [self.rule_name, self.enabled, self.bin_text, self.threshold]

    def return_pass(self):
        return constants.BIN_NO_CHANGE, None

    @abc.abstractmethod
    def apply_rule(self, dataset, output):
        """return tuple of (bin, notes) associated with rule or None"""
        ...

    def get_failure_message(self, *args) -> str:
        return "An error occurred"

    def _is_valid_number(self, val):
        # Ensure number is an int or float, not equal to special case -999.
        return val is not None and val != -999 and isinstance(val, (int, float))
class NumericValueExists(Rule):
    """Passes when ``output[field_name]`` is a real number (and not -999)."""

    field_name = None
    field_name_verbose = None

    def apply_rule(self, dataset, output):
        candidate = output.get(self.field_name)
        if not self._is_valid_number(candidate):
            return self.failure_bin, self.get_failure_message()
        return self.return_pass()

    def get_failure_message(self):
        label = self.field_name_verbose
        if label is None:
            label = self.field_name
        return "{} does not exist".format(label)
class BmdExists(NumericValueExists):
    # Fails when the output lacks a numeric benchmark dose (BMD).
    default_rule_name = "BMD exists"
    field_name = "BMD"
class BmdlExists(NumericValueExists):
    # Fails when the output lacks a numeric BMDL (BMD lower confidence limit).
    default_rule_name = "BMDL exists"
    field_name = "BMDL"
class BmduExists(NumericValueExists):
    # Fails when the output lacks a numeric BMDU (BMD upper confidence limit).
    default_rule_name = "BMDU exists"
    field_name = "BMDU"
class AicExists(NumericValueExists):
    # Fails when the output lacks a numeric AIC value.
    default_rule_name = "AIC exists"
    field_name = "AIC"
class RoiExists(NumericValueExists):
    # Fails when the output lacks a numeric residual of interest.
    default_rule_name = "Residual of interest exists"
    field_name = "residual_of_interest"
    field_name_verbose = "Residual of Interest"
class ShouldBeGreaterThan(Rule):
    """Fails when the inspected output value is below ``self.threshold``.

    Missing / non-numeric values pass; existence is covered by other rules.
    """

    field_name = ""
    field_name_verbose = ""

    def apply_rule(self, dataset, output):
        value = output.get(self.field_name)
        limit = self.threshold
        # Note: the >= comparison is deliberately kept so that a NaN
        # threshold behaves exactly as before.
        if not self._is_valid_number(value) or value >= limit:
            return self.return_pass()
        return self.failure_bin, self.get_failure_message(value, limit)

    def get_failure_message(self, val, threshold):
        return "{} is less than threshold ({:.3} < {})".format(
            self.field_name_verbose, float(val), threshold
        )
class GlobalFit(ShouldBeGreaterThan):
    # Global goodness-of-fit: fails when p-value 4 drops below the threshold.
    default_rule_name = "GGOF"
    field_name = "p_value4"
    field_name_verbose = "Goodness of fit p-value"
class ShouldBeLessThan(Rule, abc.ABC):
    """Fails when a derived value exceeds ``self.threshold``.

    Subclasses implement :meth:`get_value`; missing / non-numeric values pass.
    """

    msg = ""  # w/ arguments for value and threshold

    @abc.abstractmethod
    def get_value(self, dataset, output):
        ...

    def apply_rule(self, dataset, output):
        value = self.get_value(dataset, output)
        limit = self.threshold
        # Note: the <= comparison is deliberately kept so that a NaN
        # threshold behaves exactly as before.
        if not self._is_valid_number(value) or value <= limit:
            return self.return_pass()
        return self.failure_bin, self.get_failure_message(value, limit)

    def get_failure_message(self, val, threshold):
        return "{} is greater than threshold ({:.3} > {})".format(
            self.field_name_verbose, float(val), threshold
        )
class BmdBmdlRatio(ShouldBeLessThan):
    """Flags models whose BMD/BMDL ratio is too large."""

    default_rule_name = "BMD to BMDL ratio"
    field_name_verbose = "BMD/BMDL ratio"

    def get_value(self, dataset, output):
        bmd, bmdl = output.get("BMD"), output.get("BMDL")
        # Implicitly returns None (which passes) when either value is
        # unusable or the denominator is zero.
        if self._is_valid_number(bmd) and self._is_valid_number(bmdl) and bmdl != 0:
            return bmd / bmdl
class RoiFit(ShouldBeLessThan):
    # Uses the residual-of-interest value precomputed in the model output.
    default_rule_name = "Residual of interest"
    field_name_verbose = "Residual of interest"
    def get_value(self, dataset, output):
        return output.get("residual_of_interest")
class HighBmd(ShouldBeLessThan):
    """Flags a BMD that is large relative to the highest tested dose."""

    default_rule_name = "High BMD"
    field_name_verbose = "BMD/high dose ratio"

    def get_value(self, dataset, output):
        bmd = output.get("BMD")
        highest_dose = max(dataset.doses)
        if self._is_valid_number(highest_dose) and self._is_valid_number(bmd) and bmd != 0:
            return bmd / float(highest_dose)
class HighBmdl(ShouldBeLessThan):
    """Flags a BMDL that is large relative to the highest tested dose."""

    default_rule_name = "High BMDL"
    field_name_verbose = "BMDL/high dose ratio"

    def get_value(self, dataset, output):
        bmdl = output.get("BMDL")
        highest_dose = max(dataset.doses)
        if self._is_valid_number(highest_dose) and self._is_valid_number(bmdl) and highest_dose > 0:
            return bmdl / float(highest_dose)
class LowBmd(ShouldBeLessThan):
    """Flags a BMD that is small relative to the lowest non-zero dose."""

    default_rule_name = "Low BMD"
    field_name_verbose = "minimum dose/BMD ratio"

    def get_value(self, dataset, output):
        lowest_dose = min([dose for dose in dataset.doses if dose > 0])
        bmd = output.get("BMD")
        if self._is_valid_number(lowest_dose) and self._is_valid_number(bmd) and bmd > 0:
            return lowest_dose / float(bmd)
class LowBmdl(ShouldBeLessThan):
    """Flags a BMDL that is small relative to the lowest non-zero dose."""

    default_rule_name = "Low BMDL"
    field_name_verbose = "minimum dose/BMDL ratio"

    def get_value(self, dataset, output):
        lowest_dose = min([dose for dose in dataset.doses if dose > 0])
        bmdl = output.get("BMDL")
        if self._is_valid_number(lowest_dose) and self._is_valid_number(bmdl) and bmdl > 0:
            return lowest_dose / float(bmdl)
class ControlResidual(ShouldBeLessThan):
    """Checks the magnitude of the fit residual at the first dose group."""
    default_rule_name = "Control residual"
    field_name_verbose = "Residual at lowest dose"
    def get_value(self, dataset, output):
        # Guard against a missing or empty residual list; implicitly returns
        # None (which passes) in that case.
        if output.get("fit_residuals") and len(output["fit_residuals"]) > 0:
            try:
                return abs(output["fit_residuals"][0])
            except TypeError:
                # Non-numeric first residual: surface NaN rather than raising
                # and let the apply_rule comparison decide the outcome.
                return float("nan")
class ControlStdevResiduals(ShouldBeLessThan):
    """Compares modeled vs. actual standard deviation at the control dose."""
    default_rule_name = "Control stdev"
    field_name_verbose = "Ratio of modeled to actual stdev. at control"
    def get_value(self, dataset, output):
        # Both stdev arrays must exist and be non-empty; otherwise the rule
        # implicitly returns None, which passes.
        if (
            output.get("fit_est_stdev")
            and output.get("fit_stdev")
            and len(output["fit_est_stdev"]) > 0
            and len(output["fit_stdev"]) > 0
        ):
            try:
                modeled = abs(output["fit_est_stdev"][0])
                actual = abs(output["fit_stdev"][0])
            except TypeError:
                # Non-numeric entries: surface NaN rather than raising.
                return float("nan")
            # Only compute the ratio when both values are usable and positive.
            if (
                self._is_valid_number(modeled)
                and self._is_valid_number(actual)
                and modeled > 0
                and actual > 0
            ):
                return abs(modeled / actual)
class CorrectVarianceModel(Rule):
    """Checks that the selected variance model matches p-value 2
    (continuous datasets only)."""
    # Check variance model (continuous datasets-only)
    default_rule_name = "Variance type"
    def apply_rule(self, dataset, output):
        if "parameters" not in output:
            return self.return_pass()
        # 0 = non-homogeneous modeled variance => Var(i) = alpha*mean(i)^rho
        # 1 = constant variance => Var(i) = alpha*mean(i)
        # if rho is a parameter, then variance model 0 is applied
        rho = output["parameters"].get("rho")
        constant_variance = 0 if rho else 1
        p_value2 = output.get("p_value2")
        # Very small p-values may arrive as the string "<0.0001"; clamp to a
        # usable numeric value.
        if p_value2 == "<0.0001":
            p_value2 = 0.0001
        msg = None
        if self._is_valid_number(p_value2):
            if constant_variance == 1 and p_value2 < 0.1:
                msg = "Incorrect variance model (p-value 2 = {}), constant variance selected".format(
                    p_value2
                )
            elif constant_variance == 0 and p_value2 > 0.1:
                msg = "Incorrect variance model (p-value 2 = {}), modeled variance selected".format(
                    p_value2
                )
        else:
            msg = "Correct variance model cannot be determined (p-value 2 = {})".format(p_value2)
        if msg:
            return self.failure_bin, msg
        else:
            return self.return_pass()
class VarianceModelFit(Rule):
    """Checks that the selected variance model adequately fits the data,
    using p-value 2 (constant variance) or p-value 3 (modeled variance)."""
    default_rule_name = "Variance fit"
    def apply_rule(self, dataset, output):
        if "parameters" not in output:
            return self.return_pass()
        # 0 = non-homogeneous modeled variance => Var(i) = alpha*mean(i)^rho
        # 1 = constant variance => Var(i) = alpha*mean(i)
        # if rho is a parameter, then variance model 0 is applied
        rho = output["parameters"].get("rho")
        constant_variance = 0 if rho else 1
        p_value2 = output.get("p_value2")
        # Very small p-values may arrive as the string "<0.0001"; clamp to a
        # usable numeric value.
        if p_value2 == "<0.0001":
            p_value2 = 0.0001
        p_value3 = output.get("p_value3")
        if p_value3 == "<0.0001":
            p_value3 = 0.0001
        msg = None
        if self._is_valid_number(p_value2) and constant_variance == 1 and p_value2 < 0.1:
            msg = "Variance model poorly fits dataset (p-value 2 = {})".format(p_value2)
        if self._is_valid_number(p_value3) and constant_variance == 0 and p_value3 < 0.1:
            msg = "Variance model poorly fits dataset (p-value 3 = {})".format(p_value3)
        if msg:
            return self.failure_bin, msg
        else:
            return self.return_pass()
class NoDegreesOfFreedom(Rule):
    """Fails saturated models: at least one degree of freedom is required so
    an overfit model is never recommended."""

    default_rule_name = "Degrees of freedom"

    def apply_rule(self, dataset, output):
        degrees = output.get("df", 1)
        if degrees != 0:
            return self.return_pass()
        return self.failure_bin, "Zero degrees of freedom; saturated model"
class Warnings(Rule):
    """Fails whenever the model output reports any warnings."""

    default_rule_name = "Warnings"

    def get_failure_message(self, warnings):
        return "Warning(s): {}".format("; ".join(warnings))

    def apply_rule(self, dataset, output):
        reported = output.get("warnings", [])
        if not reported:
            return self.return_pass()
        return self.failure_bin, self.get_failure_message(reported)
| 3.015625 | 3 |
nets/static/conv_rnn_convT.py | MaximilienLC/nevo | 0 | 12722 | <filename>nets/static/conv_rnn_convT.py
# Copyright 2022 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from nets.static.base import StaticNetBase
class Net(StaticNetBase):
    """Conv encoder -> single-layer RNN -> (scalar head | conv-transpose decoder).

    The conv stack is assumed to reduce the input to a 1x1 spatial map (the
    forward pass indexes ``[..., 0, 0]``) — e.g. 16x16 single-channel inputs;
    confirm against callers. A persistent hidden state ``self.h`` carries
    recurrent context across forward calls until :meth:`reset` is invoked.
    """

    def __init__(self, transpose):
        super().__init__()
        # When True, decode back to an image via transposed convolutions;
        # otherwise emit a single scalar per sample.
        self.transpose = transpose
        self.conv1 = nn.Conv2d(1, 16, 4, 2, 1)
        self.conv2 = nn.Conv2d(16, 32, 4, 2, 1)
        self.conv3 = nn.Conv2d(32, 64, 4)
        self.rnn1 = nn.RNN(64, 64)
        if not self.transpose:
            self.fc1 = nn.Linear(64, 1)
        else:
            self.convT1 = nn.ConvTranspose2d(64, 32, 4)
            self.convT2 = nn.ConvTranspose2d(32, 16, 4, 2, 1)
            self.convT3 = nn.ConvTranspose2d(16, 1, 4, 2, 1)
        # RNN hidden state, shape (num_layers, batch, hidden_size).
        self.h = torch.zeros(1, 1, 64)

    def reset(self):
        """Clear the recurrent hidden state (on the current device)."""
        self.h = torch.zeros(1, 1, 64).to(self.device)

    def pre_setup_to_run(self):
        # Bug fix: Tensor.to() is not in-place; the returned tensor was
        # previously discarded, so the hidden state never actually moved.
        self.h = self.h.to(self.device)

    def pre_setup_to_save(self):
        # Bug fix: same as above — keep the returned CPU copy.
        self.h = self.h.to('cpu')

    def forward(self, x):
        """Run one recurrent step; updates ``self.h`` as a side effect."""
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        x = torch.relu(self.conv3(x))
        # (batch, 64, 1, 1) -> (seq=1, batch, 64) for the RNN step.
        x, self.h = self.rnn1(x[None, :, :, 0, 0], self.h)
        if not self.transpose:
            x = torch.relu(self.fc1(x[0, :, :]))
        else:
            x = torch.relu(self.convT1(x[0, :, :, None, None]))
            x = torch.relu(self.convT2(x))
            x = torch.relu(self.convT3(x))
        return x
ors2bryton.py | andbue/ors2bryton | 0 | 12723 | <reponame>andbue/ors2bryton
from sys import argv
from os.path import splitext
from lxml import etree
from struct import pack
def main():
    """Convert an openrouteservice GPX route (argv[1]) into Bryton
    ``.smy`` / ``.tinfo`` / ``.track`` files next to the input file."""
    # Bug fix: removed a stray debug `print(argv)` left in the entry point.
    gpx = argv[1]
    # Mapping from openrouteservice instruction types to Bryton turn codes.
    # (Previously documented in a no-op bare string literal.)
    #
    # bryton: 1 go ahead, 2 right, 3 left, 4 slight right, 5 slight left,
    #   6 close right, 7 close left, 8 exit right, 9 exit left,
    #   10 continue straight, 11 uturn right, 12 uturn left, 13+ go ahead
    # openrouteservice (InstructionType.java):
    #   0 TURN_LEFT, 1 TURN_RIGHT, 2 TURN_SHARP_LEFT, 3 TURN_SHARP_RIGHT,
    #   4 TURN_SLIGHT_LEFT, 5 TURN_SLIGHT_RIGHT, 6 CONTINUE,
    #   7 ENTER_ROUNDABOUT, 8 EXIT_ROUNDABOUT, 9 UTURN, 10 FINISH,
    #   11 DEPART, 12 KEEP_LEFT, 13 KEEP_RIGHT, 14 UNKNOWN
    orst2brt = {
        0: 3,
        1: 2,
        2: 7,
        3: 6,
        4: 5,
        5: 4,
        6: 1,
        7: 10,
        8: 8,
        9: 12,
        10: 1,
        11: 1,
        12: 9,
        13: 8,
        14: 1,
    }
    fname = splitext(gpx)[0]
    r = etree.parse(gpx).getroot()
    ns = r.nsmap[None]
    rte = r.find(f'./{{{ns}}}rte')
    rtepts = rte.findall(f'./{{{ns}}}rtept')
    # Distances in the GPX may be in km; convert everything to metres.
    unit = r.find(f'./{{{ns}}}extensions/{{{ns}}}distance-units').text
    uf = 1000.0 if unit == "km" else 1
    ext = rte.find(f'./{{{ns}}}extensions')
    dist = int(float(ext.find(f'./{{{ns}}}distance').text) * uf)
    # Bounding box, converted to fixed-point microdegrees.
    bnds = ext.find(f'./{{{ns}}}bounds')
    bnds = {k: int(float(v) * 1000000) for k, v in bnds.attrib.items()}
    bnds = (bnds['maxLat'], bnds['minLat'], bnds['maxLon'], bnds['minLon'])
    print(f'{fname}.smy: {len(rtepts)} waypoints, distance {dist} meters.')
    with open(fname + '.smy', 'wb') as smy:
        smy.write(pack('<HHIIIII36x', 1, len(rtepts), *bnds, dist))
    with open(fname + '.tinfo', 'wb') as tinfo,\
            open(fname + '.track', 'wb') as track:
        step = None
        for n, p in enumerate(rtepts):
            lat = int(float(p.attrib.get('lat')) * 1000000)
            lon = int(float(p.attrib.get('lon')) * 1000000)
            track.write(pack('<II8x', lat, lon))
            thisstep = int(p.find(f'./{{{ns}}}extensions/{{{ns}}}step').text)
            if thisstep != step:
                # New instruction step: emit one .tinfo record.
                name = p.find(f'./{{{ns}}}name').text
                name = name.encode() if name is not None else "".encode()
                dist = int(float(p.find(f'./{{{ns}}}extensions/{{{ns}}}distance').text) * uf)
                # NOTE(review): duration packed as unsigned short — routes
                # longer than ~18h would overflow; confirm upstream limits.
                dur = int(float(p.find(f'./{{{ns}}}extensions/{{{ns}}}duration').text))
                t = int(p.find(f'./{{{ns}}}extensions/{{{ns}}}type').text)
                d = orst2brt[t]
                tinfo.write(pack('<HBxHxxHxx32s', n, d, dist, dur, name))
                step = thisstep
    print(f'{fname}.tinfo, {fname}.track: Finished writing.')
if __name__ == "__main__":
    # Usage: ors2bryton.py <route.gpx>
    main()
| 2.625 | 3 |
src/main/python/smart/smartplots3_run.py | cday97/beam | 123 | 12724 | import pandas as pd
import smartplots3_setup
def createSetup(name, expansion_factor, percapita_factor, plot_size, settings):
    """Build the plot-configuration dict consumed by smartplots3_setup.

    Args:
        name: label for this plot set.
        expansion_factor: population expansion factor applied to metrics.
        percapita_factor: per-capita normalisation factor.
        plot_size: (width, height) figure size.
        settings: iterable of (year, id, iteration, bottom_label, top_label)
            tuples, one per scenario (see createSettingRow).

    Returns:
        dict with scalar options plus parallel per-scenario lists.
    """
    plt_setup_smart = {
        'name': name,
        'expansion_factor': expansion_factor,
        'percapita_factor': percapita_factor,
        'scenarios_itr': [],
        'scenarios_id': [],
        'scenarios_year': [],
        'plot_size': plot_size,
        'bottom_labels': [],
        'top_labels': [],
        'plots_folder': "makeplots3"
    }
    # The dict literal above already initialises every key; the block of
    # redundant re-assignments that followed it has been removed.
    for (scenario_year, scenario_id, scenario_itr, bottom_label, top_label) in settings:
        plt_setup_smart['scenarios_year'].append(scenario_year)
        plt_setup_smart['scenarios_id'].append(scenario_id)
        plt_setup_smart['scenarios_itr'].append(scenario_itr)
        plt_setup_smart['top_labels'].append(top_label)
        plt_setup_smart['bottom_labels'].append(bottom_label)
    return plt_setup_smart
def createSettingRow(scenarios_year, scenarios_id, scenarios_itr, bottom_label, top_label):
    """Pack one scenario's settings into the tuple shape createSetup expects."""
    row = (scenarios_year, scenarios_id, scenarios_itr, bottom_label, top_label)
    return row
# Scenario short-codes used as plot labels.
# NOTE(review): "scenarios_lables" is a typo for "scenarios_labels" but is
# kept because the name is used throughout this script.
scenarios_lables = {
    "Base_CL_CT": "Base0",
    "Base_STL_STT_BAU": "Base2",
    "Base_STL_STT_VTO": "Base3",
    "Base_LTL_LTT_BAU": "Base5",
    "Base_LTL_LTT_VTO": "Base6",
    "A_STL_STT_BAU": "A2",
    "A_STL_STT_VTO": "A3",
    "B_LTL_LTT_BAU": "B5",
    "B_LTL_LTT_VTO": "B6",
    "C_LTL_LTT_BAU": "C5",
    "C_LTL_LTT_VTO": "C6"
}
output_folder = "/home/ubuntu/git/jupyter/data/28thOct2019"
# First plot set: 7 scenarios (base year plus A/B/C variants).
# Base_CL_CT
# A_STL_STT_BAU
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3 = createSetup('7scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (8, 4.5), settings)
#smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHWaitTime(plt_setup_smart3, output_folder)
#smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3, output_folder)
# Second plot set: all 11 scenarios, including the Base short/long-term rows.
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,2,15,scenarios_lables["Base_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,3,15,scenarios_lables["Base_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,4,15,scenarios_lables["Base_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,5,15,scenarios_lables["Base_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3_base = createSetup('11scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (10, 4.5), settings)
smartplots3_setup.pltEnergyPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHWaitTime(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3_base, output_folder)
#smartplots3_setup.pltMEP(plt_setup_smart3, output_folder, [15071,21151,22872,29014,27541,36325,45267])
smartplots3_setup.tableSummary(plt_setup_smart3_base, output_folder)
genrl/deep/agents/sac/__init__.py | ajaysub110/JigglypuffRL | 0 | 12725 | <gh_stars>0
from genrl.deep.agents.sac.sac import SAC # noqa
| 0.976563 | 1 |
contrib/libs/cxxsupp/libsan/generate_symbolizer.py | HeyLey/catboost | 6,989 | 12726 | import os
import sys
def main():
    """Print a C function returning the path of llvm-symbolizer, assumed to
    live next to the binary given as sys.argv[1].

    Bug fix / modernisation: the original used Python 2 `print` statements,
    which are syntax errors under Python 3; single-argument parenthesised
    prints behave identically on both interpreters.
    """
    print('const char* ya_get_symbolizer_gen() {')
    print(' return "{}";'.format(os.path.join(os.path.dirname(sys.argv[1]), 'llvm-symbolizer')))
    print('}')
if __name__ == '__main__':
    # Emits the generated C source on stdout.
    main()
| 1.664063 | 2 |
scripts/scheduler/scheduler.py | OCHA-DAP/hdx-scraper-unosat-flood-portal | 1 | 12727 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
import schedule
# Make the repository root importable so sibling packages (utilities,
# unosat_flood_portal_collect) resolve when this script runs standalone.
# Renamed from `dir`, which shadowed the `dir` builtin.
repo_root = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(repo_root)
from utilities.prompt_format import item
from unosat_flood_portal_collect import collect as Collect
def Wrapper(patch=False):
    '''Wrapper for main program.

    NOTE(review): the `patch` parameter is accepted but ignored — the call
    below always passes patch=True. Confirm whether it should forward
    `patch` instead.
    '''
    #
    # Collect data.
    #
    Collect.Main(patch=True)
#
# Register the collection wrapper to run once per day.
#
schedule.every(1).day.do(Wrapper)
def Main(verbose=True):
    '''Wrapper to run all the scheduled tasks.

    Loops forever servicing the schedule; returns False if the loop dies.
    '''
    if verbose:
        # Bug fix: the original used Python 2 `print` statements, which are
        # syntax errors under Python 3; parenthesised calls run on both.
        print('%s Running scheduler.' % item('prompt_bullet'))
    try:
        while True:
            schedule.run_pending()
            time.sleep(1)
    except Exception as e:
        print(e)
        return False
if __name__ == '__main__':
    # Run the scheduler loop until interrupted.
    Main()
| 2.359375 | 2 |
grAdapt/sampling/initializer/Vertices.py | mkduong-ai/grAdapt | 25 | 12728 | # python
# import warnings
# Third party imports
import numpy as np
# grAdapt
from .base import Initial
from grAdapt.utils.sampling import sample_corner_bounds
class Vertices(Initial):
    """
    Samples vertices if n_evals >= 2 ** len(bounds).
    Else low discrepancy sequences are sampled.
    """
    def __init__(self, sampling_method):
        """
        Parameters
        ----------
        sampling_method : grAdapt.sampling.equidistributed Object
            Sample low discrepancy sequences when initial point method is not feasible
        """
        super().__init__(sampling_method)
    def sample(self, bounds, n_evals):
        """Returns a numpy array of sampled points.
        Includes the corner points of the hypercube/search space whenever
        n_evals >= 2 ** len(bounds); otherwise falls back entirely to the
        low-discrepancy sampling method.
        (Docstring fix: previously claimed corner points were NOT included.)
        Parameters
        ----------
        bounds : list of tuples or list of grAdapt.space.datatype.base
            Each tuple in the list defines the bounds for the corresponding variable
            Example: [(1, 2), (2, 3), (-1, 4)...]
        n_evals : int
            number of initial points sampled by method
        Returns
        -------
        (self.n_evals, len(self.bounds)) numpy array
        """
        super().sample(bounds, n_evals)
        # Too few evaluations to cover all 2**d vertices: sample sequences only.
        if 2 ** len(self.bounds) > self.n_evals:
            return self.sampling_method.sample(bounds=bounds, n=n_evals)
        else:
            corner_points = sample_corner_bounds(self.bounds)
            num_corner_points = corner_points.shape[0]
            if self.n_evals > 2 ** len(self.bounds):
                # Top up the vertices with low-discrepancy points, passing the
                # corners as history so duplicates are avoided.
                random_points = self.sampling_method.sample(bounds=self.bounds,
                                                            n=(self.n_evals - num_corner_points),
                                                            x_history=corner_points)
                return np.vstack((corner_points, random_points))
            else:
                return corner_points
| 2.921875 | 3 |
thirdweb/modules/base.py | princetonwong/python-sdk | 1 | 12729 | <reponame>princetonwong/python-sdk
"""Base Module."""
from abc import ABC, abstractmethod
from typing import Callable, Dict, List, Optional, Union, cast
from eth_account.account import LocalAccount
from thirdweb_web3 import Web3
from thirdweb_web3.types import TxReceipt
from zero_ex.contract_wrappers import TxParams
import json
from ..abi.coin import Coin
from ..abi.erc165 import ERC165
from ..abi.market import Market
from ..abi.nft import SignatureMint721 as NFT
from ..abi.nft_collection import NFTCollection as NFTBundle
from ..abi.pack import Pack
from ..constants.erc_interfaces import InterfaceIdErc721, InterfaceIdErc1155
from ..errors import NoSignerException
import io
from ..options import SdkOptions
from ..storage import IpfsStorage
from ..types.role import Role
ModuleTypes = Union[NFT, Market, Pack, NFTBundle, Coin]
class BaseModule(ABC):
"""
Base module for all modules.
"""
get_client: Optional[Callable[[], Web3]]
""" Returns the client object. """
get_storage: Optional[Callable[[], IpfsStorage]]
""" Returns the storage object. """
get_signer_address: Optional[Callable[[], str]]
""" Returns the signer address. """
get_private_key: Optional[Callable[[], str]]
""" Returns the private key. """
get_transact_opts: Optional[Callable[[], TxParams]]
""" Returns the transaction options. """
get_account: Optional[Callable[[], LocalAccount]]
""" Returns the account object. """
get_options: Optional[Callable[[], SdkOptions]]
""" Returns the options object. """
    def __init__(self):
        """Initialise every context accessor to ``None``; real callables are
        presumably assigned by the SDK after construction (see the class
        attribute docs above) — confirm against the SDK wiring."""
        self.get_client = None
        self.get_storage = None
        self.get_signer_address = None
        self.get_private_key = None
        self.get_transact_opts = None
        self.get_account = None
        self.get_options = None
    def execute_tx(self, tx) -> TxReceipt:
        """
        Execute a transaction and return the receipt.

        Signs *tx* locally, broadcasts it, and blocks until it is mined or
        the configured timeout elapses. Note: mutates *tx* in place (sets
        'nonce', removes 'from').
        """
        client = self.get_client()
        nonce = client.eth.get_transaction_count(self.get_signer_address())
        tx['nonce'] = nonce
        # 'from' is dropped before signing — presumably because the sender is
        # implied by the signature of a raw transaction; confirm.
        del tx['from']
        signed_tx = self.__sign_tx(tx)
        tx_hash = client.eth.send_raw_transaction(signed_tx.rawTransaction)
        return cast(
            TxReceipt,
            client.eth.wait_for_transaction_receipt(
                tx_hash, timeout=self.get_options().tx_timeout_in_seconds)
        )
def __sign_tx(self, tx):
"""
Sign a transaction.
"""
signed_tx = self.get_account().sign_transaction(tx)
return signed_tx
    def grant_role(self, role: Role, address: str):
        """
        Grants the given role to the given address

        NOTE(review): this definition is dead code — it is shadowed by the
        second ``grant_role`` defined later in this class. It also references
        ``self.__abi_module`` (mangled to ``_BaseModule__abi_module``), which
        is never assigned in the code shown, so it would raise
        AttributeError if it were ever reachable. Candidate for deletion.
        """
        role_hash = role.get_hash()
        tx = self.__abi_module.grant_role.build_transaction(
            role_hash, address,
            self.get_transact_opts()
        )
        self.execute_tx(tx)
    @abstractmethod
    def get_abi_module(self) -> ModuleTypes:
        """Return the concrete contract ABI wrapper backing this module."""
        pass
def grant_role(self, role: Role, address: str):
"""
Grants the given role to the given address
"""
role_hash = role.get_hash()
tx = self.get_abi_module().grant_role.build_transaction(
role_hash, address,
self.get_transact_opts()
)
self.execute_tx(tx)
def upload_metadata(self, data: Union[Dict, str]) -> str:
"""
Uploads the metadata to IPFS and returns the uri.
"""
storage = self.get_storage()
if isinstance(data, str) and data.startswith("ipfs://"):
return data
if 'image_uri' in data and data["image"] == "":
data["image"] = data["image_uri"]
if 'image' in data:
if isinstance(data["image"], bytes) or isinstance(data["image"], bytearray):
data["image"] = storage.upload(
data["image"], self.address, self.get_signer_address())
return storage.upload(json.dumps(data), self.address, self.get_signer_address())
def revoke_role(self, role: Role, address: str):
"""
Revokes the given role from the given address
"""
role_hash = role.get_hash()
try:
signer_address = self.get_signer_address()
if signer_address.lower() == address.lower():
self.execute_tx(self.get_abi_module().renounce_role.build_transaction(
role_hash, address, self.get_transact_opts()
))
return
except NoSignerException:
pass
self.execute_tx(self.get_abi_module().revoke_role.build_transaction(
role_hash, address, self.get_transact_opts()
))
def get_role_member_count(self, role: Role):
"""
Returns the number of members in the given role
"""
return self.get_abi_module().get_role_member_count.call(role.get_hash())
def get_role_members(self, role: Role) -> List[str]:
"""
Returns the members of the given role
"""
return [self.get_role_member(role, x) for x in range(self.get_role_member_count(role))]
def get_role_member(self, role: Role, index: int) -> str:
"""
Returns the member at the given index of the given role
"""
return self.get_abi_module().get_role_member.call(role.get_hash(), index)
def get_all_role_members(self) -> Dict[str, List[str]]:
"""
Returns all the members of all the roles
"""
return {
Role.admin.name: self.get_role_members(Role.admin),
Role.minter.name: self.get_role_members(Role.minter),
Role.transfer.name: self.get_role_members(Role.transfer),
Role.pauser.name: self.get_role_members(Role.pauser)
}
def is_erc721(self, address: str) -> bool:
erc165 = ERC165(self.get_client(), address)
return erc165.supports_interface.call(InterfaceIdErc721)
def is_erc1155(self, address: str) -> bool:
erc165 = ERC165(self.get_client(), address)
return erc165.supports_interface.call(InterfaceIdErc1155)
def __get_token_uri(self, token_id: int) -> ModuleTypes:
module = self.get_abi_module()
uri = ""
try:
uri = module.token_uri(token_id)
except:
pass
if uri != "":
return uri
try:
uri = module.uri(token_id)
except:
pass
return uri
| 1.992188 | 2 |
data_util.py | shiyu-wangbyte/leadopt | 0 | 12730 | # Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains utility code for reading packed data files.
"""
import os
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
import h5py
import tqdm
# Atom typing
#
# Atom typing is the process of figuring out which layer each atom should be
# written to. For ease of testing, the packed data file contains a lot of
# potentially useful atomic information which can be distilled during the
# data loading process.
#
# Atom typing is implemented by map functions of the type:
# (atom descriptor) -> (layer index)
#
# If the layer index is -1, the atom is ignored.
class AtomTyper(object):
    """Maps an atom descriptor onto grid-layer information.

    Wraps a user-supplied typing function together with the number of
    output layers it produces.
    """

    def __init__(self, fn, num_layers):
        """Initialize an atom typer.

        Args:
            fn: a function of type:
                (atomic_num, aro, hdon, hacc, pcharge) -> (mask)
            num_layers: number of output layers (<=32)
        """
        self._fn = fn
        self._num_layers = num_layers

    def size(self):
        """Return the number of output layers."""
        return self._num_layers

    def apply(self, *args):
        """Run the typing function on an atom descriptor."""
        return self._fn(*args)


class CondAtomTyper(AtomTyper):
    """AtomTyper built from a list of predicates.

    Bit ``k`` of the returned mask is set when predicate ``k`` matches the
    atom descriptor; at most 16 predicates are supported.
    """

    def __init__(self, cond_func):
        assert len(cond_func) <= 16

        def build_mask(*args):
            mask = 0
            for bit, predicate in enumerate(cond_func):
                if predicate(*args):
                    mask |= 1 << bit
            return mask

        super(CondAtomTyper, self).__init__(build_mask, len(cond_func))
# Receptor atom typers, keyed by configuration name. Each typer maps
# (atomic_num, aromatic, h_donor, h_acceptor, partial_charge) to a bitmask
# of output layers (see CondAtomTyper). Atomic number 0 marks an empty /
# dummy atom and 1 is hydrogen.
# NOTE(review): pcharge appears to be an unsigned byte centered at 128
# (>=128 partial positive, <128 partial negative) -- confirm upstream.
REC_TYPER = {
    # 1 channel, no hydrogen
    'single': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: num not in [0,1]
    ]),

    # 1 channel, including hydrogen
    'single_h': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: num != 0
    ]),

    # (C,N,O,S,*)
    'simple': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: num == 6,
        lambda num, aro, hdon, hacc, pcharge: num == 7,
        lambda num, aro, hdon, hacc, pcharge: num == 8,
        lambda num, aro, hdon, hacc, pcharge: num == 16,
        lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
    ]),

    # (H,C,N,O,S,*)
    'simple_h': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: num == 1,
        lambda num, aro, hdon, hacc, pcharge: num == 6,
        lambda num, aro, hdon, hacc, pcharge: num == 7,
        lambda num, aro, hdon, hacc, pcharge: num == 8,
        lambda num, aro, hdon, hacc, pcharge: num == 16,
        lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
    ]),

    # (aro, hdon, hacc, positive, negative, occ)
    'meta': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
        lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
        lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
        lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
        lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
        lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
    ]),

    # meta channels plus element channels (aro, hdon, hacc, +, -, occ, H,C,N,O,S)
    'meta_mix': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
        lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
        lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
        lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
        lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
        lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
        lambda num, aro, hdon, hacc, pcharge: num == 1, # hydrogen
        lambda num, aro, hdon, hacc, pcharge: num == 6, # carbon
        lambda num, aro, hdon, hacc, pcharge: num == 7, # nitrogen
        lambda num, aro, hdon, hacc, pcharge: num == 8, # oxygen
        lambda num, aro, hdon, hacc, pcharge: num == 16, # sulfur
    ])
}
# Ligand atom typers, keyed by configuration name. Unlike REC_TYPER these
# receive only the atomic number. Atomic number 0 marks a dummy atom
# (e.g. the fragment connection point) and 1 is hydrogen.
LIG_TYPER = {
    # 1 channel, no hydrogen
    'single': CondAtomTyper([
        lambda num: num not in [0,1]
    ]),

    # 1 channel, including hydrogen
    'single_h': CondAtomTyper([
        lambda num: num != 0
    ]),

    # (C,N,O,*)
    'simple': CondAtomTyper([
        lambda num: num == 6, # carbon
        lambda num: num == 7, # nitrogen
        lambda num: num == 8, # oxygen
        lambda num: num not in [0,1,6,7,8] # extra
    ]),

    # (H,C,N,O,*)
    'simple_h': CondAtomTyper([
        lambda num: num == 1, # hydrogen
        lambda num: num == 6, # carbon
        lambda num: num == 7, # nitrogen
        lambda num: num == 8, # oxygen
        lambda num: num not in [0,1,6,7,8] # extra
    ])
}
class FragmentDataset(Dataset):
    """Utility class to work with the packed fragments.h5 format.

    BUGFIX applied throughout: ``np.bool`` was deprecated in NumPy 1.20 and
    removed in 1.24; the builtin ``bool`` is used instead (identical dtype).
    """

    def __init__(self, fragment_file, rec_typer=REC_TYPER['simple'],
                 lig_typer=LIG_TYPER['simple'], filter_rec=None, filter_smi=None,
                 fdist_min=None, fdist_max=None, fmass_min=None, fmass_max=None,
                 verbose=False, lazy_loading=True):
        """Initializes the fragment dataset.

        Args:
            fragment_file: path to fragments.h5
            rec_typer: AtomTyper for receptor
            lig_typer: AtomTyper for ligand
            filter_rec: list of receptor ids to use (or None to use all)
            filter_smi: list of ligand smiles to use (or None to use all)
            verbose: if True, show progress bars while loading
            lazy_loading: if True, defer atom-type remapping until an example
                is first accessed instead of remapping everything up front

        (filtering options):
            fdist_min: minimum fragment distance
            fdist_max: maximum fragment distance
            fmass_min: minimum fragment mass (Da)
            fmass_max: maximum fragment mass (Da)
        """
        self._rec_typer = rec_typer
        self._lig_typer = lig_typer

        self.verbose = verbose
        self._lazy_loading = lazy_loading

        self.rec = self._load_rec(fragment_file, rec_typer)
        self.frag = self._load_fragments(fragment_file, lig_typer)

        self.valid_idx = self._get_valid_examples(
            filter_rec, filter_smi, fdist_min, fdist_max, fmass_min, fmass_max, verbose)

    def _load_rec(self, fragment_file, rec_typer):
        """Loads receptor information."""
        f = h5py.File(fragment_file, 'r')

        rec_coords = f['rec_coords'][()]
        rec_types = f['rec_types'][()]
        rec_lookup = f['rec_lookup'][()]

        r = range(len(rec_types))
        if self.verbose:
            r = tqdm.tqdm(r, desc='Remap receptor atoms')

        rec_remapped = np.zeros(len(rec_types), dtype=np.uint16)
        if not self._lazy_loading:
            for i in r:
                rec_remapped[i] = rec_typer.apply(*rec_types[i])

        # per-receptor flag: 1 once its atoms have been remapped (lazy mode)
        rec_loaded = np.zeros(len(rec_lookup)).astype(bool)

        # create rec mapping (receptor id -> lookup row)
        rec_mapping = {}
        for i in range(len(rec_lookup)):
            rec_mapping[rec_lookup[i][0].decode('ascii')] = i

        rec = {
            'rec_coords': rec_coords,
            'rec_types': rec_types,
            'rec_remapped': rec_remapped,
            'rec_lookup': rec_lookup,
            'rec_mapping': rec_mapping,
            'rec_loaded': rec_loaded
        }

        f.close()
        return rec

    def _load_fragments(self, fragment_file, lig_typer):
        """Loads fragment information."""
        f = h5py.File(fragment_file, 'r')

        frag_data = f['frag_data'][()]
        frag_lookup = f['frag_lookup'][()]
        frag_smiles = f['frag_smiles'][()]
        frag_mass = f['frag_mass'][()]
        frag_dist = f['frag_dist'][()]

        frag_lig_smi = None
        frag_lig_idx = None
        if 'frag_lig_smi' in f.keys():
            frag_lig_smi = f['frag_lig_smi'][()]
            frag_lig_idx = f['frag_lig_idx'][()]

        # unpack frag data into separate structures
        frag_coords = frag_data[:, :3].astype(np.float32)
        frag_types = frag_data[:, 3].astype(np.uint8)

        frag_remapped = np.zeros(len(frag_types), dtype=np.uint16)
        if not self._lazy_loading:
            for i in range(len(frag_types)):
                frag_remapped[i] = lig_typer.apply(frag_types[i])

        # per-fragment flag: 1 once its parent atoms have been remapped
        frag_loaded = np.zeros(len(frag_lookup)).astype(bool)

        # find and save connection point
        r = range(len(frag_lookup))
        if self.verbose:
            r = tqdm.tqdm(r, desc='Frag connection point')

        frag_conn = np.zeros((len(frag_lookup), 3))
        for i in r:
            _, f_start, f_end, _, _ = frag_lookup[i]
            fdat = frag_data[f_start:f_end]

            found = False
            for j in range(len(fdat)):
                # atomic number 0 marks the dummy/connection atom
                if fdat[j][3] == 0:
                    frag_conn[i, :] = tuple(fdat[j])[:3]
                    found = True
                    break

            assert found, "missing fragment connection point at %d" % i

        frag = {
            'frag_coords': frag_coords,      # d_idx -> (x,y,z)
            'frag_types': frag_types,        # d_idx -> (type)
            'frag_remapped': frag_remapped,  # d_idx -> (layer)
            'frag_lookup': frag_lookup,      # f_idx -> (rec_id, fstart, fend, pstart, pend)
            'frag_conn': frag_conn,          # f_idx -> (x,y,z)
            'frag_smiles': frag_smiles,      # f_idx -> smiles
            'frag_mass': frag_mass,          # f_idx -> mass
            'frag_dist': frag_dist,          # f_idx -> dist
            'frag_lig_smi': frag_lig_smi,
            'frag_lig_idx': frag_lig_idx,
            'frag_loaded': frag_loaded
        }

        f.close()
        return frag

    def _get_valid_examples(self, filter_rec, filter_smi, fdist_min, fdist_max, fmass_min,
                            fmass_max, verbose):
        """Returns an array of valid fragment indexes.

        "Valid" in this context means the fragment belongs to a receptor in
        filter_rec and the fragment abides by the optional mass/distance
        constraints.
        """
        # keep track of valid examples
        valid_mask = np.ones(self.frag['frag_lookup'].shape[0]).astype(bool)

        num_frags = self.frag['frag_lookup'].shape[0]

        # filter by receptor id
        if filter_rec is not None:
            valid_rec = np.zeros(num_frags, dtype=bool)

            r = range(num_frags)
            if verbose:
                r = tqdm.tqdm(r, desc='filter rec')

            for i in r:
                rec = self.frag['frag_lookup'][i][0].decode('ascii')
                if rec in filter_rec:
                    valid_rec[i] = 1
            valid_mask *= valid_rec

        # filter by ligand smiles string
        if filter_smi is not None:
            valid_lig = np.zeros(num_frags, dtype=bool)

            r = range(num_frags)
            if verbose:
                r = tqdm.tqdm(r, desc='filter lig')

            for i in r:
                smi = self.frag['frag_lig_smi'][self.frag['frag_lig_idx'][i]]
                smi = smi.decode('ascii')
                if smi in filter_smi:
                    valid_lig[i] = 1
            valid_mask *= valid_lig

        # filter by fragment distance
        if fdist_min is not None:
            valid_mask[self.frag['frag_dist'] < fdist_min] = 0
        if fdist_max is not None:
            valid_mask[self.frag['frag_dist'] > fdist_max] = 0

        # filter by fragment mass
        if fmass_min is not None:
            valid_mask[self.frag['frag_mass'] < fmass_min] = 0
        if fmass_max is not None:
            valid_mask[self.frag['frag_mass'] > fmass_max] = 0

        # convert to a list of indexes
        valid_idx = np.where(valid_mask)[0]

        return valid_idx

    def __len__(self):
        """Returns the number of valid fragment examples."""
        return self.valid_idx.shape[0]

    def __getitem__(self, idx):
        """Returns the Nth example.

        Returns a dict with:
            p_coords: parent coordinates (Px3)
            p_types: parent layers (Px1)
            r_coords: receptor coordinates (Rx3)
            r_types: receptor layers (Rx1)
            conn: fragment connection point in the parent molecule (x,y,z)
            smiles: fragment smiles string
        """
        # convert to fragment index
        frag_idx = self.valid_idx[idx]
        return self.get_raw(frag_idx)

    def get_raw(self, frag_idx):
        """Fetch one example by raw (unfiltered) fragment index."""
        # lookup fragment
        rec_id, f_start, f_end, p_start, p_end = self.frag['frag_lookup'][frag_idx]
        smiles = self.frag['frag_smiles'][frag_idx].decode('ascii')
        conn = self.frag['frag_conn'][frag_idx]

        # lookup receptor
        rec_idx = self.rec['rec_mapping'][rec_id.decode('ascii')]
        _, r_start, r_end = self.rec['rec_lookup'][rec_idx]

        # fetch data
        p_coords = self.frag['frag_coords'][p_start:p_end]
        r_coords = self.rec['rec_coords'][r_start:r_end]

        # in lazy mode, remap atom types on first access and mark as loaded
        if self._lazy_loading and self.frag['frag_loaded'][frag_idx] == 0:
            frag_types = self.frag['frag_types']
            frag_remapped = self.frag['frag_remapped']

            # load parent
            for i in range(p_start, p_end):
                frag_remapped[i] = self._lig_typer.apply(frag_types[i])

            self.frag['frag_loaded'][frag_idx] = 1

        if self._lazy_loading and self.rec['rec_loaded'][rec_idx] == 0:
            rec_types = self.rec['rec_types']
            rec_remapped = self.rec['rec_remapped']

            # load receptor
            for i in range(r_start, r_end):
                rec_remapped[i] = self._rec_typer.apply(*rec_types[i])

            self.rec['rec_loaded'][rec_idx] = 1

        p_mask = self.frag['frag_remapped'][p_start:p_end]
        r_mask = self.rec['rec_remapped'][r_start:r_end]

        return {
            'p_coords': p_coords,
            'p_types': p_mask,
            'r_coords': r_coords,
            'r_types': r_mask,
            'conn': conn,
            'smiles': smiles
        }

    def get_valid_smiles(self):
        """Returns a list of all valid smiles fragments."""
        valid_smiles = set()

        for idx in self.valid_idx:
            smiles = self.frag['frag_smiles'][idx].decode('ascii')
            valid_smiles.add(smiles)

        return list(valid_smiles)

    def lig_layers(self):
        """Number of ligand grid layers produced by the ligand typer."""
        return self._lig_typer.size()

    def rec_layers(self):
        """Number of receptor grid layers produced by the receptor typer."""
        return self._rec_typer.size()
class SharedFragmentDataset(object):
    """Filtered view over an already-loaded FragmentDataset.

    Reuses the underlying dataset's arrays (no re-loading) while applying
    its own receptor/smiles/distance/mass filters.
    """

    def __init__(self, dat, filter_rec=None, filter_smi=None, fdist_min=None,
                 fdist_max=None, fmass_min=None, fmass_max=None):
        self._dat = dat
        self.valid_idx = dat._get_valid_examples(
            filter_rec, filter_smi, fdist_min, fdist_max, fmass_min,
            fmass_max, verbose=True)

    def __len__(self):
        return self.valid_idx.shape[0]

    def __getitem__(self, idx):
        return self._dat.get_raw(self.valid_idx[idx])

    def get_valid_smiles(self):
        """Returns a list of all valid smiles fragments."""
        smiles_table = self._dat.frag['frag_smiles']
        unique = {smiles_table[i].decode('ascii') for i in self.valid_idx}
        return list(unique)

    def lig_layers(self):
        return self._dat.lig_layers()

    def rec_layers(self):
        return self._dat.rec_layers()
class FingerprintDataset(Dataset):
    """Lookup table mapping fragment smiles strings to fingerprint vectors."""

    def __init__(self, fingerprint_file):
        """Initializes a fingerprint dataset.

        Args:
            fingerprint_file: path to a fingerprint .h5 file
        """
        self.fingerprints = self._load_fingerprints(fingerprint_file)

    def _load_fingerprints(self, fingerprint_file):
        """Loads fingerprint information."""
        with h5py.File(fingerprint_file, 'r') as f:
            fingerprint_data = f['fingerprints'][()]
            fingerprint_smiles = f['smiles'][()]

        # create smiles->idx mapping
        fingerprint_mapping = {
            fingerprint_smiles[i].decode('ascii'): i
            for i in range(len(fingerprint_smiles))
        }

        return {
            'fingerprint_data': fingerprint_data,
            'fingerprint_mapping': fingerprint_mapping,
            'fingerprint_smiles': fingerprint_smiles,
        }

    def for_smiles(self, smiles):
        """Return a Tensor of fingerprints for a list of smiles.

        Args:
            smiles: size N list of smiles strings (as str not bytes)
        """
        data = self.fingerprints['fingerprint_data']
        mapping = self.fingerprints['fingerprint_mapping']

        out = np.zeros((len(smiles), data.shape[1]))
        for row, sm in enumerate(smiles):
            out[row] = data[mapping[sm]]

        return torch.Tensor(out)
| 2.34375 | 2 |
add_socket_response_event.py | Kur0den/kur0bot | 1 | 12731 | <reponame>Kur0den/kur0bot<gh_stars>1-10
from discord.gateway import DiscordWebSocket, utils, _log, KeepAliveHandler, ReconnectWebSocket
async def received_message(self, msg, /):
    """Patched DiscordWebSocket.received_message.

    Mirrors the upstream discord.py gateway handler except that the decoded
    payload is additionally dispatched as a 'socket_response' event (see the
    '# add dispatch' marker below), restoring the pre-2.0 behaviour for
    on_socket_response listeners.
    """
    if type(msg) is bytes:
        self._buffer.extend(msg)

        # zlib-stream frames end with this 4-byte suffix; wait for a full frame.
        if len(msg) < 4 or msg[-4:] != b'\x00\x00\xff\xff':
            return
        msg = self._zlib.decompress(self._buffer)
        msg = msg.decode('utf-8')
        self._buffer = bytearray()

    self.log_receive(msg)
    msg = utils._from_json(msg)

    _log.debug('For Shard ID %s: WebSocket Event: %s', self.shard_id, msg)

    # add dispatch -- this line is the entire purpose of the monkeypatch.
    self._dispatch('socket_response', msg)

    event = msg.get('t')
    if event:
        self._dispatch('socket_event_type', event)

    # Gateway payload fields: op = opcode, d = data, s = sequence number.
    op = msg.get('op')
    data = msg.get('d')
    seq = msg.get('s')

    if seq is not None:
        self.sequence = seq

    if self._keep_alive:
        self._keep_alive.tick()

    if op != self.DISPATCH:
        if op == self.RECONNECT:
            # "reconnect" can only be handled by the Client
            # so we terminate our connection and raise an
            # internal exception signalling to reconnect.
            _log.debug('Received RECONNECT opcode.')
            await self.close()
            raise ReconnectWebSocket(self.shard_id)

        if op == self.HEARTBEAT_ACK:
            if self._keep_alive:
                self._keep_alive.ack()
            return

        if op == self.HEARTBEAT:
            if self._keep_alive:
                beat = self._keep_alive.get_payload()
                await self.send_as_json(beat)
            return

        if op == self.HELLO:
            # HELLO carries the heartbeat interval in milliseconds.
            interval = data['heartbeat_interval'] / 1000.0
            self._keep_alive = KeepAliveHandler(ws=self, interval=interval, shard_id=self.shard_id)
            # send a heartbeat immediately
            await self.send_as_json(self._keep_alive.get_payload())
            self._keep_alive.start()
            return

        if op == self.INVALIDATE_SESSION:
            # data is True when the session may be resumed; otherwise start fresh.
            if data is True:
                await self.close()
                raise ReconnectWebSocket(self.shard_id)

            self.sequence = None
            self.session_id = None
            _log.info('Shard ID %s session has been invalidated.', self.shard_id)
            await self.close(code=1000)
            raise ReconnectWebSocket(self.shard_id, resume=False)

        _log.warning('Unknown OP code %s.', op)
        return

    if event == 'READY':
        self._trace = trace = data.get('_trace', [])
        self.sequence = msg['s']
        self.session_id = data['session_id']
        # pass back shard ID to ready handler
        data['__shard_id__'] = self.shard_id
        _log.info('Shard ID %s has connected to Gateway: %s (Session ID: %s).',
                  self.shard_id, ', '.join(trace), self.session_id)

    elif event == 'RESUMED':
        self._trace = trace = data.get('_trace', [])
        # pass back the shard ID to the resumed handler
        data['__shard_id__'] = self.shard_id
        _log.info('Shard ID %s has successfully RESUMED session %s under trace %s.',
                  self.shard_id, self.session_id, ', '.join(trace))

    # Route the event to the library's internal parser, if one exists.
    try:
        func = self._discord_parsers[event]
    except KeyError:
        _log.debug('Unknown event %s.', event)
    else:
        func(data)

    # remove the dispatched listeners (wait_for futures) that match this event
    removed = []
    for index, entry in enumerate(self._dispatch_listeners):
        if entry.event != event:
            continue

        future = entry.future
        if future.cancelled():
            removed.append(index)
            continue

        try:
            valid = entry.predicate(data)
        except Exception as exc:
            future.set_exception(exc)
            removed.append(index)
        else:
            if valid:
                ret = data if entry.result is None else entry.result(data)
                future.set_result(ret)
                removed.append(index)

    # delete in reverse so earlier indexes stay valid
    for index in reversed(removed):
        del self._dispatch_listeners[index]


# Install the patched handler on the gateway class.
DiscordWebSocket.received_message = received_message
| 2.25 | 2 |
Image_detection_codes/Keras_training/test2.py | pasadyash/CitizenServiceApp | 0 | 12732 | import numpy as np
np.random.seed(123) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from dataset_pothole import pothole
from keras.models import model_from_json
# 4. Load pre-shuffled MNIST data into train and test sets
# 4. Load pre-shuffled pothole data into train and test sets
(X_train, y_train), (X_test, y_test) = pothole.load_data()

print(X_train.shape)
print()
print(y_train.shape)
print()

# 5. Preprocess input data: add a single channel axis and scale to [0, 1]
X_train = X_train.reshape(X_train.shape[0], 200, 200, 1)
X_test = X_test.reshape(X_test.shape[0], 200, 200, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# NOTE(review): 3380 appears to be the max raw pixel value -- confirm.
X_train /= 3380
X_test /= 3380

# 6. Preprocess class labels (one-hot encode the 4 classes)
Y_train = np_utils.to_categorical(y_train, 4)
Y_test = np_utils.to_categorical(y_test, 4)

# 7. Define model architecture
nb_classes = 4
nb_filters = 32  # number of convolutional filters to use
nb_pool = 2      # size of pooling area for max pooling
nb_conv = 3      # convolution kernel size

model = Sequential()

model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(200, 200, 1)))
convout1 = Activation('relu')
model.add(convout1)
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
convout2 = Activation('relu')
model.add(convout2)
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.5))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

# 8. Compile model.
# BUGFIX: include an accuracy metric so evaluate() returns [loss, accuracy];
# without metrics it returns a scalar loss and the score[0]/score[1] prints
# below would raise a TypeError.
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
              metrics=['accuracy'])

# 9. Fit model on training data
model.fit(X_train, Y_train,
          batch_size=32, nb_epoch=2, verbose=1)

# 10. Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)

# serialize model architecture to JSON and weights to HDF5
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("model.h5")
print("Saved model to disk")

print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
| 2.890625 | 3 |
platform/web/api/device/models.py | JMSHDev/regent.dev | 1 | 12733 | import hashlib
import random
import string
import logging
from django.db import models
LOG = logging.getLogger(__name__)
class Device(models.Model):
    # A registered device; `name` doubles as its unique identifier.
    name = models.CharField(max_length=50, unique=True)
    customer = models.CharField(max_length=50)
    # NOTE(review): status values appear to be "offline"/"online" and
    # "down"/"up" respectively -- confirm against the agent protocol.
    agent_status = models.CharField(max_length=10, default="offline")
    program_status = models.CharField(max_length=10, default="down")
    last_updated = models.DateTimeField(auto_now=True)  # touched on every save

    def delete_mqtt_credentials(self):
        """Delete all MQTT auth and ACL rows attached to this device."""
        self.auth.all().delete()
        self.acl.all().delete()
class MqttAuth(models.Model):
    # MQTT broker credentials for a device.
    username = models.CharField(max_length=100, unique=True)
    password = models.CharField(max_length=100)  # hex sha256(password + salt)
    salt = models.CharField(max_length=10)
    activated = models.BooleanField(default=False)
    device = models.ForeignKey(
        Device, on_delete=models.CASCADE, related_name="auth", related_query_name="auth", null=True
    )

    def __str__(self):
        return "activated" if self.activated else "not activated"

    @classmethod
    def create(cls, username, password, activated, device=None):
        """Build (but do not save) an MqttAuth row with a salted password hash.

        NOTE(review): a single SHA-256 round is weak for password storage and
        `random` is not a CSPRNG -- consider hashlib.pbkdf2_hmac (or Django's
        password hashers) and the `secrets` module for salt generation.
        """
        salt = "".join(random.choice(string.ascii_letters) for _ in range(10))
        password = hashlib.sha256((password + salt).encode("utf-8")).hexdigest()
        return MqttAuth(username=username, password=password, salt=salt, activated=activated, device=device)
class MqttAcl(models.Model):
    # MQTT topic access-control entry (EMQX-style ACL row).
    # NOTE(review): `allow` and `access` look like EMQX ACL integer codes
    # (allow/deny, subscribe/publish) -- confirm against broker config.
    allow = models.SmallIntegerField()
    ipaddr = models.CharField(max_length=60, null=True)
    username = models.CharField(max_length=100, null=True)
    clientid = models.CharField(max_length=100, null=True)
    access = models.SmallIntegerField()
    topic = models.CharField(max_length=100)
    device = models.ForeignKey(
        Device, on_delete=models.CASCADE, related_name="acl", related_query_name="acl", null=True
    )
class Telemetry(models.Model):
    # One telemetry report from a device; payload stored as raw JSON.
    device = models.ForeignKey(
        Device, on_delete=models.CASCADE, related_name="telemetry", related_query_name="telemetry"
    )
    created_on = models.DateTimeField(auto_now_add=True)  # set once at insert
    state = models.JSONField()
| 2.25 | 2 |
armageddon/__init__.py | acse-ns1321/asteroid-impact-simulator | 0 | 12734 | # flake8:NOQA
"""Python asteroid airburst calculator"""
from .solver import *
from .damage import *
from .locator import *
from .mapping import *
| 1.070313 | 1 |
proxy_server/backend_services.py | lmanzurv/django_proxy_server | 11 | 12735 | from django.contrib.auth import SESSION_KEY
from django.core.cache import cache
from django.conf import settings
from django.http import HttpResponse, HttpResponseServerError
from proxy_server.response import AJAX_REQUEST
import httplib, json, proxy_server
def invoke_backend_service(method, function_path, json_data=dict(), request=None, response_token=True, public=False, secure=False):
    """Invoke a backend REST service and return ``(status_code, payload)``.

    Args:
        method: HTTP verb ('GET', 'POST', ...).
        function_path: path of the backend endpoint.
        json_data: dict serialized as the JSON request body.
        request: Django request (required unless ``public`` is True).
        response_token: expect a refreshed user token in the response.
        public: the service does not require an authenticated user.
        secure: use HTTPS instead of HTTP.

    Returns:
        Tuple of (HTTP status code, decoded payload or error dict).
    """
    error_message = None
    try:
        if public is False and request is None:
            error_message = 'A private web service must receive Django\'s request'
            raise Exception
        if response_token is True and request is None:
            error_message = 'A web service cannot expect a response token and not receive Django\'s request'
            raise Exception

        if not hasattr(settings, 'BACKEND_HOST'):
            error_message = 'No backend host and/or port specified'
            raise Exception

        if secure:
            if hasattr(settings, 'BACKEND_PORT'):
                conn = httplib.HTTPSConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
            else:
                conn = httplib.HTTPSConnection(settings.BACKEND_HOST)
        else:
            if hasattr(settings, 'BACKEND_PORT'):
                conn = httplib.HTTPConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
            else:
                conn = httplib.HTTPConnection(settings.BACKEND_HOST)

        # BUGFIX: copy the shared header template instead of mutating the
        # module-level dict; previously per-user headers (token, client IP)
        # leaked between unrelated requests.
        headers = dict(proxy_server.RESTFUL_HEADER)
        headers[proxy_server.API_KEY] = settings.SECRET_KEY

        if request is not None:
            pk = cache.get(AJAX_REQUEST, None)
            if pk:
                request.user.pk = pk
                cache.delete(AJAX_REQUEST)

            headers[proxy_server.USER_TOKEN] = request.user.pk
            headers[proxy_server.CLIENT_IP] = request.META.get(proxy_server.HTTP_FROM)

        try:
            conn.request(method, function_path, json.dumps(json_data), headers)
        except Exception:
            error_message = 'Could not connect to service'
            raise Exception

        response = conn.getresponse()
        response_data = response.read()
        conn.close()

        if response.status == 403:
            return 403, None

        if response.status == 204:
            if response_token is True:
                error_message = 'Backend server didn\'t respond with a token'
                raise Exception
            return 204, None
        else:
            try:
                response_json = json.loads(response_data)
            except Exception:
                error_message = 'Unknown response format'
                raise Exception

            if response_token is True:
                # Rotate the session's user token with the one the backend
                # returned, keeping any cached user dict in sync.
                user_dict = None
                if SESSION_KEY in request.session:
                    user_dict = cache.get(request.session[SESSION_KEY])
                    cache.delete(request.session[SESSION_KEY])

                request.session[SESSION_KEY] = response_json[proxy_server.USER_TOKEN]
                request.user.pk = response_json[proxy_server.USER_TOKEN]
                request.session[proxy_server.EXPIRATION_DATE] = response_json[proxy_server.EXPIRATION_DATE]

                if user_dict:
                    user_dict['pk'] = request.user.pk
                    cache.set(request.session[SESSION_KEY], user_dict)

            if response.status == 200:
                if response_token is True and proxy_server.USER_TOKEN not in response_json:
                    error_message = 'Server expected user token in response'
                    raise Exception

                result = None
                if proxy_server.RESPONSE in response_json:
                    result = response_json[proxy_server.RESPONSE]
                return 200, result
            else:
                code = response.status
                if proxy_server.ERROR in response_json:
                    error_message = response_json[proxy_server.ERROR][proxy_server.MESSAGE]
                    raise Exception(code)
                else:
                    error_message = response.reason
                    raise Exception(code)

    except Exception as e:
        if error_message is None:
            error_message = 'Unknown error in service invocation'

        # BUGFIX: ``isinstance(str(e), int)`` was always False, so backend
        # status codes carried in the exception message were silently
        # collapsed to 500. Parse numeric messages instead.
        code = int(str(e)) if str(e).isdigit() else 500
        error = {
            'error': {
                'code': code,
                'type': 'ProxyServerError',
                'message': error_message
            }
        }
        return code, error
def invoke_backend_service_as_proxy(request, method, function_path, json_data=dict(), response_token=True, secure=False):
    """Forward a request to the backend and relay its response verbatim.

    Unlike invoke_backend_service, this copies the incoming request's token,
    client IP and API key headers through and returns a Django HttpResponse
    mirroring the backend's status, headers and body.

    Returns:
        HttpResponse relaying the backend response, or HttpResponseServerError
        with a JSON error body on failure.
    """
    error_message = None
    try:
        if not hasattr(settings, 'BACKEND_HOST'):
            error_message = 'No backend host and/or port specified'
            raise Exception

        if secure:
            if hasattr(settings, 'BACKEND_PORT'):
                conn = httplib.HTTPSConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
            else:
                conn = httplib.HTTPSConnection(settings.BACKEND_HOST)
        else:
            if hasattr(settings, 'BACKEND_PORT'):
                conn = httplib.HTTPConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
            else:
                conn = httplib.HTTPConnection(settings.BACKEND_HOST)

        # BUGFIX: copy the shared header template instead of mutating the
        # module-level dict; previously per-request headers leaked between
        # unrelated requests.
        headers = dict(proxy_server.RESTFUL_HEADER)
        headers[proxy_server.USER_TOKEN] = request.META.get(proxy_server.HTTP_USER_TOKEN)
        headers[proxy_server.CLIENT_IP] = request.META.get(proxy_server.HTTP_FROM)
        headers[proxy_server.API_KEY] = request.META.get(proxy_server.HTTP_API_KEY)

        try:
            conn.request(method, function_path, json.dumps(json_data), headers)
        except Exception:
            error_message = 'Could not connect to service'
            raise Exception

        response = conn.getresponse()
        response_data = response.read()
        conn.close()

        if response.status == 403:
            resp = HttpResponse(status=response.status, reason=response.reason)
            for header, value in response.getheaders():
                resp[header] = value
            # Hop-by-hop headers must not be forwarded by a proxy (RFC 7230).
            for header in proxy_server.HOP_BY_HOP:
                del resp[header]
            resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
            return resp

        if response.status == 204:
            if response_token is True:
                error_message = 'Backend server didn\'t respond with a token'
                raise Exception

            resp = HttpResponse(status=response.status, content_type='application/json', reason=response.reason)
            for header, value in response.getheaders():
                resp[header] = value
            for header in proxy_server.HOP_BY_HOP:
                del resp[header]
            resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
            return resp
        else:
            try:
                response_json = json.loads(response_data)
            except Exception:
                error_message = 'Unknown response format'
                raise Exception

            if response.status == 200:
                if response_token is True and proxy_server.USER_TOKEN not in response_json:
                    error_message = 'Server expected user token in response'
                    raise Exception

                resp = HttpResponse(response_data, status=response.status, content_type='application/json', reason=response.reason)
                for header, value in response.getheaders():
                    resp[header] = value
                for header in proxy_server.HOP_BY_HOP:
                    del resp[header]
                resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
                return resp

    except Exception as e:
        if error_message is None:
            error_message = 'Unknown error in service invocation'

        # BUGFIX: ``isinstance(str(e), int)`` was always False, so numeric
        # exception messages never reached the error payload as status codes.
        code = int(str(e)) if str(e).isdigit() else 500
        error = {
            'error': {
                'code': code,
                'type': 'ProxyServerError',
                'message': error_message
            }
        }
        return HttpResponseServerError(json.dumps(error), content_type='application/json')
| 2.109375 | 2 |
py/trawl_analyzer/TrawlSensorsDB_model.py | nwfsc-fram/pyFieldSoftware | 0 | 12736 | <reponame>nwfsc-fram/pyFieldSoftware<gh_stars>0
# from peewee import *
from playhouse.apsw_ext import TextField, IntegerField, PrimaryKeyField
from py.trawl_analyzer.Settings import SensorsModel as BaseModel
# database = SqliteDatabase('data\clean_sensors.db', **{})
class UnknownField(object):
    # Placeholder emitted by peewee's pwiz generator for column types it
    # cannot introspect; accepts and ignores any constructor arguments.
    def __init__(self, *_, **__): pass


class EnviroNetRawFiles(BaseModel):
    # One raw EnviroNet data file recorded for a deployment window of a haul.
    activation_datetime = TextField(db_column='ACTIVATION_DATETIME', null=True)
    deactivation_datetime = TextField(db_column='DEACTIVATION_DATETIME', null=True)
    deployed_equipment = IntegerField(db_column='DEPLOYED_EQUIPMENT_ID', null=True)
    enviro_net_raw_files = PrimaryKeyField(db_column='ENVIRO_NET_RAW_FILES_ID')
    haul = TextField(db_column='HAUL_ID', null=True)
    raw_file = TextField(db_column='RAW_FILE', null=True)

    class Meta:
        db_table = 'ENVIRO_NET_RAW_FILES'


class EnviroNetRawStrings(BaseModel):
    # Raw sensor strings captured from EnviroNet, timestamped and tied to a
    # haul and a piece of deployed equipment.
    date_time = TextField(db_column='DATE_TIME', index=True, null=True)
    deployed_equipment = IntegerField(db_column='DEPLOYED_EQUIPMENT_ID', null=True)
    enviro_net_raw_strings = PrimaryKeyField(db_column='ENVIRO_NET_RAW_STRINGS_ID')
    haul = TextField(db_column='HAUL_ID', null=True)
    raw_strings = TextField(db_column='RAW_STRINGS', null=True)

    class Meta:
        db_table = 'ENVIRO_NET_RAW_STRINGS'


class RawSentences(BaseModel):
    # Individual raw sensor sentences (e.g. NMEA-style lines) per equipment.
    date_time = TextField(db_column='DATE_TIME', null=True)
    deployed_equipment = IntegerField(db_column='DEPLOYED_EQUIPMENT_ID', null=True)
    raw_sentence = TextField(db_column='RAW_SENTENCE', null=True)
    raw_sentence_id = PrimaryKeyField(db_column='RAW_SENTENCE_ID')

    class Meta:
        db_table = 'RAW_SENTENCES'
| 2.171875 | 2 |
src/statemachine.py | CEOAI-ABM/SIR-Modelling | 1 | 12737 | import transitions
from functools import partial
# from transitions import transitions.Machine
# TODO: whenever there is a state chage store the following
# (DAY,function_called) -> Stored for every person for agent status, state and Testing state
class AgentStatusA(object):
    """State machine for the *mobility* of an agent.

    ``self.Status`` is always one of
    'Free', 'Quarentined', 'Out_of_city', 'Hospitalized', 'ICU', 'Isolation'.
    The class also keeps the agent registered/unregistered in its workplace
    ``Working`` set, the city's travelling list, and the truth-status
    placeholder sets of the virus model.

    NOTE(review): methods rely on attributes supplied by a sibling mixin
    (``useTN``, ``City``, ``get_workplace_obj``, ``about``,
    ``is_Asymptomatic``, ``show_symptoms``) — confirm against the composed
    Person class.
    """

    # Canonical mobility states; ``self.Status`` always holds one of these.
    status = ['Free', 'Quarentined', 'Out_of_city', 'Hospitalized', 'ICU', 'Isolation']

    def __init__(self):
        """Start as a Free agent that is considered added to its workplace."""
        super(AgentStatusA, self).__init__()
        self.ADDED_BIT = True           # True while listed in the workplace Working set
        self.TruthStatus = None         # set later via update_objects()
        self.Last_Added_Placeholder = None  # code of the placeholder set we are in (0..4)
        self.buffer = []                # debug trace of _add_/_remove_ calls
        self.Status = self.status[0]

    # def log_update(self,message):

    def update_objects(self, TruthStatus):
        """Attach the truth-status object of the virus model.

        Args:
            TruthStatus (object): Truth State object to update
        """
        self.TruthStatus = TruthStatus

    def __remove_from_transport__(self):
        # Drop the agent from the city's travelling list if it commutes.
        if self.useTN == True:
            self.City.TravellingCitizens.remove(self)
            #print('Person {} removed from travelling list of City {}. New length = {}'.format(self.IntID, self.City.Name, len(self.City.TravellingCitizens)))

    def _remove_(self):
        """Remove the agent from its workplace and the transport list."""
        if self.ADDED_BIT:
            obj = self.get_workplace_obj()
            if obj != None:
                self.buffer.append('_remove_')
                obj.Working.remove(self)
                self.ADDED_BIT = False
            self.__remove_from_transport__()

    def _add_(self):
        """Add the agent back to its workplace and the transport list."""
        # BUGFIX: the original guard was ``if ~self.ADDED_BIT:``; bitwise NOT
        # of a bool is never zero (~True == -2, ~False == -1), so the branch
        # always ran and the agent could be re-added repeatedly.
        if not self.ADDED_BIT:
            obj = self.get_workplace_obj()
            if obj != None:
                if obj.Working != None:
                    self.buffer.append('_add_')
                    obj.Working.add(self)
                    self.ADDED_BIT = True
        if self.useTN == True:
            self.City.TravellingCitizens.add(self)

    def _left_(self):
        """Leave the city; unregisters from workplace/transport."""
        self._remove_()

    def _entered_(self):
        """Come back to the city; re-registers at workplace/transport."""
        self._add_()

    def __remove_from_placeholder__(self):
        """Remove the agent from its current truth-status placeholder set.

        Returns:
            bool: True when the agent was in a placeholder and was removed.
        """
        try:
            if self.Last_Added_Placeholder == 0:    # asymptomatic & free
                self.TruthStatus.AFreeP.remove(self)
                return True
            elif self.Last_Added_Placeholder == 1:  # quarantined
                self.TruthStatus.AQuarentinedP.remove(self)
                return True
            elif self.Last_Added_Placeholder == 2:  # isolated
                self.TruthStatus.SIsolatedP.remove(self)
                return True
            elif self.Last_Added_Placeholder == 3:  # hospitalized
                self.TruthStatus.SHospitalizedP.remove(self)
                return True
            elif self.Last_Added_Placeholder == 4:  # in ICU
                self.TruthStatus.SIcuP.remove(self)
                return True
            else:
                return False
        except:
            # Dump the agent before re-raising so the failure can be traced.
            self.about()
            raise

    def leave_city(self):
        """Free -> Out_of_city; deregister everywhere."""
        acceptable_states = [self.status[0]]
        try:
            assert self.Status in acceptable_states
        except:
            print('##########', self.Status)
            raise
        self.Status = self.status[2]
        self._left_()
        self.__remove_from_placeholder__()
        self.Last_Added_Placeholder = None

    def enter_city(self):
        """Out_of_city -> Free; re-register, and re-enter AFreeP if infected."""
        acceptable_states = [self.status[2]]
        try:
            assert self.Status in acceptable_states
        except:
            print('##########', self.Status)
            raise
        self.Status = self.status[0]
        self._entered_()
        if self.is_Asymptomatic():
            self.TruthStatus.AFreeP.add(self)
            self.Last_Added_Placeholder = 0

    def quarentined(self, DAY):
        """Move the agent into quarantine (DAY kept for interface parity)."""
        acceptable_states = [self.status[0], self.status[1], self.status[2]]
        assert self.Status in acceptable_states
        if self.Last_Added_Placeholder != 1:
            self.__remove_from_placeholder__()
            if self.is_Free():  # if free, add to the quarantined placeholder
                self.TruthStatus.AQuarentinedP.add(self)
                self.Last_Added_Placeholder = 1
        self.Status = self.status[1]
        self._remove_()

    def hospitalized(self, DAY):
        """Free/Quarentined -> Hospitalized; marks symptoms as shown."""
        acceptable_states = [self.status[0], self.status[1]]
        assert self.Status in acceptable_states
        self.Status = self.status[3]
        self._remove_()
        self.show_symptoms(DAY)
        if self.__remove_from_placeholder__():  # still in the city
            self.TruthStatus.SHospitalizedP.add(self)
            self.Last_Added_Placeholder = 3

    def admit_icu(self, DAY):
        """Free/Quarentined/Hospitalized -> ICU; marks symptoms as shown."""
        acceptable_states = [self.status[0], self.status[1], self.status[3]]
        assert self.Status in acceptable_states
        self.Status = self.status[4]
        self._remove_()
        self.show_symptoms(DAY)
        if self.__remove_from_placeholder__():  # still in the city
            self.TruthStatus.SIcuP.add(self)
            self.Last_Added_Placeholder = 4

    def isolate(self, Today):
        """Move the agent into isolation from any in-city state."""
        acceptable_states = [self.status[0], self.status[1], self.status[3], self.status[4], self.status[5]]
        assert self.Status in acceptable_states
        if self.Status == self.status[0] or self.Status == self.status[1]:
            self.show_symptoms(Today)
        if self.Last_Added_Placeholder != 2:
            if self.__remove_from_placeholder__():  # still in the city
                self.TruthStatus.SIsolatedP.add(self)
                self.Last_Added_Placeholder = 2
        self.Status = self.status[5]
        self._remove_()

    def is_Free(self):
        return self.Status == self.status[0]

    def is_Quarentined(self):
        return self.Status == self.status[1]

    def is_Out_of_City(self):
        return self.Status == self.status[2]

    def is_Hospitalized(self):
        return self.Status == self.status[3]

    def is_ICU(self):
        return self.Status == self.status[4]

    def is_Isolation(self):
        return self.Status == self.status[5]
class AgentStateA(AgentStatusA):
    """Disease-progression state of an agent, layered on its mobility status.

    ``self.State`` is one of 'Healthy', 'Asymptomatic', 'Symptomatic',
    'Recovered', 'Died'.  Transitions also update the truth-status
    placeholder sets and the agent's ``History`` record.
    """

    # Canonical disease states; ``self.State`` always holds one of these.
    states = ['Healthy', 'Asymptomatic', 'Symptomatic', 'Recovered', 'Died']

    def __init__(self):
        """Start as a healthy agent with no truth-status object attached."""
        super(AgentStateA, self).__init__()
        self.State = self.states[0]
        self.TruthStatus = None

    def infected(self, DAY):
        """Healthy -> Asymptomatic; register in the free-asymptomatic set."""
        assert self.State == self.states[0]
        self.State = self.states[1]
        self.TruthStatus.AFreeP.add(self)
        self.Last_Added_Placeholder = 0
        self.History["Infected"] = DAY

    def show_symptoms(self, DAY):
        """Asymptomatic/Symptomatic -> Symptomatic; record the day."""
        assert self.State in (self.states[1], self.states[2])
        self.State = self.states[2]
        self.History["Symptomatic"] = DAY

    def recover(self, DAY):
        """Symptomatic -> Recovered; move to the recovered placeholder."""
        assert self.State == self.states[2]
        self.State = self.states[3]
        self.Status = self.status[5]
        # Removal succeeds only when the agent is still inside the city.
        if self.__remove_from_placeholder__():
            self.TruthStatus.RRecoveredP.add(self)
            self.Last_Added_Placeholder = 5
        self.History["Recovered"] = DAY
        self.History["Died"] = -1

    def die(self, DAY):
        """Symptomatic -> Died; move to the deceased placeholder."""
        assert self.State == self.states[2]
        self.State = self.states[4]
        self.Status = self.status[5]
        # Removal succeeds only when the agent is still inside the city.
        if self.__remove_from_placeholder__():
            self.TruthStatus.RDiedP.add(self)
            self.Last_Added_Placeholder = 6
        self.History["Recovered"] = -1
        self.History["Died"] = DAY

    def _state_is(self, idx):
        # Internal helper: compare against the canonical state at *idx*.
        return self.State == self.states[idx]

    def is_Healthy(self):
        return self._state_is(0)

    def is_Asymptomatic(self):
        return self._state_is(1)

    def is_Symptomatic(self):
        return self._state_is(2)

    def is_Recovered(self):
        return self._state_is(3)

    def is_Died(self):
        return self._state_is(4)
class TestingState(object):
    """Mixin state machine for an agent's COVID-testing status.

    A single class-level ``transitions.Machine`` (built with ``model=None``)
    is shared by all instances; ``__getattribute__`` forwards the trigger
    names (``awaiting_test``, ``tested_positive``, ``tested_negative``) to
    the machine with ``self`` as the model, so every person keeps its own
    ``state`` string.

    States: Not_tested -> Awaiting_Testing -> Tested_Positive /
    Tested_Negative (a negative result may be queued for testing again).

    Attributes:
        machine (transitions.Machine): shared transition table.
        state (str): current testing state of this instance.
    """
    machine = transitions.Machine(model=None, states=['Not_tested', 'Awaiting_Testing', 'Tested_Positive','Tested_Negative'], initial='Not_tested',
                        transitions=[
                            {'trigger': 'awaiting_test', 'source': ['Not_tested','Awaiting_Testing','Tested_Negative'], 'dest': 'Awaiting_Testing','before':'add_to_TestingQueue'},
                            {'trigger': 'tested_positive', 'source': 'Awaiting_Testing', 'dest': 'Tested_Positive','before':'tested_positive_func'},
                            {'trigger': 'tested_negative', 'source': 'Awaiting_Testing', 'dest': 'Tested_Negative','before':'tested_negative_func'},
                        ])

    def __init__(self):
        """Initialize the per-instance testing state to 'Not_tested'.

        NOTE(review): relies on a ``City`` attribute supplied by a sibling
        mixin of the composed person class — confirm.
        """
        super().__init__()
        self.state = 'Not_tested'

    def __remove_from_testing_list__(self):
        # Drop this person from the city's pending-test queue.
        self.City.TestingQueue.remove(self)

    def add_to_TestingQueue(self, PrivateTest=False):
        """Queue this person for a public test (no-op for private tests)."""
        # This function is for the City to add citizens into testingQueue
        if PrivateTest == False:
            if self.state != 'Awaiting_Testing' :
                self.City.TestingQueue.append(self)
            # A re-queued negative leaves the 'Negative' result set.
            if self.state == 'Tested_Negative':
                self.City.TestedP['Negative'].remove(self)
        #print('City {} added person {}'.format(self.City.Name, self.IntID))

        #pass type of test
    def tested_positive_func(self,Today, PrivateTest=False):
        """Record a positive result; quarantined people move to isolation."""
        self.City.TestedP['Positive'].add(self)
        self.City.NumTestedPositive += 1
        if PrivateTest == False:
            self.__remove_from_testing_list__()
        if self.is_Quarentined():
            self.isolate(Today)

    def tested_negative_func(self, PrivateTest=False):
        """Record a negative result and leave the public testing queue."""
        self.City.TestedP['Negative'].add(self)
        if PrivateTest == False:
            self.__remove_from_testing_list__()

    def __getattribute__(self, item):
        """Resolve attributes normally; fall back to machine triggers.

        Unknown attribute names that match a machine event are returned as
        a partial of that event's trigger bound to ``self``.
        """
        try:
            return super(TestingState, self).__getattribute__(item)
        except AttributeError:
            if item in self.machine.events:
                return partial(self.machine.events[item].trigger, self)
            raise
| 3.046875 | 3 |
hard-gists/5898352/snippet.py | jjhenkel/dockerizeme | 21 | 12738 | <gh_stars>10-100
import os
import scipy.io.wavfile as wav

# Requires the external `lame` binary on PATH and a recent scipy
# (needs new cython) to read the decoded WAV.

fname = 'XC135672-Red-winged\ Blackbird1301.mp3'
oname = 'temp.wav'

# Decode the MP3 to WAV with lame; the backslash in fname escapes the
# space for the shell (os.system runs through the shell).
cmd = 'lame --decode {0} {1}'.format(fname, oname)
os.system(cmd)

# wav.read returns a (sample_rate, samples) tuple.
data = wav.read(oname)
# your code goes here

# BUGFIX: `print len(...)` is Python-2-only syntax; the call form below
# works under both Python 2 and 3.
print(len(data[1]))
| 2.3125 | 2 |
examples/home-assistant/custom_components/evacalor/config_flow.py | fredericvl/pyevacalor | 2 | 12739 | """Config flow for Eva Calor."""
from collections import OrderedDict
import logging
import uuid
from pyevacalor import ( # pylint: disable=redefined-builtin
ConnectionError,
Error as EvaCalorError,
UnauthorizedError,
evacalor,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from .const import CONF_UUID, DOMAIN
_LOGGER = logging.getLogger(__name__)
def conf_entries(hass):
    """Return the set of e-mail addresses already configured for the domain."""
    entries = hass.config_entries.async_entries(DOMAIN)
    return {entry.data[CONF_EMAIL] for entry in entries}
class EvaCalorConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Eva Calor Config Flow handler.

    Collects e-mail and password from the user, validates them against the
    Eva Calor cloud with a freshly generated UUID, and creates one config
    entry per account.
    """

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    def _entry_in_configuration_exists(self, user_input) -> bool:
        """Return True if the account e-mail is already configured."""
        email = user_input[CONF_EMAIL]
        if email in conf_entries(self.hass):
            return True
        return False

    async def async_step_user(self, user_input=None):
        """Handle the user-initiated setup step.

        First call shows the form (user_input is None); subsequent calls
        validate the credentials and either create the entry, abort on a
        duplicate account, or redisplay the form with an error key.
        """
        errors = {}
        if user_input is not None:
            # Validate user input
            email = user_input[CONF_EMAIL]
            password = user_input[CONF_PASSWORD]
            if self._entry_in_configuration_exists(user_input):
                return self.async_abort(reason="device_already_configured")

            try:
                # The generated UUID identifies this client to the cloud and
                # is persisted with the entry for later API calls.
                gen_uuid = str(uuid.uuid1())
                evacalor(email, password, gen_uuid)
            except UnauthorizedError:
                errors["base"] = "unauthorized"
            except ConnectionError:
                errors["base"] = "connection_error"
            except EvaCalorError:
                errors["base"] = "unknown_error"
            if "base" not in errors:
                return self.async_create_entry(
                    title=DOMAIN,
                    data={
                        CONF_EMAIL: email,
                        CONF_PASSWORD: password,
                        CONF_UUID: gen_uuid,
                    },
                )
        else:
            user_input = {}

        # Build the form schema, pre-filling any previously entered values.
        data_schema = OrderedDict()
        data_schema[vol.Required(CONF_EMAIL, default=user_input.get(CONF_EMAIL))] = str
        data_schema[
            vol.Required(CONF_PASSWORD, default=user_input.get(CONF_PASSWORD))
        ] = str
        return self.async_show_form(
            step_id="user", data_schema=vol.Schema(data_schema), errors=errors
        )
| 2.15625 | 2 |
library_samples/Python3/ocs_sample_library_preview/Dataview/Dataview.py | osi-awoodall/OSI-Samples-OCS | 0 | 12740 | # Dataview.py
#
import json
from .DataviewQuery import DataviewQuery
from .DataviewMapping import DataviewMapping
from .DataviewIndexConfig import DataviewIndexConfig
from .DataviewGroupRule import DataviewGroupRule
class Dataview(object):
    """Dataview definition.

    Bundles the id, display metadata, query, mappings, index configuration
    and group rules that make up an OCS Dataview, with JSON (de)serialization.
    """

    def __init__(
        self,
        id=None,
        name=None,
        description=None,
        queries=None,
        mappings=None,
        indexConfig=None,
        indexDataType=None,
        groupRules=None,
    ):
        """
        :param id: required
        :param name: not required (may be duplicated in a namespace)
        :param description: not required
        :param queries: DataviewQuery, required (defaults to an empty one)
        :param mappings: DataviewMapping, not required
        :param indexConfig: DataviewIndexConfig, not required
        :param indexDataType: currently limited to "DateTime", required
        :param groupRules: list of DataviewGroupRule, not required
        """
        self.__id = id
        self.__name = name
        self.__description = description
        # BUGFIX: always initialize the private fields so the properties can
        # never raise AttributeError (the original set __queries/__mappings
        # only conditionally).
        self.__queries = queries if queries else DataviewQuery()
        self.__mappings = mappings if mappings else None
        self.__indexConfig = indexConfig
        self.__indexDataType = indexDataType
        # BUGFIX: avoid the shared mutable default argument for groupRules.
        self.__groupRules = groupRules if groupRules is not None else []

    @property
    def Id(self):
        """Unique identifier (required)."""
        return self.__id

    @Id.setter
    def Id(self, id):
        """Set the unique identifier (required)."""
        self.__id = id

    @property
    def Name(self):
        """Display name; may be duplicated in a namespace (not required)."""
        return self.__name

    @Name.setter
    def Name(self, name):
        """Set the display name (not required)."""
        self.__name = name

    @property
    def Description(self):
        """Easy-to-understand description (not required)."""
        return self.__description

    @Description.setter
    def Description(self, description):
        """Set the description (not required)."""
        self.__description = description

    @property
    def Queries(self):
        """DataviewQuery object (required)."""
        return self.__queries

    @Queries.setter
    def Queries(self, queries):
        """Set the DataviewQuery object (required)."""
        self.__queries = queries

    @property
    def Mappings(self):
        """DataviewMapping (not required)."""
        return self.__mappings

    @Mappings.setter
    def Mappings(self, mappings):
        """Set the DataviewMapping (not required)."""
        self.__mappings = mappings

    @property
    def IndexConfig(self):
        """DataviewIndexConfig (not required)."""
        return self.__indexConfig

    @IndexConfig.setter
    def IndexConfig(self, indexConfig):
        """Set the DataviewIndexConfig (not required)."""
        self.__indexConfig = indexConfig

    @property
    def IndexDataType(self):
        """Index data type; currently limited to "DateTime" (required)."""
        return self.__indexDataType

    @IndexDataType.setter
    def IndexDataType(self, indexDataType):
        """Set the index data type (required)."""
        self.__indexDataType = indexDataType

    @property
    def GroupRules(self):
        """List of DataviewGroupRule (not required)."""
        return self.__groupRules

    @GroupRules.setter
    def GroupRules(self, groupRules):
        """Set the list of DataviewGroupRule (not required)."""
        self.__groupRules = groupRules

    def toJson(self):
        """Serialize this Dataview to a JSON string."""
        return json.dumps(self.toDictionary())

    def toDictionary(self):
        """Serialize this Dataview to a plain dict, omitting unset parts."""
        # required properties
        dictionary = {"Id": self.Id}
        dictionary["Queries"] = self.Queries.toDictionary()

        # optional properties
        if hasattr(self, "Name"):
            dictionary["Name"] = self.Name
        if hasattr(self, "Description"):
            dictionary["Description"] = self.Description
        if hasattr(self, "Mappings") and self.Mappings is not None:
            dictionary["Mappings"] = self.Mappings.toDictionary()
        if hasattr(self, "IndexConfig") and self.IndexConfig is not None:
            dictionary["IndexConfig"] = self.IndexConfig.toDictionary()
        if hasattr(self, "IndexDataType"):
            dictionary["IndexDataType"] = self.IndexDataType
        if hasattr(self, "GroupRules"):
            dictionary["GroupRules"] = []
            for value in self.GroupRules:
                dictionary["GroupRules"].append(value.toDictionary())

        return dictionary

    @staticmethod
    def fromJson(jsonObj):
        """Build a Dataview from a dict parsed out of JSON."""
        return Dataview.fromDictionary(jsonObj)

    @staticmethod
    def fromDictionary(content):
        """Build a Dataview from a plain dict; missing keys keep defaults."""
        dataview = Dataview()
        if not content:
            return dataview

        if "Id" in content:
            dataview.Id = content["Id"]
        if "Name" in content:
            dataview.Name = content["Name"]
        if "Description" in content:
            dataview.Description = content["Description"]
        if "Queries" in content:
            dataview.Queries = DataviewQuery.fromDictionary(content["Queries"])
        if "Mappings" in content:
            dataview.Mappings = DataviewMapping.fromDictionary(content["Mappings"])
        if "IndexConfig" in content:
            dataview.IndexConfig = DataviewIndexConfig.fromDictionary(
                content["IndexConfig"]
            )
        if "IndexDataType" in content:
            dataview.IndexDataType = content["IndexDataType"]
        if "GroupRules" in content:
            groupRules = content["GroupRules"]
            if groupRules is not None and len(groupRules) > 0:
                dataview.GroupRules = []
                for value in groupRules:
                    dataview.GroupRules.append(DataviewGroupRule.fromDictionary(value))
        return dataview
PYTHON_Code/TestGUI.py | ROBO-BEV/BARISTO | 8 | 12741 | <gh_stars>1-10
from tkinter import *

window0 = Tk()
window0.geometry('960x540')
#tk.iconbitmap(default='ROBO_BEV_LOGO.ico')
window0.title("BARISTO")

# BUGFIX: the original created the image Label, then immediately rebound the
# same variable to the text Label and never packed the image, so the
# background was never displayed (and the image reference was only kept via
# the attribute assignment).
photo = PhotoImage(file="Page1.png")
background = Label(window0, image=photo)
# Keep a reference on the widget so Tk's image is not garbage-collected.
background.photo = photo
background.pack()

# Countdown text label.
countdown = Label(window0, text="10", fg="white", font=("Source Sans Pro", 50))
#countdown = Label(window0, text="9", fg="white")
countdown.pack()

window0.mainloop()
| 2.984375 | 3 |
handlers/play.py | AftahBagas/AlphaMusik | 0 | 12742 | from os import path
from telethon import Client
from telethon.types import Message, Voice
from callsmusic import callsmusic, queues
import converter
from downloaders import youtube
from config import BOT_NAME as bn, DURATION_LIMIT
from helpers.filters import command, other_filters
from helpers.decorators import errors
from helpers.errors import DurationLimitError
from helpers.gets import get_url, get_file_name
from telethon.types import InlineKeyboardButton, InlineKeyboardMarkup
@Client.on_message(command("lplay") & other_filters)
@errors
async def play(_, message: Message):
    """Handle the ``lplay`` command: start or queue playback in a voice chat.

    Accepts either a replied-to audio/voice message or a URL in the command
    text; converts the media to a playable file, then joins the group call
    (or appends to the queue when a call is already active).

    NOTE(review): the handler API used here (``message.reply`` /
    ``reply_photo`` / ``from_user.mention``) looks Pyrogram-style despite
    the ``telethon`` imports at the top of the file — confirm.
    """
    lel = await message.reply("🔄 **Processing** sounds...")
    # NOTE(review): sender_id/sender_name are currently unused — confirm
    # whether they are needed downstream.
    sender_id = message.from_user.id
    sender_name = message.from_user.first_name
    keyboard = InlineKeyboardMarkup(
        [
            [
                InlineKeyboardButton(
                    text="🔊 Group Support",
                    url="https://t.me/VcgMusicGroup")
            ]
        ]
    )
    # Audio from the replied-to message (regular audio or voice note), if any.
    audio = (message.reply_to_message.audio or message.reply_to_message.voice) if message.reply_to_message else None
    url = get_url(message)
    if audio:
        # Reject media longer than the configured duration limit (minutes).
        if round(audio.duration / 60) > DURATION_LIMIT:
            raise DurationLimitError(
                f"❌ Videos longer than {DURATION_LIMIT} minute(s) aren't allowed to play!"
            )

        file_name = get_file_name(audio)
        # Download only when not already cached in the downloads folder.
        file_path = await converter.convert(
            (await message.reply_to_message.download(file_name))
            if not path.isfile(path.join("downloads", file_name)) else file_name
        )
    elif url:
        file_path = await converter.convert(youtube.download(url))
    else:
        return await lel.edit_text("❗ You did not give me anything to play!")

    if message.chat.id in callsmusic.pytgcalls.active_calls:
        # A call is already playing in this chat: enqueue instead.
        position = await queues.put(message.chat.id, file=file_path)
        await lel.edit(f"#⃣ **Queued** at position {position}!")
    else:
        callsmusic.pytgcalls.join_group_call(message.chat.id, file_path)
        await message.reply_photo(
            photo="https://telegra.ph/file/a4fa687ed647cfef52402.jpg",
            reply_markup=keyboard,
            caption="▶️ **Playing** here the song requested by {}!".format(
                message.from_user.mention()
            ),
        )

        return await lel.delete()
| 2.328125 | 2 |
Sindri/Properties.py | mrcsbrn/TCC_software | 11 | 12743 | <filename>Sindri/Properties.py
from __future__ import annotations
from constants import DBL_EPSILON
class DeltaProp(object):
    """Bundle of thermodynamic property changes: Cp, H, S, G, U and A."""

    # Attribute names, in constructor order; used for field-wise operations.
    _FIELDS = ("Cp", "H", "S", "G", "U", "A")

    def __init__(self, cp: float, h: float, s: float, g: float, u: float, a: float):
        self.Cp = cp
        self.H = h
        self.S = s
        self.G = g
        self.U = u
        self.A = a

    def subtract(self, dp2: DeltaProp) -> DeltaProp:
        """Return a new DeltaProp holding the field-wise difference self - dp2."""
        diffs = [getattr(self, name) - getattr(dp2, name) for name in self._FIELDS]
        return DeltaProp(*diffs)

    def isEqual(self, dp2: DeltaProp, tol=1e-5) -> bool:
        """True when every field matches within the relative tolerance *tol*."""
        return all(
            self._relAbsErr(getattr(self, name), getattr(dp2, name)) < tol
            for name in self._FIELDS
        )

    def _relAbsErr(self, x: float, y: float) -> float:
        # Absolute error when x is (near) zero, relative error otherwise.
        err = abs(x - y)
        if abs(x) < DBL_EPSILON:
            return err
        return err / abs(x)
class VaporPressure(object):
    """Vapor-pressure estimates for a single-substance system.

    Holds the values obtained from the equation of state (EOS) and from the
    Ambrose-Walton (AW), Lee-Kesler (LK) and Antoine correlations, and
    reports each correlation's deviation relative to the EOS value.
    """

    def __init__(self):
        self.EOS = 0
        self.AW = 0
        self.LK = 0
        self.Antoine = 0
        self.AntonieLog = 0

    def setEOS(self, v: float):
        self.EOS = v

    def setAW(self, v: float):
        self.AW = v

    def setLK(self, v: float):
        self.LK = v

    def setAntoine(self, v: float, log=""):
        self.Antoine = v
        self.AntonieLog = log

    def getAWerr(self) -> float:
        """Relative deviation of the Ambrose-Walton value from the EOS value."""
        return self._relError(self.EOS, self.AW)

    def getLKerr(self) -> float:
        """Relative deviation of the Lee-Kesler value from the EOS value."""
        return self._relError(self.EOS, self.LK)

    def getAntoineerr(self) -> float:
        """Relative deviation of the Antoine value from the EOS value."""
        return self._relError(self.EOS, self.Antoine)

    def _relError(self, _x: float, _y: float) -> float:
        # Plain difference when _x is (near) zero, relative difference otherwise.
        err = _x - _y
        if abs(_x) >= DBL_EPSILON:
            err = err / _x
        return err
class Props(object):
    """Container for the computed thermodynamic state of a system.

    Holds pressure, temperature, compressibility, volume, density, vapor
    pressure, fugacity, and the real-gas / ideal-gas property deltas.
    """

    def __init__(self):
        self.P = 0
        self.T = 0
        self.Z = 0
        self.V = 0
        self.rho = 0
        self.Pvp = 0
        self.Fugacity = 0
        self.Props = 0
        self.IGProps = 0
        self.log = ""

    def setRho(self, v: float):
        self.rho = v

    def setPvp(self, v: VaporPressure):
        self.Pvp = v

    def setProps(self, v: DeltaProp):
        self.Props = v

    def setIGProps(self, v: DeltaProp):
        self.IGProps = v

    # BUGFIX: this method was a second ``setIGProps`` definition that set
    # ``self.Fugacity``; the duplicate shadowed the real IGProps setter, so
    # IGProps could never be set and Fugacity was set by a misnamed method.
    def setFugacity(self, v: float):
        self.Fugacity = v
| 2.875 | 3 |
algorithms/randcommuns.py | eXascaleInfolab/clubmark | 14 | 12744 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Brief: Produces rand disjoint communities (clusters) for the given network with sizes similar in the ground truth.
:Description:
Takes number of the resulting communities and their sizes from the specified groundtruth (actually any sample
of the community structure, the real ground truth is not required) and fills stubs of the clusters with
randomly selected nodes from the input network with all their neighbors.
Note: Produced result is a random disjoint partitioning, so if the 'ground truth' had overlapping clusters, then
the number of nodes in the last cluster will be less than in the sample.
:Authors: <NAME> <<EMAIL>>
:Organizations: eXascale lab <http://exascale.info/>, ScienceWise <http://sciencewise.info/>,
Lumais <http://www.lumais.com/>
:Date: 2015-07
"""
from __future__ import print_function, division # Required for stderr output, must be the first import
import sys
import os # Pathes processing
#import igraph as ig
import random as rand
try:
# ATTENTION: Python3 newer treats imports as realtive and results in error here unlike Python2
from utils.parser_nsl import asymnet, loadNsl #pylint: disable=E0611,E0401
except ImportError:
# Note: this case should be the second because explicit relative imports cause various errors
# under Python2 and Python3, which complicates thier handling
from .utils.parser_nsl import asymnet, loadNsl #pylint: disable=E0611,E0401
# Default number of the resulting clusterings (partitions, i.e files that contain disjoint clusters)
_RESNUM = 1
class Params(object):
    """Parsed input arguments for the random-communities generator."""

    def __init__(self):
        """Set every option to its default value."""
        self.groundtruth = None  # file name of the ground-truth clustering
        self.network = None      # file name of the input network
        self.dirnet = False      # True when the input network is directed
        self.outnum = _RESNUM    # number of resulting clusterings to produce
        self.randseed = None     # seed for the clustering generation (auto if absent)
        self.outpseed = False    # output the seed when it was auto-generated
        self.outdir = None       # output directory
        self.outname = None      # base name of the output files (from the network name)
        self.outext = ''         # extension of the output files (from the groundtruth)
def parseParams(args):
    """Parse user-specified parameters.

    Each argument has the form ``-X=<value>`` (``-iu=`` / ``-id=`` for the
    network, where the optional letter marks it (un)directed).

    returns  - parsed input arguments, Params()
    raises ValueError  - on a malformed argument or missing required inputs
    """
    assert isinstance(args, (tuple, list)) and args, 'Input arguments must be specified'
    prm = Params()
    for arg in args:
        # Validate input format: every option is at least '-X=v' (4 chars).
        preflen = 3
        if arg[0] != '-' or len(arg) <= preflen:
            raise ValueError('Unexpected argument: ' + arg)

        if arg[1] == 'g':
            # Ground truth file; its extension is reused for the outputs.
            prm.groundtruth = arg[preflen:]
            prm.outext = os.path.splitext(prm.groundtruth)[1]
        elif arg[1] == 'i':
            # Network file with an optional directedness flag: -i=, -iu=, -id=
            pos = arg.find('=', 2)
            if pos == -1 or arg[2] not in 'ud=' or len(arg) == pos + 1:
                raise ValueError('Unexpected argument: ' + arg)
            pos += 1
            prm.network = arg[pos:]
            prm.outname, netext = os.path.splitext(os.path.split(prm.network)[1])
            # File extension wins over the explicit flag for NSL formats.
            prm.dirnet = asymnet(netext.lower(), arg[2] == 'd')
            if not prm.outname:
                raise ValueError('Invalid network name (is a directory): ' + prm.network)
        elif arg[1] == 'n':
            prm.outnum = int(arg[preflen:])
            assert prm.outnum >= 1, 'outnum must be a natural number'
        elif arg[1] == 'r':
            prm.randseed = arg[preflen:]
        elif arg[1] == 'o':
            prm.outdir = arg[preflen:]
        else:
            raise ValueError('Unexpected argument: ' + arg)

    if not (prm.groundtruth and prm.network):
        raise ValueError('Input network and groundtruth file names must be specified')
    if not prm.outdir:
        # Default the output dir to the network's directory.
        prm.outdir = os.path.split(prm.network)[0]
        if not prm.outdir:
            prm.outdir = '.'
    if not prm.randseed:
        # Auto-generate a seed from the system entropy source when possible.
        try:
            prm.randseed = ''.join(str(ord(c)) for c in os.urandom(8))
        except NotImplementedError:
            prm.randseed = str(rand.random())
        prm.outpseed = True
    return prm
def randcommuns(*args):
    """Generate random clusterings for the specified network.

    Takes the number and sizes of clusters from the groundtruth sample, then
    fills each cluster stub with a random seed node and its neighbors.
    Writes one output file per clustering (one cluster per line), and
    optionally the random seed that was used.

    args  - raw command-line arguments, parsed by parseParams()
    """
    prm = parseParams(args)
    print('Starting randcommuns clustering:'
        '\n\tgroundtruth: {}'
        '\n\t{} network: {}'
        '\n\t{} cls of {} in {} with randseed: {}'
        .format(prm.groundtruth, 'directed' if prm.dirnet else 'undirected', prm.network
        , prm.outnum, prm.outname + prm.outext, prm.outdir, prm.randseed))
    # Load data from simple real-world networks
    graph = loadNsl(prm.network, prm.dirnet)  # ig.Graph.Read_Ncol(network, directed=dirnet) # , weights=False
    # Load cluster-size statistics from the ground truth
    groundstat = []
    with open(prm.groundtruth, 'r') as fground:
        for line in fground:
            # Skip empty lines and comments (possible header)
            if not line or line[0] == '#':
                continue
            groundstat.append(len(line.split()))
    # Create the output dir if required
    if prm.outdir and not os.path.exists(prm.outdir):
        os.makedirs(prm.outdir)
    # Generate rand clusterings
    rand.seed(prm.randseed)
    while prm.outnum > 0:
        prm.outnum -= 1
        # Active (remained) nodes indices of the input network
        actnodes = set(graph.vs.indices)  #pylint: disable=E1101
        clusters = []  # Forming clusters
        # Reference size of the ground truth clusters (they might have overlaps unlike the current partitioning)
        for clmarg in groundstat:
            nodes = []  # Content of the current cluster
            # Check whether all nodes of the initial network are mapped
            if not actnodes:
                break
            # Select a random seed node for this cluster
            ind = rand.sample(actnodes, 1)[0]
            actnodes.remove(ind)
            nodes.append(ind)
            inode = 0  # Index of the node in the current cluster
            # Grow the cluster with neighbors of the already selected nodes
            while len(nodes) < clmarg and actnodes:
                for nd in graph.vs[nodes[inode]].neighbors():  #pylint: disable=E1136
                    if nd.index not in actnodes:
                        continue
                    actnodes.remove(nd.index)
                    nodes.append(nd.index)
                    if len(nodes) >= clmarg or not actnodes:
                        break
                inode += 1
                # No more expandable neighbors: jump to a fresh random node
                if inode >= len(nodes) and len(nodes) < clmarg and actnodes:
                    ind = rand.sample(actnodes, 1)[0]
                    actnodes.remove(ind)
                    nodes.append(ind)
            # Use original labels of the nodes
            clusters.append(graph.vs[ind]['name'] for ind in nodes)  #pylint: disable=E1136
        # Output the resulting clusters, one cluster per line
        with open('/'.join((prm.outdir, ''.join((prm.outname, '_', str(prm.outnum), prm.outext)))), 'w') as fout:
            for cl in clusters:
                # Note: print() unlike fout.write() appends the newline
                print(' '.join(cl), file=fout)
    # Output the randseed used for the generated clusterings
    # Output to the dir above if possible to not mix cluster levels with rand seed
    if prm.outpseed:
        with open('/'.join((prm.outdir, (os.path.splitext(prm.outname)[0] + '.seed'))), 'w') as fout:
            # Note: print() unlike fout.write() appends the newline
            print(prm.randseed, file=fout)
    print('Random clusterings are successfully generated')
if __name__ == '__main__':
    # CLI entry point: with at least two arguments generate the clusterings,
    # otherwise print the usage help.
    if len(sys.argv) > 2:
        randcommuns(*sys.argv[1:])
    else:
        print('\n'.join(('Produces random disjoint partitioning (clusters are formed with rand nodes and their neighbors)'
            ' for the input network specified in the NSL format (generalizaiton of NCOL, SNAP, etc.)\n',
            'Usage: {app} -g=<ground_truth> -i[{{u, d}}]=<input_network> [-n=<res_num>] [-r=<rand_seed>] [-o=<outp_dir>]',
            '',
            '  -g=<ground_truth>  - ground truth clustering as a template for sizes of the resulting communities',
            '  -i[X]=<input_network>  - file of the input network in the format: <src_id> <dst_id> [<weight>]',
            '    Xu  - undirected input network (<src_id> <dst_id> implies also <dst_id> <src_id>). Default',
            '    Xd  - directed input network (both <src_id> <dst_id> and <dst_id> <src_id> are specified)',
            '    NOTE: (un)directed flag is considered only for the networks with non-NSL file extension',
            '  -n=<res_num>  - number of the resulting clusterings to generate. Default: {resnum}',
            '  -r=<rand_seed>  - random seed, string. Default: value from the system rand source (otherwise current time)',
            '  -o=<output_communities>  - . Default: ./<input_network>/'
            )).format(app=sys.argv[0], resnum=_RESNUM))
| 2.53125 | 3 |
utils/save_atten.py | xiaomengyc/SPG | 152 | 12745 | <reponame>xiaomengyc/SPG
import numpy as np
import cv2
import os
import torch
import os
import time
from torchvision import models, transforms
from torch.utils.data import DataLoader
from torch.optim import SGD
from torch.autograd import Variable
# Class-index -> human-readable category name lookups for the supported
# datasets: PASCAL VOC (20 classes) and COCO (80 classes).
idx2catename = {'voc20': ['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow','diningtable','dog','horse',
                          'motorbike','person','pottedplant','sheep','sofa','train','tvmonitor'],
                'coco80': ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
                           'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
                           'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
                           'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
                           'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
                           'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
                           'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
                           'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
                           'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
                           'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
                           'hair drier', 'toothbrush']}
class SAVE_ATTEN(object):
def __init__(self, save_dir='save_bins', dataset=None):
# type: (object, object) -> object
self.save_dir = save_dir
if dataset is not None:
self.idx2cate = self._get_idx2cate_dict(datasetname=dataset)
else:
self.idx2cate = None
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
def save_top_5_pred_labels(self, preds, org_paths, global_step):
img_num = np.shape(preds)[0]
for idx in xrange(img_num):
img_name = org_paths[idx].strip().split('/')[-1]
if '.JPEG' in img_name:
img_id = img_name[:-5]
elif '.png' in img_name or '.jpg' in img_name:
img_id = img_name[:-4]
out = img_id + ' ' + ' '.join(map(str, preds[idx,:])) + '\n'
out_file = os.path.join(self.save_dir, 'pred_labels.txt')
if global_step == 0 and idx==0 and os.path.exists(out_file):
os.remove(out_file)
with open(out_file, 'a') as f:
f.write(out)
    def save_masked_img_batch(self, path_batch, atten_batch, label_batch):
        """Blend and save the attention overlay for every image in the batch.

        path_batch: source image paths.
        atten_batch: attention maps; a tensor with a ``.size()`` method
            (presumably torch, shape (N, C, H, W) — TODO confirm).
        label_batch: ground-truth class index per image, selects the channel.
        """
        #img_num = np.shape(atten_batch)[0]
        img_num = atten_batch.size()[0]
        # fid = open('imagenet_val_shape.txt', 'a')
        # print(np.shape(img_batch), np.shape(label_batch), np.shape(org_size_batch), np.shape(atten_batch))
        for idx in xrange(img_num):
            # Move each attention map to host memory before rendering.
            atten = atten_batch[idx]
            atten = atten.cpu().data.numpy()
            label = label_batch[idx]
            label = int(label)
            self._save_masked_img(path_batch[idx], atten, label)
def _get_idx2cate_dict(self, datasetname=None):
if datasetname not in idx2catename.keys():
print 'The given %s dataset category names are not available. The supported are: %s'\
%(str(datasetname),','.join(idx2catename.keys()))
return None
else:
return {idx:cate_name for idx, cate_name in enumerate(idx2catename[datasetname])}
def _save_masked_img(self, img_path, atten, label):
'''
save masked images with only one ground truth label
:param path:
:param img:
:param atten:
:param org_size:
:param label:
:param scores:
:param step:
:param args:
:return:
'''
if not os.path.isfile(img_path):
raise 'Image not exist:%s'%(img_path)
img = cv2.imread(img_path)
org_size = np.shape(img)
w = org_size[0]
h = org_size[1]
attention_map = atten[label,:,:]
atten_norm = attention_map
print(np.shape(attention_map), 'Max:', np.max(attention_map), 'Min:',np.min(attention_map))
# min_val = np.min(attention_map)
# max_val = np.max(attention_map)
# atten_norm = (attention_map - min_val)/(max_val - min_val)
atten_norm = cv2.resize(atten_norm, dsize=(h,w))
atten_norm = atten_norm* 255
heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
img_id = img_path.strip().split('/')[-1]
img_id = img_id.strip().split('.')[0]
save_dir = os.path.join(self.save_dir, img_id+'.png')
cv2.imwrite(save_dir, img)
def get_img_id(self, path):
img_id = path.strip().split('/')[-1]
return img_id.strip().split('.')[0]
def save_top_5_atten_maps(self, atten_fuse_batch, top_indices_batch, org_paths, topk=5):
'''
Save top-5 localization maps for generating bboxes
:param atten_fuse_batch: normalized last layer feature maps of size (batch_size, C, W, H), type: numpy array
:param top_indices_batch: ranked predicted labels of size (batch_size, C), type: numpy array
:param org_paths:
:param args:
:return:
'''
img_num = np.shape(atten_fuse_batch)[0]
for idx in xrange(img_num):
img_id = org_paths[idx].strip().split('/')[-1][:-4]
for k in range(topk):
atten_pos = top_indices_batch[idx, k]
atten_map = atten_fuse_batch[idx, atten_pos,:,:]
heat_map = cv2.resize(atten_map, dsize=(224, 224))
# heat_map = cv2.resize(atten_map, dsize=(img_shape[1], img_shape[0]))
heat_map = heat_map* 255
save_path = os.path.join(self.save_dir, 'heat_maps', 'top%d'%(k+1))
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path = os.path.join(save_path,img_id+'.png')
cv2.imwrite(save_path, heat_map)
# def save_heatmap_segmentation(self, img_path, atten, gt_label, save_dir=None, size=(224,224), maskedimg=False):
# assert np.ndim(atten) == 4
#
# labels_idx = np.where(gt_label[0]==1)[0] if np.ndim(gt_label)==2 else np.where(gt_label==1)[0]
#
# if save_dir is None:
# save_dir = self.save_dir
# if not os.path.exists(save_dir):
# os.mkdir(save_dir)
#
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
# for i in range(batch_size):
# img, size = self.read_img(img_path[i], size=size)
# atten_img = atten[i] #get attention maps for the i-th img of the batch
# img_name = self.get_img_id(img_path[i])
# img_dir = os.path.join(save_dir, img_name)
# if not os.path.exists(img_dir):
# os.mkdir(img_dir)
# for k in labels_idx:
# atten_map_k = atten_img[k,:,:]
# atten_map_k = cv2.resize(atten_map_k, dsize=size)
# if maskedimg:
# img_to_save = self._add_msk2img(img, atten_map_k)
# else:
# img_to_save = self.normalize_map(atten_map_k)*255.0
#
# save_path = os.path.join(img_dir, '%d.png'%(k))
# cv2.imwrite(save_path, img_to_save)
def normalize_map(self, atten_map):
min_val = np.min(atten_map)
max_val = np.max(atten_map)
atten_norm = (atten_map - min_val)/(max_val - min_val)
return atten_norm
def _add_msk2img(self, img, msk, isnorm=True):
if np.ndim(img) == 3:
assert np.shape(img)[0:2] == np.shape(msk)
else:
assert np.shape(img) == np.shape(msk)
if isnorm:
min_val = np.min(msk)
max_val = np.max(msk)
atten_norm = (msk - min_val)/(max_val - min_val)
atten_norm = atten_norm* 255
heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
w_img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
return w_img
def _draw_text(self, pic, txt, pos='topleft'):
font = cv2.FONT_HERSHEY_SIMPLEX #multiple line
txt = txt.strip().split('\n')
stat_y = 30
for t in txt:
pic = cv2.putText(pic,t,(10,stat_y), font, 0.8,(255,255,255),2,cv2.LINE_AA)
stat_y += 30
return pic
def _mark_score_on_picture(self, pic, score_vec, label_idx):
score = score_vec[label_idx]
txt = '%.3f'%(score)
pic = self._draw_text(pic, txt, pos='topleft')
return pic
def get_heatmap_idxes(self, gt_label):
labels_idx = []
if np.ndim(gt_label) == 1:
labels_idx = np.expand_dims(gt_label, axis=1).astype(np.int)
elif np.ndim(gt_label) == 2:
for row in gt_label:
idxes = np.where(row[0]==1)[0] if np.ndim(row)==2 else np.where(row==1)[0]
labels_idx.append(idxes.tolist())
else:
labels_idx = None
return labels_idx
def get_map_k(self, atten, k, size=(224,224)):
atten_map_k = atten[k,:,:]
# print np.max(atten_map_k), np.min(atten_map_k)
atten_map_k = cv2.resize(atten_map_k, dsize=size)
return atten_map_k
def read_img(self, img_path, size=(224,224)):
img = cv2.imread(img_path)
if img is None:
print "Image does not exist. %s" %(img_path)
exit(0)
if size == (0,0):
size = np.shape(img)[:2]
else:
img = cv2.resize(img, size)
return img, size[::-1]
def get_masked_img(self, img_path, atten, gt_label,
size=(224,224), maps_in_dir=False, save_dir=None, only_map=False):
assert np.ndim(atten) == 4
save_dir = save_dir if save_dir is not None else self.save_dir
if isinstance(img_path, list) or isinstance(img_path, tuple):
batch_size = len(img_path)
label_indexes = self.get_heatmap_idxes(gt_label)
for i in range(batch_size):
img, size = self.read_img(img_path[i], size)
img_name = img_path[i].split('/')[-1]
img_name = img_name.strip().split('.')[0]
if maps_in_dir:
img_save_dir = os.path.join(save_dir, img_name)
os.mkdir(img_save_dir)
for k in label_indexes[i]:
atten_map_k = self.get_map_k(atten[i], k , size)
msked_img = self._add_msk2img(img, atten_map_k)
suffix = str(k+1)
if only_map:
save_img = (self.normalize_map(atten_map_k)*255).astype(np.int)
else:
save_img = msked_img
if maps_in_dir:
cv2.imwrite(os.path.join(img_save_dir, suffix + '.png'), save_img)
else:
cv2.imwrite(os.path.join(save_dir, img_name + '_' + suffix + '.png'), save_img)
# if score_vec is not None and labels_idx is not None:
# msked_img = self._mark_score_on_picture(msked_img, score_vec, labels_idx[k])
# if labels_idx is not None:
# suffix = self.idx2cate.get(labels_idx[k], k)
# def get_masked_img_ml(self, img_path, atten, save_dir=None, size=(224,224),
# gt_label=None, score_vec=None):
# assert np.ndim(atten) == 4
#
# if gt_label is not None and self.idx2cate is not None:
# labels_idx = np.where(gt_label[0]==1)[0] if np.ndim(gt_label)==2 else np.where(gt_label==1)[0]
# else:
# labels_idx = None
#
#
# if save_dir is not None:
# self.save_dir = save_dir
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
# for i in range(batch_size):
# img = cv2.imread(img_path[i])
# if img is None:
# print "Image does not exist. %s" %(img_path[i])
# exit(0)
#
# else:
# atten_img = atten[i] #get attention maps for the i-th img
# img_name = img_path[i].split('/')[-1]
# for k in range(np.shape(atten_img)[0]):
# if size == (0,0):
# w, h, _ = np.shape(img)
# # h, w, _ = np.shape(img)
# else:
# h, w = size
# img = cv2.resize(img, dsize=(h, w))
# atten_map_k = atten_img[k,:,:]
# # print np.max(atten_map_k), np.min(atten_map_k)
# atten_map_k = cv2.resize(atten_map_k, dsize=(h,w))
# msked_img = self._add_msk2img(img, atten_map_k)
# if score_vec is not None and labels_idx is not None:
# msked_img = self._mark_score_on_picture(msked_img, score_vec, labels_idx[k])
# if labels_idx is not None:
# suffix = self.idx2cate.get(labels_idx[k], k)
# else:
# suffix = str(k)
# if '.' in img_name:
# img_name = img_name.strip().split('.')[0]
# cv2.imwrite(os.path.join(self.save_dir, img_name + '_' + suffix + '.png'), msked_img)
#
#
# def get_masked_img(self, img_path, atten, save_dir=None, size=(224,224), combine=True):
# '''
#
# :param img_path:
# :param atten:
# :param size: if it is (0,0) use original image size, otherwise use the specified size.
# :param combine:
# :return:
# '''
#
# if save_dir is not None:
# self.save_dir = save_dir
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
#
# for i in range(batch_size):
# atten_norm = atten[i]
# min_val = np.min(atten_norm)
# max_val = np.max(atten_norm)
# atten_norm = (atten_norm - min_val)/(max_val - min_val)
# # print np.max(atten_norm), np.min(atten_norm)
# img = cv2.imread(img_path[i])
# if img is None:
# print "Image does not exist. %s" %(img_path[i])
# exit(0)
#
# if size == (0,0):
# w, h, _ = np.shape(img)
# # h, w, _ = np.shape(img)
# else:
# h, w = size
# img = cv2.resize(img, dsize=(h, w))
#
# atten_norm = cv2.resize(atten_norm, dsize=(h,w))
# # atten_norm = cv2.resize(atten_norm, dsize=(w,h))
# atten_norm = atten_norm* 255
# heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
# img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
#
#
# # font = cv2.FONT_HERSHEY_SIMPLEX
# # cv2.putText(img,'OpenCV \n hello',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
#
# img_name = img_path[i].split('/')[-1]
# print os.path.join(self.save_dir, img_name)
# cv2.imwrite(os.path.join(self.save_dir, img_name), img)
def get_atten_map(self, img_path, atten, save_dir=None, size=(321,321)):
'''
:param img_path:
:param atten:
:param size: if it is (0,0) use original image size, otherwise use the specified size.
:param combine:
:return:
'''
if save_dir is not None:
self.save_dir = save_dir
if isinstance(img_path, list) or isinstance(img_path, tuple):
batch_size = len(img_path)
for i in range(batch_size):
atten_norm = atten[i]
min_val = np.min(atten_norm)
max_val = np.max(atten_norm)
atten_norm = (atten_norm - min_val)/(max_val - min_val)
# print np.max(atten_norm), np.min(atten_norm)
h, w = size
atten_norm = cv2.resize(atten_norm, dsize=(h,w))
# atten_norm = cv2.resize(atten_norm, dsize=(w,h))
atten_norm = atten_norm* 255
img_name = img_path[i].split('/')[-1]
img_name = img_name.replace('jpg', 'png')
cv2.imwrite(os.path.join(self.save_dir, img_name), atten_norm)
class DRAW(object):
    """Unfinished drawing helper (stub); kept for API compatibility."""
    def __init__(self):
        pass
    def draw_text(self, img, text):
        # NOTE(review): only the dict case is branched on and it is a no-op;
        # the method currently does nothing and returns None.
        if isinstance(text, dict):
            pass
| 2.109375 | 2 |
integration/config/service_names.py | hawflau/serverless-application-model | 0 | 12746 | <gh_stars>0
# Symbolic names for AWS services / SAM features.
# NOTE(review): presumably used to tag and filter integration tests by the
# service they exercise — confirm against the callers of this module.
COGNITO = "Cognito"
SERVERLESS_REPO = "ServerlessRepo"
MODE = "Mode"
XRAY = "XRay"
LAYERS = "Layers"
HTTP_API = "HttpApi"
IOT = "IoT"
CODE_DEPLOY = "CodeDeploy"
ARM = "ARM"
GATEWAY_RESPONSES = "GatewayResponses"
MSK = "MSK"
KMS = "KMS"
CWE_CWS_DLQ = "CweCwsDlq"
CODE_SIGN = "CodeSign"
MQ = "MQ"
USAGE_PLANS = "UsagePlans"
SCHEDULE_EVENT = "ScheduleEvent"
DYNAMO_DB = "DynamoDB"
KINESIS = "Kinesis"
SNS = "SNS"
SQS = "SQS"
CUSTOM_DOMAIN = "CustomDomain"
| 1.320313 | 1 |
tools/onnx_utilis/export_vfe_weight.py | neolixcn/OpenPCDet | 0 | 12747 | import onnx
import onnxruntime
import torch
import onnx.numpy_helper
# added by huxi, load rpn config
from pcdet.pointpillar_quantize_config import load_rpn_config_json
# ========================================
# Load the RPN quantisation config and the VFE ONNX model it points at,
# validating the model before any initializers are read from it.
config_dict = load_rpn_config_json.get_config()
onnx_model_file = config_dict["vfe_onnx_file"]
onnx_model = onnx.load(onnx_model_file)
onnx.checker.check_model(onnx_model)
# Raises if the model is structurally invalid.
def to_numpy(tensor):
    """Detach *tensor* from the autograd graph if needed and return it as a numpy array."""
    if tensor.requires_grad:
        tensor = tensor.detach()
    return tensor.cpu().numpy()
#[tensor_mat_weight] = [t for t in onnx_model.graph.initializer if t.name == "linear.weight"]
def _flat_initializer(name, transpose=False):
    """Locate the named graph initializer and return its values as a flat list."""
    [tensor] = [t for t in onnx_model.graph.initializer if t.name == name]
    arr = onnx.numpy_helper.to_array(tensor)
    if transpose:
        arr = arr.transpose()
    return list(arr.flatten())

# The dense-layer weight ("14") is stored transposed in the ONNX graph.
mat_w_list = _flat_initializer("14", transpose=True)
bn_gamma_w_list = _flat_initializer("norm.weight")
bn_beta_w_list = _flat_initializer("norm.bias")
bn_mean_w_list = _flat_initializer("norm.running_mean")
bn_var_w_list = _flat_initializer("norm.running_var")

exported_vfe_weight_file = config_dict["vfe_exported_weight_file"]
# One group per tensor: every value is followed by a single space; a newline
# separates groups, but (as in the original output format) there is no
# trailing newline after the final variance group.
weight_groups = [mat_w_list, bn_gamma_w_list, bn_beta_w_list,
                 bn_mean_w_list, bn_var_w_list]
result_line = "\n".join("".join(str(v) + " " for v in group) for group in weight_groups)
with open(exported_vfe_weight_file, 'w') as f:
    f.write(result_line)
| 1.914063 | 2 |
color_extractor/cluster.py | hcoura/color-extractor | 276 | 12748 | <filename>color_extractor/cluster.py
from sklearn.cluster import KMeans
from .exceptions import KMeansException
from .task import Task
class Cluster(Task):
    """
    Use the K-Means algorithm to group pixels by clusters. The algorithm tries
    to determine the optimal number of clusters for the given pixels.
    """
    def __init__(self, settings=None):
        """Initialise with optional settings (see _default_settings for keys)."""
        if settings is None:
            settings = {}
        super(Cluster, self).__init__(settings)
        # Fixed fit parameters passed straight to sklearn's KMeans.
        self._kmeans_args = {
            'max_iter': 50,
            'tol': 1.0,
        }
    def get(self, img):
        """Cluster *img* pixels; returns (k, labels, centers) for the best k."""
        a = self._settings['algorithm']
        if a == 'kmeans':
            return self._jump(img)
        raise ValueError('Unknown algorithm {}'.format(a))
    def _kmeans(self, img, k):
        """Run one KMeans fit with k clusters.

        :raises KMeansException: if the underlying fit fails.
        """
        kmeans = KMeans(n_clusters=k, **self._kmeans_args)
        try:
            kmeans.fit(img)
        except Exception as exc:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrow it and chain the cause.
            raise KMeansException() from exc
        return kmeans.inertia_, kmeans.labels_, kmeans.cluster_centers_
    def _jump(self, img):
        """Jump method: pick the k whose distorsion drop is largest."""
        npixels = img.size
        best = None
        prev_distorsion = 0
        largest_diff = float('-inf')
        for k in range(self._settings['min_k'], self._settings['max_k']):
            compact, labels, centers = self._kmeans(img, k)
            distorsion = Cluster._square_distorsion(npixels, compact, 1.5)
            diff = prev_distorsion - distorsion
            prev_distorsion = distorsion
            if diff > largest_diff:
                largest_diff = diff
                best = k, labels, centers
        return best
    @staticmethod
    def _default_settings():
        """Defaults: try k in [min_k, max_k) with plain KMeans."""
        return {
            'min_k': 2,
            'max_k': 7,
            'algorithm': 'kmeans',
        }
    @staticmethod
    def _square_distorsion(npixels, compact, y):
        """Transformed (per-pixel) distorsion used by the jump method."""
        return pow(compact / npixels, -y)
| 2.96875 | 3 |
notebooks/datasets.py | jweill-aws/jupyterlab-data-explorer | 173 | 12749 | <reponame>jweill-aws/jupyterlab-data-explorer
#
# @license BSD-3-Clause
#
# Copyright (c) 2019 Project Jupyter Contributors.
# Distributed under the terms of the 3-Clause BSD License.
import IPython.display
import pandas
def output_url(url):
    """Publish *url* to the notebook frontend as a relative-dataset-URL payload."""
    payload = {"application/x.jupyter.relative-dataset-urls+json": [url]}
    IPython.display.publish_display_data(payload)
| 1.617188 | 2 |
django_india/conf.py | k-mullapudi/django-india | 0 | 12750 | <filename>django_india/conf.py
import django.conf
# Base URLs of the GeoNames export endpoints the importer downloads from.
url_bases = {
    'geonames': {
        'dump': 'http://download.geonames.org/export/dump/',
        'zip': 'http://download.geonames.org/export/zip/',
    },
}
# ISO 3166-1 alpha-2 code used to restrict GeoNames data to India.
india_country_code = 'IN'
# Per-entity download descriptors. NOTE(review): 'filename', the '(unknown)'
# URL suffixes and the 'fields' lists are unfinished placeholders — they must
# be filled in before any download/import can work.
files = {
    'state': {
        'filename': '',
        'urls': [
            url_bases['geonames']['dump'] + '(unknown)',
        ],
        'fields': [
        ]
    },
    'district': {
        'filename': '',
        'urls': [
            url_bases['geonames']['dump'] + '(unknown)',
        ],
        'fields': [
        ]
    },
    'city': {
        'filename': '',
        'urls': [
            url_bases['geonames']['dump'] + '(unknown)',
        ],
        'fields': [
        ]
    }
}
# Placeholder for per-language metadata; intentionally empty for now.
LANGUAGE_DATA = {
}
class AppSettings(object):
    """
    App-specific default settings that the project's Django settings can
    override: any ALL-CAPS attribute is first looked up on
    ``django.conf.settings`` before falling back to this object.
    """
    def __getattribute__(self, attr):
        # Only ALL-CAPS names are treated as overridable settings; everything
        # else goes straight to normal attribute lookup.
        if attr != attr.upper():
            return super(AppSettings, self).__getattribute__(attr)
        try:
            return getattr(django.conf.settings, attr)
        except AttributeError:
            # Not overridden in the project settings — use our own value.
            return super(AppSettings, self).__getattribute__(attr)
| 2.015625 | 2 |
src/dao/evaluation_dao.py | Asconius/trading-bot | 2 | 12751 | <filename>src/dao/evaluation_dao.py<gh_stars>1-10
from decimal import Decimal
from typing import List
from src.dao.dao import DAO
from src.dto.attempt_dto import AttemptDTO
from src.entity.evaluation_entity import EvaluationEntity
from src.utils.utils import Utils
class EvaluationDAO:
    """Persistence helpers for EvaluationEntity rows."""

    # The six attempt parameters shared by create() and read_attempt().
    _ATTEMPT_FIELDS = ('amount_buy', 'distance_buy', 'delta_buy',
                       'amount_sell', 'distance_sell', 'delta_sell')

    @staticmethod
    def create(summation: Decimal, funds: str, attempt: AttemptDTO) -> None:
        """Persist a new evaluation row built from *attempt*'s parameters."""
        evaluation: EvaluationEntity = EvaluationEntity()
        evaluation.timestamp = Utils.now()
        evaluation.sum = str(summation)
        evaluation.funds = funds
        # All attempt values are stored as strings, as before.
        attempt_values = {name: str(getattr(attempt, name))
                          for name in EvaluationDAO._ATTEMPT_FIELDS}
        Utils.set_attributes(evaluation, **attempt_values)
        DAO.persist(evaluation)

    @staticmethod
    def read_order_by_sum() -> EvaluationEntity:
        """Return the evaluation with the highest sum."""
        return EvaluationEntity.query.order_by(EvaluationEntity.sum.desc()).first()

    @staticmethod
    def read_attempt(attempt: AttemptDTO) -> EvaluationEntity:
        """Return the evaluation matching every parameter of *attempt*."""
        query = EvaluationEntity.query
        for name in EvaluationDAO._ATTEMPT_FIELDS:
            query = query.filter_by(**{name: getattr(attempt, name)})
        return query.first()

    @staticmethod
    def read_all() -> List[EvaluationEntity]:
        """Return every stored evaluation."""
        return EvaluationEntity.query.all()
| 2.421875 | 2 |
src/lib/others/info_gathering/finder/finding_comment.py | nahuelhm17/vault_scanner | 230 | 12752 | #! /usr/bin/python
import requests
import re
from bs4 import BeautifulSoup
import colors
class FindingComments(object):
    """Fetch a URL's HTML source and report the HTML comments found in it."""

    def __init__(self, url):
        self.url = url
        # Regex patterns for the comment styles to search for.
        self.comment_list = ['<!--(.*)-->']
        # pattern -> list of matches, filled by find_comment().
        self.found_comments = {}

    def get_soure_code(self):
        """Return the page body. (Method-name typo kept for backward compatibility.)"""
        resp_text = requests.get(self.url).text
        return resp_text

    def find_comment(self):
        """Populate found_comments with every match of each pattern."""
        source_code = self.get_soure_code()
        for comment in self.comment_list:
            self.found_comments[comment] = re.findall(comment, source_code)

    def parse_comments(self):
        """Print each pattern's matches and return {pattern: matches}.

        BUG FIX: the original tested ``len(self.found_comments) > 0``, which
        is always true after find_comment() (one dict entry per pattern even
        when nothing matched), so 'No comment found' was unreachable. We now
        report per-pattern hits and only signal an error when no pattern
        matched anything; the returned dict still contains every pattern.
        """
        self.find_comment()
        comment_dict = {}
        found_any = False
        for comment_code, comments in self.found_comments.items():
            comment_dict[comment_code] = comments
            if comments:
                found_any = True
                colors.success('Found for {} : {}'
                               .format(comment_code, comments))
        if not found_any:
            colors.error('No comment found')
        return comment_dict
| 3.09375 | 3 |
micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py | WenJinfeng/PyCG | 121 | 12753 | d = {"1": "a"}
d[1]
d["1"]
| 2.5 | 2 |
scripts/link_assignment.py | metagenomics/antibio | 4 | 12754 | #!/usr/bin/python
# This program revises the existing overview file.
# If a keyword is found in an Abstract of an accession of a gene, the url of the abstract is added to the overview file
# The revised overview.txt is created in the same directory of the old one and named overview_new.txt
"""
Usage: link_assignment.py -o <overview> -pub <pubhits>
-h --help Please enter the files overview.txt and the pubhits.
"""
from docopt import docopt
from sys import argv
import csv
import os
import util
def load_pubhits_in_dict(pubhits_path):
    """Read the pubhits TSV and return {gene_id: full row} for every row.

    Gene ids are stripped of surrounding whitespace before being used as keys.
    """
    with open(pubhits_path, 'r') as pubhits_file:
        pubhits_reader = csv.reader(pubhits_file, delimiter='\t')
        # Idiom: dict comprehension instead of dict(generator).
        return {row[util.PUBHITS_GENE_ID_INDEX].strip(): row for row in pubhits_reader}
def build_overview_link(pubhits_dict, gene_id, links):
    """
    Build the pubhits link string for a gene, appending to any existing links.

    :param pubhits_dict: mapping of gene id -> pubhits row
    :param gene_id: gene id to look up
    :param links: the gene's existing links field
    :return: the combined link string (or the no-keywords marker)
    """
    row = pubhits_dict[gene_id]
    entry = row[util.PUBHITS_ACC_INDEX] + ":" + row[util.PUBHITS_LINK_INDEX]
    if links.strip() == util.NO_LINK:
        overview_link = entry
    else:
        overview_link = ','.join([links, entry])
    if not overview_link or overview_link == util.TODO:
        overview_link = util.NO_KEYWORDS
    return overview_link
def set_link_in_row(old_row, pubhits_dict):
    """
    Attach the pubhits link to an overview row when its gene id has pubhits.

    :param old_row: overview row (mutated in place when a link is added)
    :param pubhits_dict: mapping of gene id -> pubhits row
    :return: the (possibly updated) row
    """
    gene_id = old_row[util.GENE_ID]
    if gene_id not in pubhits_dict:
        return old_row
    old_row[util.LINKS] = build_overview_link(pubhits_dict, gene_id, old_row[util.LINKS])
    return old_row
def main():
    """CLI entry point: read overview + pubhits, write <overview>_new.txt with links."""
    args = docopt(__doc__, argv[1:])
    overview_path = args['<overview>']
    pubhits = args['<pubhits>']
    # Output lands next to the input, e.g. overview.txt -> overview_new.txt.
    new_overview_path = os.path.splitext(overview_path)[0] + "_new.txt"
    pubhits_dict = load_pubhits_in_dict(pubhits)
    with open(overview_path, 'r') as overview, open(new_overview_path, 'w') as new_overview:
        overview_reader = csv.DictReader(overview, delimiter='\t')
        # The writer's fieldnames come from the raw header line; the file is
        # then rewound with seek(0) so the (lazy) DictReader re-consumes the
        # header itself on first iteration.
        overview_writer = csv.DictWriter(new_overview, delimiter='\t', extrasaction='ignore',
                                        fieldnames=overview.readline().rstrip('\n').split("\t"))
        overview.seek(0)
        overview_writer.writeheader()
        for overview_row in overview_reader:
            overview_row = set_link_in_row(overview_row, pubhits_dict)
            overview_writer.writerow(overview_row)
if __name__ == '__main__':
    main()
| 3.109375 | 3 |
app/views.py | LauretteMongina/Instagram-clone | 0 | 12755 | from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import *
import cloudinary
import cloudinary.uploader
import cloudinary.api
from django.http import HttpResponseRedirect, JsonResponse
from .forms import RegistrationForm, UpdateUserForm, UpdateUserProfileForm, ImageForm, CommentForm
from django.contrib.auth import login, authenticate
from .models import Image, Comment, Profile
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.views.generic import RedirectView
from .email import send_welcome_email
# Create your views here.
def registration(request):
    """Handle user sign-up: validate the form, create the user, and log them in."""
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # BUG FIX: the source contained an anonymisation placeholder
            # ('<PASSWORD>') here, which is not valid Python. 'password1' is
            # the conventional UserCreationForm field name.
            # NOTE(review): confirm against RegistrationForm's declared fields.
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            email = form.cleaned_data.get('email')
            login(request, user)
            # Welcome-email sending was intentionally disabled upstream:
            # recipient = Profile(user = user,email =email)
            # recipient.save()
            # send_welcome_email(user,email)
            return redirect('index')
    else:
        form = RegistrationForm()
    return render(request, 'registration/registration.html', {'form': form})
@login_required(login_url='login')
def index(request):
    """Feed view: list all images and other users; handle new image uploads."""
    images = Image.objects.all()
    users = User.objects.exclude(id=request.user.id)
    if request.method == 'POST':
        form = ImageForm(request.POST, request.FILES)
        if form.is_valid():
            image = form.save(commit=False)
            # NOTE(review): the uploader is never attached (the assignment
            # below is commented out), so images are saved without an owner.
            # image.user = request.user.profile
            image.save()
            # Post/Redirect/Get: reload the same page after a successful upload.
            return HttpResponseRedirect(request.path_info)
    else:
        form = ImageForm()
    params = {
        'images': images,
        'form': form,
        'users': users,
    }
    return render(request, 'insta/index.html', params)
@login_required(login_url='login')
def profile(request, username):
    """Own-profile view: show the logged-in user's images, handle profile edits.

    NOTE(review): the `username` argument is ignored — data always comes from
    request.user, so any /<username> URL shows the logged-in user's profile.
    """
    images = request.user.profile.images.all()
    print(images)  # NOTE(review): leftover debug output — consider removing.
    if request.method == 'POST':
        user_form = UpdateUserForm(request.POST, instance=request.user)
        # NOTE(review): no `instance=` is passed here, so saving may create a
        # new profile record rather than updating the existing one;
        # `instance=request.user.profile` looks intended — confirm against
        # UpdateUserProfileForm's model.
        profile_form = UpdateUserProfileForm(request.POST, request.FILES)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            return HttpResponseRedirect(request.path_info)
    else:
        user_form = UpdateUserForm(instance=request.user)
        # NOTE(review): passes the User (not user.profile) to a profile form —
        # verify which model UpdateUserProfileForm wraps.
        profile_form = UpdateUserProfileForm(instance=request.user)
    params = {
        'profile_form': profile_form,
        'user_form': user_form,
        'images': images,
    }
    return render(request, 'insta/profile.html', params)
@login_required(login_url='/accounts/login/')
def user_profile(request, username):
    """Public profile page; viewing yourself redirects to the own-profile view."""
    user_prof = get_object_or_404(User, username=username)
    if request.user == user_prof:
        return redirect('profile', username=request.user.username)
    context = {
        'user_prof': user_prof,
        'user_posts': user_prof.profile.images.all(),
    }
    return render(request, 'insta/user.html', context)
@login_required(login_url='/accounts/login/')
def like_image(request, id):
    """Toggle the current user's like on an image and keep like_count in sync."""
    # BUG FIX: the original fetched the *first* Likes row for the image
    # regardless of user, so un-liking could delete another user's like even
    # though the existence check below was user-scoped. Scope the fetch to
    # this user as well.
    likes = Likes.objects.filter(image_id=id, user_id=request.user.id).first()
    if likes is not None:
        # Un-like: remove the row and decrement the counter (never below 0).
        likes.delete()
        image = Image.objects.get(id=id)
        if image.like_count > 0:
            image.like_count -= 1
        image.save()
        return redirect('/')
    # Like: create the row and increment the counter.
    likes = Likes(image_id=id, user_id=request.user.id)
    likes.save()
    image = Image.objects.get(id=id)
    image.like_count = image.like_count + 1
    image.save()
    return redirect('/')
@login_required(login_url='login')
def search(request):
    """Search for user profiles by the 'search_user' query parameter."""
    if 'search_user' in request.GET and request.GET['search_user']:
        search_term = request.GET.get('search_user')
        results = Profile.search(search_term)
        print(results)  # NOTE(review): debug output; results are otherwise unused here.
        message = f'{search_term}'
        # Removed the unused local `title` from the original.
        # NOTE(review): the success path renders 'search.html' while the error
        # path renders 'insta/search.html' — confirm which template exists.
        return render(request, 'search.html', {'success': message})
    else:
        message = 'You havent searched for any term'
        return render(request, 'insta/search.html', {'danger': message})
@login_required(login_url='login')
def comment(request, id):
    """Single-image view: show an image plus its comments and accept new ones."""
    image = get_object_or_404(Image, pk=id)
    comments = image.comments.all()
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            new_comment = form.save(commit=False)
            comments = form.cleaned_data['comment']
            new_comment.image = image
            new_comment.user = request.user.profile
            image.save()
            new_comment.save()
            return HttpResponseRedirect(request.path_info)
    else:
        form = CommentForm()
    context = {
        'image': image,
        'form': form,
        'comments': comments,
    }
    image.save()
    return render(request, 'insta/single.html', context)
at_export_config.py | Fmstrat/FreeCAD-ArchTextures | 21 | 12756 | <reponame>Fmstrat/FreeCAD-ArchTextures<gh_stars>10-100
import FreeCAD, FreeCADGui
from arch_texture_utils.resource_utils import iconPath
import arch_texture_utils.qtutils as qtutils
from arch_texture_utils.selection_utils import findSelectedTextureConfig
class ExportTextureConfigCommand:
    """FreeCAD command that writes a TextureConfig object's data to a JSON file."""
    toolbarName = 'ArchTexture_Tools'
    commandName = 'Export_Config'
    def GetResources(self):
        """Return the command's menu text, tooltip and icon for FreeCAD."""
        return {'MenuText': "Export Texture Config",
                'ToolTip' : "Exports the configuration stored inside a TextureConfig object to a file",
                'Pixmap': iconPath('ExportConfig.svg')
               }
    def Activated(self):
        """Export the selected TextureConfig to a user-chosen JSON file."""
        textureConfig = findSelectedTextureConfig()
        if textureConfig is None:
            qtutils.showInfo("No TextureConfig selected", "Select exactly one TextureConfig object to export its content")
            return
        selectedFile = qtutils.userSelectedFile('Export Location', qtutils.JSON_FILES, False)
        if selectedFile is None:
            return
        # BUG FIX: the file handle was never closed; a context manager
        # guarantees it is flushed and released even if export() raises.
        with open(selectedFile, 'w') as fileObject:
            textureConfig.export(fileObject)
    def IsActive(self):
        """If there is no active document we can't do anything."""
        # Idiom fix: `not x is None` -> `x is not None` (same behavior).
        return FreeCAD.ActiveDocument is not None
if __name__ == "__main__":
    # Run interactively: execute the command right away if a document is open.
    command = ExportTextureConfigCommand()
    if command.IsActive():
        command.Activated()
    else:
        qtutils.showInfo("No open Document", "There is no open document")
else:
    # Imported as part of the workbench: register on the toolbar instead.
    import archtexture_toolbars
    archtexture_toolbars.toolbarManager.registerCommand(ExportTextureConfigCommand())
barcode/charsets/ean.py | Azd325/python-barcode | 0 | 12757 | <reponame>Azd325/python-barcode<gh_stars>0
# Patterns are strings of '1' (bar) and '0' (space), one character per module.
# Guard pattern at both ends of an EAN barcode.
EDGE = '101'
# Separator between the left and right halves of the symbol.
MIDDLE = '01010'
# 7-module encodings of the digits 0-9 for the three EAN code sets A, B, C.
CODES = {
    'A': (
        '0001101', '0011001', '0010011', '0111101', '0100011', '0110001',
        '0101111', '0111011', '0110111', '0001011'
    ),
    'B': (
        '0100111', '0110011', '0011011', '0100001', '0011101', '0111001',
        '0000101', '0010001', '0001001', '0010111'
    ),
    'C': (
        '1110010', '1100110', '1101100', '1000010', '1011100', '1001110',
        '1010000', '1000100', '1001000', '1110100'
    ),
}
# Code-set (A/B) parity sequence for the six left-half digits, indexed by the
# leading (implied) digit of the number — the EAN-13 parity table.
LEFT_PATTERN = (
    'AAAAAA', 'AABABB', 'AABBAB', 'AABBBA', 'ABAABB', 'ABBAAB', 'ABBBAA',
    'ABABAB', 'ABABBA', 'ABBABA'
)
| 2.40625 | 2 |
Sushant_Boosting/code.py | sushant-bahekar/ga-learner-dsmp-repo | 0 | 12758 | # --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
# Code starts here
df = pd.read_csv(path)
df.head(5)
X = df.drop(['customerID','Churn'],1)
y = df['Churn']
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
#Replacing spaces with 'NaN' in train dataset
X_train['TotalCharges'].replace(' ',np.NaN, inplace=True)
#Replacing spaces with 'NaN' in test dataset
X_test['TotalCharges'].replace(' ',np.NaN, inplace=True)
#Converting the type of column from X_train to float
X_train['TotalCharges'] = X_train['TotalCharges'].astype(float)
#Converting the type of column from X_test to float
X_test['TotalCharges'] = X_test['TotalCharges'].astype(float)
#Filling missing values
X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean(),inplace=True)
X_test['TotalCharges'].fillna(X_train['TotalCharges'].mean(), inplace=True)
#Check value counts
print(X_train.isnull().sum())
cat_cols = X_train.select_dtypes(include='O').columns.tolist()
#Label encoding train data
for x in cat_cols:
le = LabelEncoder()
X_train[x] = le.fit_transform(X_train[x])
#Label encoding test data
for x in cat_cols:
le = LabelEncoder()
X_test[x] = le.fit_transform(X_test[x])
#Encoding train data target
y_train = y_train.replace({'No':0, 'Yes':1})
#Encoding test data target
y_test = y_test.replace({'No':0, 'Yes':1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
print(X_train, X_test, y_train, y_test)
ada_model = AdaBoostClassifier(random_state = 0)
ada_model.fit(X_train, y_train)
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test, y_pred)
ada_score
ada_cm = confusion_matrix(y_test, y_pred)
ada_cm
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
'max_depth':range(1,3)}
# Code starts here
xgb_model = XGBClassifier(random_state=0)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
xgb_score = accuracy_score(y_test, y_pred)
xgb_cm = confusion_matrix(y_test, y_pred)
xgb_cr = classification_report(y_test, y_pred)
clf_model = GridSearchCV(estimator=xgb_model,param_grid=parameters)
clf_model.fit(X_train, y_train)
y_pred = clf_model.predict(X_test)
clf_score = accuracy_score(y_test, y_pred)
clf_cm = confusion_matrix(y_test, y_pred)
clf_cr = classification_report(y_test, y_pred)
print(xgb_score, clf_score)
print(xgb_cm, clf_cm)
print(xgb_cr, xgb_cr)
| 3.34375 | 3 |
slsgd.py | xcgoner/ecml2019-slsgd | 3 | 12759 | import argparse, time, logging, os, math, random
os.environ["MXNET_USE_OPERATOR_TUNING"] = "0"  # disable operator autotuning (deterministic behavior)
import numpy as np
from scipy import stats
import mxnet as mx
from mxnet import gluon, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.model_zoo import get_model
from gluoncv.utils import makedirs, LRScheduler
from os import listdir
import os.path
import argparse
import pickle
from mpi4py import MPI
# One MPI process per (simulated) worker; rank 0 additionally acts as the server.
mpi_comm = MPI.COMM_WORLD
mpi_size = mpi_comm.Get_size()
mpi_rank = mpi_comm.Get_rank()
# print('rank: %d' % (mpi_rank), flush=True)
# Experiment configuration (data locations, optimization, Byzantine/robust-aggregation settings).
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="dir of the data", required=True)
parser.add_argument("--valdir", type=str, help="dir of the val data", required=True)
parser.add_argument("--batchsize", type=int, help="batchsize", default=8)
parser.add_argument("--epochs", type=int, help="epochs", default=100)
parser.add_argument("--interval", type=int, help="log interval", default=10)
parser.add_argument("--nsplit", type=int, help="number of split", default=40)
parser.add_argument("--lr", type=float, help="learning rate", default=0.001)
parser.add_argument("--alpha", type=float, help="moving average", default=1.0)
parser.add_argument("--alpha-decay", type=float, help="decay factor of alpha", default=0.5)
parser.add_argument("--alpha-decay-epoch", type=str, help="epoch of alpha decay", default='800')
parser.add_argument("--log", type=str, help="dir of the log file", default='train_cifar100.log')
parser.add_argument("--classes", type=int, help="number of classes", default=20)
parser.add_argument("--iterations", type=int, help="number of local epochs", default=50)
parser.add_argument("--aggregation", type=str, help="aggregation method", default='mean')
parser.add_argument("--nbyz", type=int, help="number of Byzantine workers", default=0)
parser.add_argument("--trim", type=int, help="number of trimmed workers on one side", default=0)
# parser.add_argument("--lr-decay", type=float, help="lr decay rate", default=0.1)
# parser.add_argument("--lr-decay-epoch", type=str, help="lr decay epoch", default='400')
parser.add_argument("--iid", type=int, help="IID setting", default=0)
parser.add_argument("--model", type=str, help="model", default='mobilenetv2_1.0')
parser.add_argument("--save", type=int, help="save", default=0)
parser.add_argument("--start-epoch", type=int, help="epoch start from", default=-1)
parser.add_argument("--seed", type=int, help="random seed", default=733)
args = parser.parse_args()
# print(args, flush=True)
# Only rank 0 logs, to both the log file and stdout.
filehandler = logging.FileHandler(args.log)
streamhandler = logging.StreamHandler()
if mpi_rank == 0:
    logger = logging.getLogger('')
    logger.setLevel(logging.INFO)
    logger.addHandler(filehandler)
    logger.addHandler(streamhandler)
# Per-rank seeds: deterministic overall, but different across workers.
mx.random.seed(args.seed + mpi_rank)
random.seed(args.seed + mpi_rank)
np.random.seed(args.seed + mpi_rank)
# Training shards live under <dir>/dataset_split_<nsplit>/train; validation under <valdir>.
data_dir = os.path.join(args.dir, 'dataset_split_{}'.format(args.nsplit))
train_dir = os.path.join(data_dir, 'train')
# val_dir = os.path.join(data_dir, 'val')
val_train_dir = os.path.join(args.valdir, 'train')
val_val_dir = os.path.join(args.valdir, 'val')
# sorted() keeps the shard ordering identical across ranks.
training_files = []
for filename in sorted(listdir(train_dir)):
    absolute_filename = os.path.join(train_dir, filename)
    training_files.append(absolute_filename)
context = mx.cpu()
classes = args.classes
def get_train_batch(train_filename):
    """Load one pickled training shard and return (images, labels) NDArrays, images in NCHW layout."""
    with open(train_filename, "rb") as shard:
        images, labels = pickle.load(shard)
    # shards store images channels-last; transpose to NCHW for the network
    return nd.transpose(nd.array(images), (0, 3, 1, 2)), nd.array(labels)
def get_train_batch_byz(train_filename):
    """Load a shard like get_train_batch but with labels flipped (label `l` -> `classes-1-l`),
    simulating a Byzantine worker's poisoned data."""
    with open(train_filename, "rb") as shard:
        images, labels = pickle.load(shard)
    return nd.transpose(nd.array(images), (0, 3, 1, 2)), nd.array(classes - 1 - labels)
def get_val_train_batch(data_dir):
    """Load this rank's pickled train-split evaluation shard as (images NCHW, labels)."""
    shard_path = os.path.join(data_dir, 'train_data_%03d.pkl' % mpi_rank)
    with open(shard_path, "rb") as shard:
        images, labels = pickle.load(shard)
    return nd.transpose(nd.array(images), (0, 3, 1, 2)), nd.array(labels)
def get_val_val_batch(data_dir):
    """Load this rank's pickled validation shard as (images NCHW, labels)."""
    shard_path = os.path.join(data_dir, 'val_data_%03d.pkl' % mpi_rank)
    with open(shard_path, "rb") as shard:
        images, labels = pickle.load(shard)
    return nd.transpose(nd.array(images), (0, 3, 1, 2)), nd.array(labels)
# Build one DataLoader per training shard; a worker trains on one shard per round.
train_data_list = []
for training_file in training_files:
    [train_X, train_Y] = get_train_batch(training_file)
    train_dataset = mx.gluon.data.dataset.ArrayDataset(train_X, train_Y)
    train_data = gluon.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1)
    train_data_list.append(train_data)
# Per-rank evaluation loaders (train split for cross-entropy, val split for accuracy).
[val_train_X, val_train_Y] = get_val_train_batch(val_train_dir)
val_train_dataset = mx.gluon.data.dataset.ArrayDataset(val_train_X, val_train_Y)
val_train_data = gluon.data.DataLoader(val_train_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1)
[val_val_X, val_val_Y] = get_val_val_batch(val_val_dir)
val_val_dataset = mx.gluon.data.dataset.ArrayDataset(val_val_X, val_val_Y)
val_val_data = gluon.data.DataLoader(val_val_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1)
model_name = args.model
if model_name == 'default':
    # Small VGG-style CNN used when no model-zoo architecture is requested.
    net = gluon.nn.Sequential()
    with net.name_scope():
        #  First convolutional layer
        net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
        net.add(gluon.nn.BatchNorm())
        net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
        net.add(gluon.nn.BatchNorm())
        net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
        net.add(gluon.nn.Dropout(rate=0.25))
        #  Second convolutional layer
        # net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
        # Third convolutional layer
        net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu'))
        net.add(gluon.nn.BatchNorm())
        net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu'))
        net.add(gluon.nn.BatchNorm())
        net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
        net.add(gluon.nn.Dropout(rate=0.25))
        # net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
        # net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
        # net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
        # net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
        # Flatten and apply fullly connected layers
        net.add(gluon.nn.Flatten())
        # net.add(gluon.nn.Dense(512, activation="relu"))
        # net.add(gluon.nn.Dense(512, activation="relu"))
        net.add(gluon.nn.Dense(512, activation="relu"))
        net.add(gluon.nn.Dropout(rate=0.25))
        net.add(gluon.nn.Dense(classes))
else:
    model_kwargs = {'ctx': context, 'pretrained': False, 'classes': classes}
    net = get_model(model_name, **model_kwargs)
if model_name.startswith('cifar') or model_name == 'default':
    net.initialize(mx.init.Xavier(), ctx=context)
else:
    net.initialize(mx.init.MSRAPrelu(), ctx=context)
# # no weight decay
# for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
#     v.wd_mult = 0.0
# Plain SGD without momentum or weight decay (aggregation happens across workers instead).
optimizer = 'sgd'
lr = args.lr
# optimizer_params = {'momentum': 0.9, 'learning_rate': lr, 'wd': 0.0001}
optimizer_params = {'momentum': 0.0, 'learning_rate': lr, 'wd': 0.0}
# lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
alpha_decay_epoch = [int(i) for i in args.alpha_decay_epoch.split(',')]
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
loss_func = gluon.loss.SoftmaxCrossEntropyLoss()
train_metric = mx.metric.Accuracy()
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
train_cross_entropy = mx.metric.CrossEntropy()
# warmup
# print('warm up', flush=True)
trainer.set_learning_rate(0.01)
# train_data = random.choice(train_data_list)
# NOTE(review): hard-coded shard index 90 assumes --nsplit > 90 — confirm for other splits.
train_data = train_data_list[90]
# Warm-up: a few epochs on one shard at a small LR (skipped when resuming from a checkpoint).
for local_epoch in range(5):
    for i, (data, label) in enumerate(train_data):
        with ag.record():
            outputs = net(data)
            loss = loss_func(outputs, label)
        loss.backward()
        trainer.step(args.batchsize)
        if args.start_epoch > 0:
            break
    if args.start_epoch > 0:
        break
# # force initialization
# train_data = random.choice(train_data_list)
# for i, (data, label) in enumerate(train_data):
#     outputs = net(data)
# Synchronize initial weights: rank 0's parameters are broadcast to every worker.
if mpi_rank == 0:
    params_prev = [param.data().copy() for param in net.collect_params().values()]
else:
    params_prev = None
nd.waitall()
# broadcast
params_prev = mpi_comm.bcast(params_prev, root=0)
for param, param_prev in zip(net.collect_params().values(), params_prev):
    param.set_data(param_prev)
if mpi_rank == 0:
    worker_list = list(range(mpi_size))
training_file_index_list = [i for i in range(len(training_files))]
alpha = args.alpha
# Pre-compute a long random permutation of shard indices so each round can
# scatter disjoint shards to the workers (non-IID schedule).
randperm_choice_list = []
randperm_list = [i for i in range(args.nsplit)]
for i in range(int(math.ceil(args.epochs * mpi_size / args.nsplit))):
    random.shuffle(randperm_list)
    randperm_choice_list = randperm_choice_list + randperm_list
# When resuming, reload the checkpoint saved next to the log file and report its metrics.
if args.start_epoch > 0:
    [dirname, postfix] = os.path.splitext(args.log)
    filename = dirname + ("_%04d.params" % (args.start_epoch))
    net.load_parameters(filename, ctx=context)
    acc_top1.reset()
    acc_top5.reset()
    train_cross_entropy.reset()
    # Each rank evaluates its own shard; metrics are averaged on rank 0 below.
    for i, (data, label) in enumerate(val_val_data):
        outputs = net(data)
        acc_top1.update(label, outputs)
        acc_top5.update(label, outputs)
    for i, (data, label) in enumerate(val_train_data):
        outputs = net(data)
        train_cross_entropy.update(label, nd.softmax(outputs))
    _, top1 = acc_top1.get()
    _, top5 = acc_top5.get()
    _, crossentropy = train_cross_entropy.get()
    top1_list = mpi_comm.gather(top1, root=0)
    top5_list = mpi_comm.gather(top5, root=0)
    crossentropy_list = mpi_comm.gather(crossentropy, root=0)
    if mpi_rank == 0:
        top1_list = np.array(top1_list)
        top5_list = np.array(top5_list)
        crossentropy_list = np.array(crossentropy_list)
        logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f'%(args.start_epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha))
    nd.waitall()
time_0 = time.time()
# Main federated/local-SGD loop: each epoch = local training on one shard per
# worker, followed by a global aggregation (mean or trimmed mean) on rank 0.
for epoch in range(args.start_epoch+1, args.epochs):
    # train_metric.reset()
    # if epoch in lr_decay_epoch:
    #     lr = lr * args.lr_decay
    if epoch in alpha_decay_epoch:
        alpha = alpha * args.alpha_decay
    tic = time.time()
    if args.iid == 0:
        # Non-IID: rank 0 scatters one distinct shard index to each worker.
        if mpi_rank == 0:
            training_file_index_sublist = randperm_choice_list[(mpi_size * epoch):(mpi_size * epoch + mpi_size)]
            # logger.info(training_file_index_sublist)
        else:
            training_file_index_sublist = None
        training_file_index = mpi_comm.scatter(training_file_index_sublist, root=0)
        train_data = train_data_list[training_file_index]
    # Fresh trainer each round resets optimizer state before local training.
    trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
    trainer.set_learning_rate(lr)
    if alpha < 1:
        # Stash (1-alpha)-scaled current weights for the moving average after aggregation.
        for param, param_prev in zip(net.collect_params().values(), params_prev):
            if param.grad_req != 'null':
                param_prev[:] = param.data() * (1-alpha)
    # select byz workers
    if args.nbyz > 0:
        if mpi_rank == 0:
            random.shuffle(worker_list)
            byz_worker_list = worker_list[0:args.nbyz]
        else:
            byz_worker_list = None
        byz_worker_list = mpi_comm.bcast(byz_worker_list, root=0)
    else:
        byz_worker_list = []
    if mpi_rank in byz_worker_list:
        # byz worker: re-initialized model trained on label-flipped data.
        [byz_train_X, byz_train_Y] = get_train_batch_byz(random.choice(training_files))
        byz_train_dataset = mx.gluon.data.dataset.ArrayDataset(byz_train_X, byz_train_Y)
        byz_train_data = gluon.data.DataLoader(byz_train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1)
        net.initialize(mx.init.MSRAPrelu(), ctx=context, force_reinit=True)
        for local_epoch in range(args.iterations):
            for i, (data, label) in enumerate(byz_train_data):
                with ag.record():
                    outputs = net(data)
                    loss = loss_func(outputs, label)
                loss.backward()
                trainer.step(args.batchsize)
    else:
        # train
        # local epoch
        for local_epoch in range(args.iterations):
            if args.iid == 1:
                train_data = random.choice(train_data_list)
            for i, (data, label) in enumerate(train_data):
                with ag.record():
                    outputs = net(data)
                    loss = loss_func(outputs, label)
                loss.backward()
                trainer.step(args.batchsize)
    # aggregation
    nd.waitall()
    params_np = [param.data().copy().asnumpy() for param in net.collect_params().values()]
    params_np_list = mpi_comm.gather(params_np, root=0)
    if mpi_rank == 0:
        n_params = len(params_np)
        # Robust aggregation: coordinate-wise trimmed mean drops args.trim/mpi_size
        # of the workers on each side; otherwise a plain mean.
        if args.aggregation == "trim" or args.trim > 0:
            params_np = [ ( stats.trim_mean( np.stack( [params[j] for params in params_np_list], axis=0), args.trim/mpi_size, axis=0 ) ) for j in range(n_params) ]
        else:
            params_np = [ ( np.mean( np.stack( [params[j] for params in params_np_list], axis=0), axis=0 ) ) for j in range(n_params) ]
    else:
        params_np = None
    params_np = mpi_comm.bcast(params_np, root=0)
    params_nd = [ nd.array(param_np) for param_np in params_np ]
    for param, param_nd in zip(net.collect_params().values(), params_nd):
        param.set_data(param_nd)
    if alpha < 1:
        # moving average: new = alpha * aggregated + (1-alpha) * previous
        for param, param_prev in zip(net.collect_params().values(), params_prev):
            if param.grad_req != 'null':
                weight = param.data()
                weight[:] = weight * alpha + param_prev
    # test
    nd.waitall()
    toc = time.time()
    if ( epoch % args.interval == 0 or epoch == args.epochs-1 ) :
        acc_top1.reset()
        acc_top5.reset()
        train_cross_entropy.reset()
        for i, (data, label) in enumerate(val_val_data):
            outputs = net(data)
            acc_top1.update(label, outputs)
            acc_top5.update(label, outputs)
        for i, (data, label) in enumerate(val_train_data):
            outputs = net(data)
            train_cross_entropy.update(label, nd.softmax(outputs))
        _, top1 = acc_top1.get()
        _, top5 = acc_top5.get()
        _, crossentropy = train_cross_entropy.get()
        top1_list = mpi_comm.gather(top1, root=0)
        top5_list = mpi_comm.gather(top5, root=0)
        crossentropy_list = mpi_comm.gather(crossentropy, root=0)
        if mpi_rank == 0:
            top1_list = np.array(top1_list)
            top5_list = np.array(top5_list)
            crossentropy_list = np.array(crossentropy_list)
            logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f, time=%f, elapsed=%f'%(epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha, toc-tic, time.time()-time_0))
            # logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f'%(epoch, top1, top5))
            if args.save == 1:
                [dirname, postfix] = os.path.splitext(args.log)
                filename = dirname + ("_%04d.params" % (epoch))
                net.save_parameters(filename)
        nd.waitall()
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from multiprocessing import Pool
import pickle
import time
import numpy as np
import torch
from scipy.stats import norm
from collections import OrderedDict
import plotting as plg
import utils.model_utils as mutils
import utils.exp_utils as utils
def get_mirrored_patch_crops(patch_crops, org_img_shape):
    """Return patch-crop coordinates for the three mirrored views of the image.

    For each crop (y1, y2, x1, x2) — optionally extended by (z1, z2), which is
    never mirrored — produce the crop as it appears after flipping the image
    along y, along x, and along both axes.

    :param patch_crops: list of crops, each [y1, y2, x1, x2] or [y1, y2, x1, x2, z1, z2].
    :param org_img_shape: original image shape; index 2 is the y extent, index 3 the x extent.
    :return: list of three crop lists (y-flip, x-flip, yx-flip), parallel to patch_crops.
    """
    y_len, x_len = org_img_shape[2], org_img_shape[3]

    def flip_y(crop):
        # mirror the y interval; everything from x onward is untouched
        return [y_len - crop[1], y_len - crop[0]] + list(crop[2:])

    def flip_x(crop):
        # mirror the x interval; keep y and any z coordinates
        return list(crop[:2]) + [x_len - crop[3], x_len - crop[2]] + list(crop[4:])

    def flip_both(crop):
        return [y_len - crop[1], y_len - crop[0], x_len - crop[3], x_len - crop[2]] + list(crop[4:])

    return [[flip_y(c) for c in patch_crops],
            [flip_x(c) for c in patch_crops],
            [flip_both(c) for c in patch_crops]]
def get_mirrored_patch_crops_ax_dep(patch_crops, org_img_shape, mirror_axes):
    """Return patch-crop coordinates mirrored according to an explicit list of axes.

    Like get_mirrored_patch_crops, but only for the requested flips: axis 0
    mirrors y, axis 1 mirrors x, and the pair (0, 1) (in either order) mirrors
    both. Optional (z1, z2) entries are never mirrored.

    :param patch_crops: list of crops, each [y1, y2, x1, x2] or [y1, y2, x1, x2, z1, z2].
    :param org_img_shape: original image shape; index 2 is the y extent, index 3 the x extent.
    :param mirror_axes: iterable of axis specs (0, 1, or a pair of both).
    :return: list of crop lists, one per entry of mirror_axes, parallel to patch_crops.
    :raises Exception: on an axis spec that is neither 0, 1 nor the pair (0, 1).
    """
    y_len, x_len = org_img_shape[2], org_img_shape[3]
    mirrored_patch_crops = []
    for axes in mirror_axes:
        if isinstance(axes, (int, float)) and int(axes) == 0:
            mirrored_patch_crops.append(
                [[y_len - c[1], y_len - c[0]] + list(c[2:]) for c in patch_crops])
        elif isinstance(axes, (int, float)) and int(axes) == 1:
            mirrored_patch_crops.append(
                [list(c[:2]) + [x_len - c[3], x_len - c[2]] + list(c[4:]) for c in patch_crops])
        elif hasattr(axes, "__iter__") and (tuple(axes) == (0, 1) or tuple(axes) == (1, 0)):
            mirrored_patch_crops.append(
                [[y_len - c[1], y_len - c[0], x_len - c[3], x_len - c[2]] + list(c[4:])
                 for c in patch_crops])
        else:
            raise Exception("invalid mirror axes {} in get mirrored patch crops".format(axes))
    return mirrored_patch_crops
def apply_wbc_to_patient(inputs):
    """
    wrapper around prediction box consolidation: weighted box clustering (wbc). processes a single patient.
    loops over batch elements in patient results (1 in 3D, slices in 2D) and foreground classes,
    aggregates and stores results in new list.
    :param inputs: tuple (regress_flag, in_patient_results_list, pid, class_dict, clustering_iou, n_ens),
        packed this way so the function can be mapped over patients by a multiprocessing Pool.
    :return. patient_results_list: list over batch elements. each element is a list over boxes, where each box is
                                   one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D
                                   predictions, and a dummy batch dimension of 1 for 3D predictions.
    :return. pid: string. patient id.
    """
    regress_flag, in_patient_results_list, pid, class_dict, clustering_iou, n_ens = inputs
    out_patient_results_list = [[] for _ in range(len(in_patient_results_list))]
    # cluster per batch element (slice) and per foreground class independently.
    for bix, b in enumerate(in_patient_results_list):
        for cl in list(class_dict.keys()):
            boxes = [(ix, box) for ix, box in enumerate(b) if
                     (box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
            box_coords = np.array([b[1]['box_coords'] for b in boxes])
            box_scores = np.array([b[1]['box_score'] for b in boxes])
            box_center_factor = np.array([b[1]['box_patch_center_factor'] for b in boxes])
            box_n_overlaps = np.array([b[1]['box_n_overlaps'] for b in boxes])
            try:
                box_patch_id = np.array([b[1]['patch_id'] for b in boxes])
            except KeyError: #backward compatibility for already saved pred results ... omg
                box_patch_id = np.array([b[1]['ens_ix'] for b in boxes])
            # regression outputs are only collected when the model predicts them.
            box_regressions = np.array([b[1]['regression'] for b in boxes]) if regress_flag else None
            box_rg_bins = np.array([b[1]['rg_bin'] if 'rg_bin' in b[1].keys() else float('NaN') for b in boxes])
            box_rg_uncs = np.array([b[1]['rg_uncertainty'] if 'rg_uncertainty' in b[1].keys() else float('NaN') for b in boxes])
            # skip the clustering call when there are no detections for this class.
            if 0 not in box_scores.shape:
                keep_scores, keep_coords, keep_n_missing, keep_regressions, keep_rg_bins, keep_rg_uncs = \
                    weighted_box_clustering(box_coords, box_scores, box_center_factor, box_n_overlaps, box_rg_bins, box_rg_uncs,
                                            box_regressions, box_patch_id, clustering_iou, n_ens)
                for boxix in range(len(keep_scores)):
                    clustered_box = {'box_type': 'det', 'box_coords': keep_coords[boxix],
                                     'box_score': keep_scores[boxix], 'cluster_n_missing': keep_n_missing[boxix],
                                     'box_pred_class_id': cl}
                    if regress_flag:
                        clustered_box.update({'regression': keep_regressions[boxix],
                                              'rg_uncertainty': keep_rg_uncs[boxix],
                                              'rg_bin': keep_rg_bins[boxix]})
                    out_patient_results_list[bix].append(clustered_box)
        # add gt boxes back to new output list.
        out_patient_results_list[bix].extend([box for box in b if box['box_type'] == 'gt'])
    return [out_patient_results_list, pid]
def weighted_box_clustering(box_coords, scores, box_pc_facts, box_n_ovs, box_rg_bins, box_rg_uncs,
                            box_regress, box_patch_id, thresh, n_ens):
    """Consolidates overlapping predictions resulting from patch overlaps, test data augmentations and temporal ensembling.
    clusters predictions together with iou > thresh (like in NMS). Output score and coordinate for one cluster are the
    average weighted by individual patch center factors (how trustworthy is this candidate measured by how centered
    its position within the patch is) and the size of the corresponding box.
    The number of expected predictions at a position is n_data_aug * n_temp_ens * n_overlaps_at_position
    (1 prediction per unique patch). Missing predictions at a cluster position are defined as the number of unique
    patches in the cluster, which did not contribute any predict any boxes.
    :param box_coords: (n_dets, 4 or 6) y1, x1, y2, x2, (z1), (z2).
    :param scores: (n_dets,) confidence scores.
    :param box_pc_facts: (n_dets,) patch-center factors from position on patch tiles.
    :param box_n_ovs: (n_dets,) number of patch overlaps at box position.
    :param box_rg_bins: (n_dets,) regression bin predictions.
    :param box_rg_uncs: (n_dets,) regression uncertainties (from model mrcnn_aleatoric).
    :param box_regress: (n_dets, n_regression_features) or None when the model has no regression head.
    :param box_patch_id: (n_dets,) id of the patch each box came from (ensemble index).
    :param thresh: threshold for iou_matching.
    :param n_ens: number of models, that are ensembled. (-> number of expected predictions per position).
    :return: keep_scores: (n_keep) new scores of boxes to be kept.
    :return: keep_coords: (n_keep, (y1, x1, y2, x2, (z1), (z2)) new coordinates of boxes to be kept.
    :return: keep_n_missing, keep_regress, keep_rg_bins, keep_rg_uncs: per-cluster missing-prediction
        percentage and weighted-average regression outputs (NaN placeholders when box_regress is None).
    """
    # 4 coordinates -> 2D boxes, 6 -> 3D boxes.
    dim = 2 if box_coords.shape[1] == 4 else 3
    y1 = box_coords[:,0]
    x1 = box_coords[:,1]
    y2 = box_coords[:,2]
    x2 = box_coords[:,3]
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    if dim == 3:
        z1 = box_coords[:, 4]
        z2 = box_coords[:, 5]
        areas *= (z2 - z1 + 1)
    # order is the sorted index. maps order to index o[1] = 24 (rank1, ix 24)
    order = scores.argsort()[::-1]
    keep_scores = []
    keep_coords = []
    keep_n_missing = []
    keep_regress = []
    keep_rg_bins = []
    keep_rg_uncs = []
    # greedy clustering: repeatedly take the highest-scoring remaining box as cluster center.
    while order.size > 0:
        i = order[0]  # highest scoring element
        yy1 = np.maximum(y1[i], y1[order])
        xx1 = np.maximum(x1[i], x1[order])
        yy2 = np.minimum(y2[i], y2[order])
        xx2 = np.minimum(x2[i], x2[order])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        inter = w * h
        if dim == 3:
            zz1 = np.maximum(z1[i], z1[order])
            zz2 = np.minimum(z2[i], z2[order])
            d = np.maximum(0, zz2 - zz1 + 1)
            inter *= d
        # overlap between currently highest scoring box and all boxes.
        ovr = inter / (areas[i] + areas[order] - inter)
        ovr_fl = inter.astype('float64') / (areas[i] + areas[order] - inter.astype('float64'))
        assert np.all(ovr==ovr_fl), "ovr {}\n ovr_float {}".format(ovr, ovr_fl)
        # get all the predictions that match the current box to build one cluster.
        matches = np.nonzero(ovr > thresh)[0]
        match_n_ovs = box_n_ovs[order[matches]]
        match_pc_facts = box_pc_facts[order[matches]]
        match_patch_id = box_patch_id[order[matches]]
        match_ov_facts = ovr[matches]
        match_areas = areas[order[matches]]
        match_scores = scores[order[matches]]
        # weight all scores in cluster by patch factors, and size.
        match_score_weights = match_ov_facts * match_areas * match_pc_facts
        match_scores *= match_score_weights
        # for the weighted average, scores have to be divided by the number of total expected preds at the position
        # of the current cluster. 1 Prediction per patch is expected. therefore, the number of ensembled models is
        # multiplied by the mean overlaps of patches at this position (boxes of the cluster might partly be
        # in areas of different overlaps).
        n_expected_preds = n_ens * np.mean(match_n_ovs)
        # the number of missing predictions is obtained as the number of patches,
        # which did not contribute any prediction to the current cluster.
        n_missing_preds = np.max((0, n_expected_preds - np.unique(match_patch_id).shape[0]))
        # missing preds are given the mean weighting
        # (expected prediction is the mean over all predictions in cluster).
        denom = np.sum(match_score_weights) + n_missing_preds * np.mean(match_score_weights)
        # compute weighted average score for the cluster
        avg_score = np.sum(match_scores) / denom
        # compute weighted average of coordinates for the cluster. now only take existing
        # predictions into account.
        avg_coords = [np.sum(y1[order[matches]] * match_scores) / np.sum(match_scores),
                      np.sum(x1[order[matches]] * match_scores) / np.sum(match_scores),
                      np.sum(y2[order[matches]] * match_scores) / np.sum(match_scores),
                      np.sum(x2[order[matches]] * match_scores) / np.sum(match_scores)]
        if dim == 3:
            avg_coords.append(np.sum(z1[order[matches]] * match_scores) / np.sum(match_scores))
            avg_coords.append(np.sum(z2[order[matches]] * match_scores) / np.sum(match_scores))
        if box_regress is not None:
            # compute wt. avg. of regression vectors (component-wise average)
            avg_regress = np.sum(box_regress[order[matches]] * match_scores[:, np.newaxis], axis=0) / np.sum(
                match_scores)
            avg_rg_bins = np.round(np.sum(box_rg_bins[order[matches]] * match_scores) / np.sum(match_scores))
            avg_rg_uncs = np.sum(box_rg_uncs[order[matches]] * match_scores) / np.sum(match_scores)
        else:
            avg_regress = np.array(float('NaN'))
            avg_rg_bins = np.array(float('NaN'))
            avg_rg_uncs = np.array(float('NaN'))
        # some clusters might have very low scores due to high amounts of missing predictions.
        # filter out the with a conservative threshold, to speed up evaluation.
        if avg_score > 0.01:
            keep_scores.append(avg_score)
            keep_coords.append(avg_coords)
            keep_n_missing.append((n_missing_preds / n_expected_preds * 100)) # relative
            keep_regress.append(avg_regress)
            keep_rg_uncs.append(avg_rg_uncs)
            keep_rg_bins.append(avg_rg_bins)
        # get index of all elements that were not matched and discard all others.
        inds = np.nonzero(ovr <= thresh)[0]
        inds_where = np.where(ovr<=thresh)[0]
        assert np.all(inds == inds_where), "inds_nonzero {} \ninds_where {}".format(inds, inds_where)
        order = order[inds]
    return keep_scores, keep_coords, keep_n_missing, keep_regress, keep_rg_bins, keep_rg_uncs
def apply_nms_to_patient(inputs):
    """Run class-wise non-maximum suppression on one patient's detections.

    :param inputs: tuple (in_patient_results_list, pid, class_dict, iou_thresh); the first
        element is a list over batch elements (slices in 2D, 1 in 3D), each a list of box dicts.
    :return: [out_patient_results_list, pid] with the same batch structure, where each
        batch element keeps only NMS survivors plus all ground-truth boxes.
    """
    results_in, pid, class_dict, iou_thresh = inputs
    results_out = []
    # NMS runs per batch element (slice) and per foreground class independently.
    for slice_boxes in results_in:
        kept = []
        for cl in list(class_dict.keys()):
            dets = [b for b in slice_boxes if (b['box_type'] == 'det' and b['box_pred_class_id'] == cl)]
            coords = np.array([b['box_coords'] for b in dets])
            confs = np.array([b['box_score'] for b in dets])
            # guard against an empty detection set for this class.
            keep_ix = mutils.nms_numpy(coords, confs, iou_thresh) if 0 not in confs.shape else []
            kept.extend(dets[ix] for ix in keep_ix)
        # ground-truth boxes pass through untouched.
        kept.extend(b for b in slice_boxes if b['box_type'] == 'gt')
        results_out.append(kept)
    assert len(results_in) == len(results_out), "batch dim needs to be maintained, in: {}, out {}".format(len(results_in), len(results_out))
    return [results_out, pid]
def nms_2to3D(dets, thresh):
    """
    Merges 2D boxes to 3D cubes. Boxes of all slices are treated as if lying in one plane and
    clustered like in NMS, with the extra constraint that a suppressed box must have a z
    coordinate 'connected' to the cluster center (the highest-scoring box): starting from the
    center's slice, slices are included outward until the first slice with no prediction (a
    'hole') is hit. Boxes beyond a hole stay in play and can seed further clusters.
    example: cluster center at slice 50, other predictions at 23, 46, 48, 49, 51, 52, 53, 56, 57.
    Only 48-53 are merged into the cube (47 is a hole, cutting off 46 and 23).
    Works best with a minimum confidence on the input, since noisy low-confidence predictions
    undermine the hole-based z-boundary assumption.
    :param dets: (n_detections, (y1, x1, y2, x2, score, slice_id))
    :param thresh: iou matching threshold (like in NMS).
    :return: keep: indices (into dets) of the cluster-center boxes to keep.
    :return: keep_z: parallel list of [z1, z2] extents to attach to the kept boxes.
    """
    y1, x1, y2, x2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    assert np.all(y1 <= y2) and np.all(x1 <= x2), """"the definition of the coordinates is crucially important here:
        where maximum is taken needs to be the lower coordinate"""
    scores = dets[:, -2]
    slice_id = dets[:, -1]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # indices of remaining candidates, sorted by descending score.
    order = scores.argsort()[::-1]
    keep, keep_z = [], []
    while order.size > 0:
        i = order[0]  # current cluster center: highest-scoring remaining box
        # in-plane intersection of the center with every remaining box (center included).
        ov_y1 = np.maximum(y1[i], y1[order])
        ov_x1 = np.maximum(x1[i], x1[order])
        ov_y2 = np.minimum(y2[i], y2[order])
        ov_x2 = np.minimum(x2[i], x2[order])
        inter = np.maximum(0.0, ov_y2 - ov_y1 + 1) * np.maximum(0.0, ov_x2 - ov_x1 + 1)
        iou = inter / (areas[i] + areas[order] - inter)
        # positions (within `order`) of boxes that overlap the center in-plane.
        matches = np.argwhere(iou > thresh)
        slice_ids = slice_id[order[matches]]
        core_slice = slice_id[int(i)]
        # find the nearest empty slice ('hole') above and below the center slice.
        upper_holes = [ii for ii in np.arange(core_slice, np.max(slice_ids)) if ii not in slice_ids]
        lower_holes = [ii for ii in np.arange(np.min(slice_ids), core_slice) if ii not in slice_ids]
        max_valid_slice_id = np.min(upper_holes) if len(upper_holes) > 0 else np.max(slice_ids)
        min_valid_slice_id = np.max(lower_holes) if len(lower_holes) > 0 else np.min(slice_ids)
        # restrict the cluster to the z-connected slab around the center.
        z_matches = matches[(slice_ids <= max_valid_slice_id) & (slice_ids >= min_valid_slice_id)]
        # pad by one voxel on each side: slab content spans z2-z1 without overlap.
        z1 = np.min(slice_id[order[z_matches]]) - 1
        z2 = np.max(slice_id[order[z_matches]]) + 1
        keep.append(i)
        keep_z.append([z1, z2])
        # remove only the z-connected members; boxes beyond a hole stay available.
        order = np.delete(order, z_matches, axis=0)
    return keep, keep_z
def apply_2d_3d_merging_to_patient(inputs):
    """
    wrapper around 2Dto3D merging for a single patient. Takes 2D patient results (slices in the
    batch dimension) and returns 3D patient results (dummy batch dimension of 1), using the
    adapted non-maximum suppression in nms_2to3D to stack per-slice boxes into cubes.
    :param inputs: tuple (in_patient_results_list, pid, class_dict, merge_3D_iou).
    :return. results_dict_boxes: list over batch elements (1 in 3D). each element is a list over
             boxes, where each box is one dictionary: [[box_0, ...], [box_n, ...]].
    :return. pid: string. patient id.
    """
    results_2d, pid, class_dict, merge_3D_iou = inputs
    merged_boxes = []
    for cl in list(class_dict.keys()):
        # gather this class' detections across all slices, remembering each box's slice index.
        dets, slice_ids = [], []
        for slice_ix, slice_boxes in enumerate(results_2d):
            cl_dets = [(ix, box) for ix, box in enumerate(slice_boxes)
                       if (box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
            dets.extend(cl_dets)
            slice_ids.extend([slice_ix] * len(cl_dets))
        coords = np.array([d[1]['box_coords'] for d in dets])
        confs = np.array([d[1]['box_score'] for d in dets])
        slice_ids = np.array(slice_ids)
        if 0 not in confs.shape:
            keep_ix, keep_z = nms_2to3D(
                np.concatenate((coords, confs[:, None], slice_ids[:, None]), axis=1), merge_3D_iou)
        else:
            keep_ix, keep_z = [], []
        # keep the surviving boxes, extending their coordinates by the merged z extent.
        for kix, kz in zip(keep_ix, keep_z):
            kept_box = dets[kix][1]
            kept_box['box_coords'] = list(kept_box['box_coords']) + kz
            merged_boxes.append(kept_box)
    gt_boxes = [box for b in results_2d for box in b if box['box_type'] == 'gt']
    if len(gt_boxes) > 0:
        assert np.all([len(box["box_coords"]) == 6 for box in gt_boxes]), "expanded preds to 3D but GT is 2D."
    merged_boxes += gt_boxes
    return [[merged_boxes], pid]  # additional list wrapping is extra batch dim.
class Predictor:
    """
    Prediction pipeline:
    - receives a patched patient image (n_patches, c, y, x, (z)) from patient data loader.
    - forwards patches through model in chunks of batch_size. (method: batch_tiling_forward)
    - unmolds predictions (boxes and segmentations) to original patient coordinates. (method: spatial_tiling_forward)
    Ensembling (mode == 'test'):
    - for inference, forwards 4 mirrored versions of image through the model and unmolds predictions afterwards
      accordingly (method: data_aug_forward)
    - for inference, loads multiple parameter-sets of the trained model corresponding to different epochs. for each
      parameter-set loops over entire test set, runs prediction pipeline for each patient. (method: predict_test_set)
    Consolidation of predictions:
    - consolidates a patient's predictions (boxes, segmentations) collected over patches, data_aug- and temporal ensembling,
      performs clustering and weighted averaging (external function: apply_wbc_to_patient) to obtain consistent outputs.
    - for 2D networks, consolidates box predictions to 3D cubes via clustering (adaptation of non-maximum suppression).
      (external function: apply_2d_3d_merging_to_patient)
    Ground truth handling:
    - dismisses any ground truth boxes returned by the model (happens in validation mode, patch-based groundtruth)
    - if provided by data loader, adds patient-wise ground truth to the final predictions to be passed to the evaluator.
    """
    def __init__(self, cf, net, logger, mode):
        """
        :param cf: config object holding all settings (batch size, dim, fold paths, test options, ...).
        :param net: network instance predictions are produced with.
        :param logger: logger instance for status output.
        :param mode: 'val' or 'test'; controls loss monitoring vs. pure inference behavior.
        """
        self.cf = cf
        self.batch_size = cf.batch_size
        self.logger = logger
        self.mode = mode
        self.net = net
        # number of ensemble members (temporal epochs x test-time augmentations); 1 if no ensembling.
        self.n_ens = 1
        # rank index (as string) of the currently loaded parameter set; used to build unique patch ids.
        self.rank_ix = '0'
        self.regress_flag = any(['regression' in task for task in self.cf.prediction_tasks])
        if self.cf.merge_2D_to_3D_preds:
            assert self.cf.dim == 2, "Merge 2Dto3D only valid for 2D preds, but current dim is {}.".format(self.cf.dim)
        if self.mode == 'test':
            # model_index ranks the saved checkpoints; keep only the cf.test_n_epochs best for temporal ensembling.
            last_state_path = os.path.join(self.cf.fold_dir, 'last_state.pth')
            try:
                self.model_index = torch.load(last_state_path)["model_index"]
                self.model_index = self.model_index[self.model_index["rank"] <= self.cf.test_n_epochs]
            except FileNotFoundError:
                raise FileNotFoundError('no last_state/model_index file in fold directory. '
                                        'seems like you are trying to run testing without prior training...')
            self.n_ens = cf.test_n_epochs
            if self.cf.test_aug_axes is not None:
                # each mirror augmentation adds one ensemble member on top of the original orientation.
                self.n_ens *= (len(self.cf.test_aug_axes)+1)
            self.example_plot_dir = os.path.join(cf.test_dir, "example_plots")
            os.makedirs(self.example_plot_dir, exist_ok=True)
    def batch_tiling_forward(self, batch):
        """
        calls the actual network forward method. in patch-based prediction, the batch dimension might be overloaded
        with n_patches >> batch_size, which would exceed gpu memory. In this case, batches are processed in chunks of
        batch_size. validation mode calls the train method to monitor losses (returned ground truth objects are discarded).
        test mode calls the test forward method, no ground truth required / involved.
        :return. results_dict: stores the results for one patient. dictionary with keys:
                 - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                            one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
                            and a dummy batch dimension of 1 for 3D predictions.
                 - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
                 - loss / class_loss (only in validation mode)
        """
        img = batch['data']
        if img.shape[0] <= self.batch_size:
            if self.mode == 'val':
                # call training method to monitor losses
                results_dict = self.net.train_forward(batch, is_validation=True)
                # discard returned ground-truth boxes (also training info boxes).
                results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
            elif self.mode == 'test':
                # NOTE(review): modes other than 'val'/'test' would leave results_dict unbound here.
                results_dict = self.net.test_forward(batch, return_masks=self.cf.return_masks_in_test)
        else: # needs batch tiling
            # split indices into chunks of batch_size; np.split at [::batch_size] yields a leading empty split.
            split_ixs = np.split(np.arange(img.shape[0]), np.arange(img.shape[0])[::self.batch_size])
            chunk_dicts = []
            for chunk_ixs in split_ixs[1:]: # first split is elements before 0, so empty
                # only slice batch entries that actually carry the (overloaded) batch dimension.
                b = {k: batch[k][chunk_ixs] for k in batch.keys()
                     if (isinstance(batch[k], np.ndarray) and batch[k].shape[0] == img.shape[0])}
                if self.mode == 'val':
                    chunk_dicts += [self.net.train_forward(b, is_validation=True)]
                else:
                    chunk_dicts += [self.net.test_forward(b, return_masks=self.cf.return_masks_in_test)]
            results_dict = {}
            # flatten out batch elements from chunks ([chunk, chunk] -> [b, b, b, b, ...])
            results_dict['boxes'] = [item for d in chunk_dicts for item in d['boxes']]
            results_dict['seg_preds'] = np.array([item for d in chunk_dicts for item in d['seg_preds']])
            if self.mode == 'val':
                # if hasattr(self.cf, "losses_to_monitor"):
                #     loss_names = self.cf.losses_to_monitor
                # else:
                #     loss_names = {name for dic in chunk_dicts for name in dic if 'loss' in name}
                # estimate patient loss by mean over batch_chunks. Most similar to training loss.
                results_dict['torch_loss'] = torch.mean(torch.cat([d['torch_loss'] for d in chunk_dicts]))
                results_dict['class_loss'] = np.mean([d['class_loss'] for d in chunk_dicts])
                # discard returned ground-truth boxes (also training info boxes).
                results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
        return results_dict
    def spatial_tiling_forward(self, batch, patch_crops = None, n_aug='0'):
        """
        forwards batch to batch_tiling_forward method and receives and returns a dictionary with results.
        if patch-based prediction, the results received from batch_tiling_forward will be on a per-patch-basis.
        this method uses the provided patch_crops to re-transform all predictions to whole-image coordinates.
        Patch-origin information of all box-predictions will be needed for consolidation, hence it is stored as
        'patch_id', which is a unique string for each patch (also takes current data aug and temporal epoch instances
        into account). all box predictions get additional information about the amount of overlapping patches at the
        respective position (used for consolidation).
        :return. results_dict: stores the results for one patient. dictionary with keys:
                 - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                            one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
                            and a dummy batch dimension of 1 for 3D predictions.
                 - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
                 - monitor_values (only in validation mode)
        returned dict is a flattened version with 1 batch instance (3D) or slices (2D)
        """
        if patch_crops is not None:
            #print("patch_crops not None, applying patch center factor")
            patches_dict = self.batch_tiling_forward(batch)
            results_dict = {'boxes': [[] for _ in range(batch['original_img_shape'][0])]}
            #bc of ohe--> channel dim of seg has size num_classes
            out_seg_shape = list(batch['original_img_shape'])
            out_seg_shape[1] = patches_dict["seg_preds"].shape[1]
            out_seg_preds = np.zeros(out_seg_shape, dtype=np.float16)
            patch_overlap_map = np.zeros_like(out_seg_preds, dtype='uint8')
            # accumulate per-patch seg probabilities into whole-image canvas, counting overlaps per voxel.
            for pix, pc in enumerate(patch_crops):
                if self.cf.dim == 3:
                    out_seg_preds[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += patches_dict['seg_preds'][pix]
                    patch_overlap_map[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += 1
                elif self.cf.dim == 2:
                    out_seg_preds[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += patches_dict['seg_preds'][pix]
                    patch_overlap_map[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += 1
            # average probabilities where patches overlapped.
            out_seg_preds[patch_overlap_map > 0] /= patch_overlap_map[patch_overlap_map > 0]
            results_dict['seg_preds'] = out_seg_preds
            for pix, pc in enumerate(patch_crops):
                patch_boxes = patches_dict['boxes'][pix]
                for box in patch_boxes:
                    # add unique patch id for consolidation of predictions.
                    box['patch_id'] = self.rank_ix + '_' + n_aug + '_' + str(pix)
                    # boxes from the edges of a patch have a lower prediction quality, than the ones at patch-centers.
                    # hence they will be down-weighted for consolidation, using the 'box_patch_center_factor', which is
                    # obtained by a gaussian distribution over positions in the patch and average over spatial dimensions.
                    # Also the info 'box_n_overlaps' is stored for consolidation, which represents the amount of
                    # overlapping patches at the box's position.
                    c = box['box_coords']
                    #box_centers = np.array([(c[ii] + c[ii+2])/2 for ii in range(len(c)//2)])
                    box_centers = [(c[ii] + c[ii + 2]) / 2 for ii in range(2)]
                    if self.cf.dim == 3:
                        box_centers.append((c[4] + c[5]) / 2)
                    # NOTE: inside this comprehension 'pc' is the per-axis half patch size,
                    # not the patch-crop tuple of the enclosing loop (comprehension scope does not leak in py3).
                    box['box_patch_center_factor'] = np.mean(
                        [norm.pdf(bc, loc=pc, scale=pc * 0.8) * np.sqrt(2 * np.pi) * pc * 0.8 for bc, pc in
                         zip(box_centers, np.array(self.cf.patch_size) / 2)])
                    if self.cf.dim == 3:
                        # shift box coords from patch to whole-image frame; z-origin pc[4] applies to both z coords.
                        c += np.array([pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]])
                        int_c = [int(np.floor(ii)) if ix%2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
                        box['box_n_overlaps'] = np.mean(patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c[4]:int_c[5]])
                        results_dict['boxes'][0].append(box)
                    else:
                        c += np.array([pc[0], pc[2], pc[0], pc[2]])
                        int_c = [int(np.floor(ii)) if ix % 2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
                        box['box_n_overlaps'] = np.mean(
                            patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]])
                        # in 2D, pc[4] is the slice index the patch came from.
                        results_dict['boxes'][pc[4]].append(box)
            if self.mode == 'val':
                results_dict['torch_loss'] = patches_dict['torch_loss']
                results_dict['class_loss'] = patches_dict['class_loss']
        else:
            results_dict = self.batch_tiling_forward(batch)
            # whole-image prediction: neutral consolidation weights.
            for b in results_dict['boxes']:
                for box in b:
                    box['box_patch_center_factor'] = 1
                    box['box_n_overlaps'] = 1
                    box['patch_id'] = self.rank_ix + '_' + n_aug
        return results_dict
    def data_aug_forward(self, batch):
        """
        in val_mode: passes batch through to spatial_tiling method without data_aug.
        in test_mode: if cf.test_aug is set in configs, creates 4 mirrored versions of the input image,
        passes all of them to the next processing step (spatial_tiling method) and re-transforms returned predictions
        to original image version.
        :return. results_dict: stores the results for one patient. dictionary with keys:
                 - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                            one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
                            and a dummy batch dimension of 1 for 3D predictions.
                 - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
                 - loss / class_loss (only in validation mode)
        """
        # NOTE(review): self.patched_patient is set by predict_patient before this is called — confirm all call paths.
        patch_crops = batch['patch_crop_coords'] if self.patched_patient else None
        results_list = [self.spatial_tiling_forward(batch, patch_crops)]
        org_img_shape = batch['original_img_shape']
        if self.mode == 'test' and self.cf.test_aug_axes is not None:
            if isinstance(self.cf.test_aug_axes, (int, float)):
                self.cf.test_aug_axes = (self.cf.test_aug_axes,)
            #assert np.all(np.array(self.cf.test_aug_axes)<self.cf.dim), "test axes {} need to be spatial axes".format(self.cf.test_aug_axes)
            if self.patched_patient:
                # apply mirror transformations to patch-crop coordinates, for correct tiling in spatial_tiling method.
                mirrored_patch_crops = get_mirrored_patch_crops_ax_dep(patch_crops, batch['original_img_shape'],
                                                                       self.cf.test_aug_axes)
                self.logger.info("mirrored patch crop coords for patched patient in test augs!")
            else:
                mirrored_patch_crops = [None] * 3
            img = np.copy(batch['data'])
            for n_aug, sp_axis in enumerate(self.cf.test_aug_axes):
                #sp_axis = np.array(axis) #-2 #spatial axis index
                # spatial axis index -> array axis index (offset by batch and channel dims).
                axis = np.array(sp_axis)+2
                if isinstance(sp_axis, (int, float)):
                    # mirroring along one axis at a time
                    batch['data'] = np.flip(img, axis=axis).copy()
                    chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[n_aug], n_aug=str(n_aug))
                    # re-transform coordinates.
                    for ix in range(len(chunk_dict['boxes'])):
                        for boxix in range(len(chunk_dict['boxes'][ix])):
                            coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
                            # mirroring swaps and reflects the min/max coordinate pair of the flipped axis.
                            coords[sp_axis] = org_img_shape[axis] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis+2]
                            coords[sp_axis+2] = org_img_shape[axis] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis]
                            assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
                            assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
                            chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
                    # re-transform segmentation predictions.
                    chunk_dict['seg_preds'] = np.flip(chunk_dict['seg_preds'], axis=axis)
                # NOTE(review): precedence here is (iterable and ==(0,1)) or ==(1,0); relies on the elif above
                # having excluded scalar axes so tuple(sp_axis) cannot fail — confirm.
                elif hasattr(sp_axis, "__iter__") and tuple(sp_axis)==(0,1) or tuple(sp_axis)==(1,0):
                    #NEED: mirrored patch crops are given as [(y-axis), (x-axis), (y-,x-axis)], obey this order!
                    # mirroring along two axes at same time
                    batch['data'] = np.flip(np.flip(img, axis=axis[0]), axis=axis[1]).copy()
                    chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[n_aug], n_aug=str(n_aug))
                    # re-transform coordinates.
                    for ix in range(len(chunk_dict['boxes'])):
                        for boxix in range(len(chunk_dict['boxes'][ix])):
                            coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
                            coords[sp_axis[0]] = org_img_shape[axis[0]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[0]+2]
                            coords[sp_axis[0]+2] = org_img_shape[axis[0]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[0]]
                            coords[sp_axis[1]] = org_img_shape[axis[1]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[1]+2]
                            coords[sp_axis[1]+2] = org_img_shape[axis[1]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[1]]
                            assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
                            assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
                            chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
                    # re-transform segmentation predictions.
                    chunk_dict['seg_preds'] = np.flip(np.flip(chunk_dict['seg_preds'], axis=axis[0]), axis=axis[1]).copy()
                else:
                    raise Exception("Invalid axis type {} in test augs".format(type(axis)))
                results_list.append(chunk_dict)
            # restore the unmirrored image for any later use of the batch.
            batch['data'] = img
        # aggregate all boxes/seg_preds per batch element from data_aug predictions.
        results_dict = {}
        results_dict['boxes'] = [[item for d in results_list for item in d['boxes'][batch_instance]]
                                 for batch_instance in range(org_img_shape[0])]
        # results_dict['seg_preds'] = np.array([[item for d in results_list for item in d['seg_preds'][batch_instance]]
        #                                       for batch_instance in range(org_img_shape[0])])
        results_dict['seg_preds'] = np.stack([dic['seg_preds'] for dic in results_list], axis=1)
        # needs segs probs in seg_preds entry:
        results_dict['seg_preds'] = np.sum(results_dict['seg_preds'], axis=1) #add up seg probs from different augs per class
        if self.mode == 'val':
            results_dict['torch_loss'] = results_list[0]['torch_loss']
            results_dict['class_loss'] = results_list[0]['class_loss']
        return results_dict
    def load_saved_predictions(self):
        """loads raw predictions saved by self.predict_test_set. aggregates and/or merges 2D boxes to 3D cubes for
        evaluation (if model predicts 2D but evaluation is run in 3D), according to settings config.
        :return: list_of_results_per_patient: list over patient results. each entry is a dict with keys:
                 - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                            one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
                            (if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
                 - 'batch_dices': dice scores as recorded in raw prediction results.
                 - 'seg_preds': not implemented yet. could replace dices by seg preds to have raw seg info available, however
                            would consume critically large memory amount. todo evaluation of instance/semantic segmentation.
        """
        results_file = 'pred_results.pkl' if not self.cf.hold_out_test_set else 'pred_results_held_out.pkl'
        if not self.cf.hold_out_test_set or not self.cf.ensemble_folds:
            # single-fold case: load only the current fold's raw predictions.
            self.logger.info("loading saved predictions of fold {}".format(self.cf.fold))
            with open(os.path.join(self.cf.fold_dir, results_file), 'rb') as handle:
                results_list = pickle.load(handle)
            box_results_list = [(res_dict["boxes"], pid) for res_dict, pid in results_list]
            da_factor = len(self.cf.test_aug_axes)+1 if self.cf.test_aug_axes is not None else 1
            self.n_ens = self.cf.test_n_epochs * da_factor
            self.logger.info('loaded raw test set predictions with n_patients = {} and n_ens = {}'.format(
                len(results_list), self.n_ens))
        else:
            # hold-out case: pool raw predictions over all cross-validation folds that were trained.
            self.logger.info("loading saved predictions of hold-out test set")
            fold_dirs = sorted([os.path.join(self.cf.exp_dir, f) for f in os.listdir(self.cf.exp_dir) if
                                os.path.isdir(os.path.join(self.cf.exp_dir, f)) and f.startswith("fold")])
            results_list = []
            folds_loaded = 0
            for fold in range(self.cf.n_cv_splits):
                fold_dir = os.path.join(self.cf.exp_dir, 'fold_{}'.format(fold))
                if fold_dir in fold_dirs:
                    with open(os.path.join(fold_dir, results_file), 'rb') as handle:
                        fold_list = pickle.load(handle)
                        results_list += fold_list
                        folds_loaded += 1
                else:
                    self.logger.info("Skipping fold {} since no saved predictions found.".format(fold))
            box_results_list = []
            for res_dict, pid in results_list: #without filtering gt out:
                box_results_list.append((res_dict['boxes'], pid))
            #it's usually not right to filter out gts here, is it?
            da_factor = len(self.cf.test_aug_axes)+1 if self.cf.test_aug_axes is not None else 1
            self.n_ens = self.cf.test_n_epochs * da_factor * folds_loaded
        # -------------- aggregation of boxes via clustering -----------------
        if self.cf.clustering == "wbc":
            self.logger.info('applying WBC to test-set predictions with iou {} and n_ens {} over {} patients'.format(
                self.cf.clustering_iou, self.n_ens, len(box_results_list)))
            mp_inputs = [[self.regress_flag, ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou, self.n_ens] for ii
                         in box_results_list]
            del box_results_list
            pool = Pool(processes=self.cf.n_workers)
            box_results_list = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
            pool.close()
            pool.join()
            del mp_inputs
        elif self.cf.clustering == "nms":
            self.logger.info('applying standard NMS to test-set predictions with iou {} over {} patients.'.format(
                self.cf.clustering_iou, len(box_results_list)))
            pool = Pool(processes=self.cf.n_workers)
            mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou] for ii in box_results_list]
            del box_results_list
            box_results_list = pool.map(apply_nms_to_patient, mp_inputs, chunksize=1)
            pool.close()
            pool.join()
            del mp_inputs
        if self.cf.merge_2D_to_3D_preds:
            self.logger.info('applying 2Dto3D merging to test-set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
            pool = Pool(processes=self.cf.n_workers)
            mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in box_results_list]
            box_results_list = pool.map(apply_2d_3d_merging_to_patient, mp_inputs, chunksize=1)
            pool.close()
            pool.join()
            del mp_inputs
        # write the consolidated boxes back into the loaded per-patient results dicts.
        for ix in range(len(results_list)):
            assert np.all(results_list[ix][1] == box_results_list[ix][1]), "pid mismatch between loaded and aggregated results"
            results_list[ix][0]["boxes"] = box_results_list[ix][0]
        return results_list  # holds (results_dict, pid)
    def predict_patient(self, batch):
        """
        predicts one patient.
        called either directly via loop over validation set in exec.py (mode=='val')
        or from self.predict_test_set (mode=='test).
        in val mode: adds 3D ground truth info to predictions and runs consolidation and 2Dto3D merging of predictions.
        in test mode: returns raw predictions (ground truth addition, consolidation, 2D to 3D merging are
        done in self.predict_test_set, because patient predictions across several epochs might be needed
        to be collected first, in case of temporal ensembling).
        :return. results_dict: stores the results for one patient. dictionary with keys:
                 - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                            one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
                            (if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
                 - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
                 - loss / class_loss (only in validation mode)
        """
        #if self.mode=="test":
        #    self.logger.info('predicting patient {} for fold {} '.format(np.unique(batch['pid']), self.cf.fold))
        # True if patient is provided in patches and predictions need to be tiled.
        self.patched_patient = 'patch_crop_coords' in list(batch.keys())
        # forward batch through prediction pipeline.
        results_dict = self.data_aug_forward(batch)
        #has seg probs in entry 'seg_preds'
        if self.mode == 'val':
            # attach patient-wise ground-truth boxes (incl. any configured roi_items) for the evaluator.
            for b in range(batch['patient_bb_target'].shape[0]):
                for t in range(len(batch['patient_bb_target'][b])):
                    gt_box = {'box_type': 'gt', 'box_coords': batch['patient_bb_target'][b][t],
                              'class_targets': batch['patient_class_targets'][b][t]}
                    for name in self.cf.roi_items:
                        gt_box.update({name : batch['patient_'+name][b][t]})
                    results_dict['boxes'][b].append(gt_box)
            if 'dice' in self.cf.metrics:
                if self.patched_patient:
                    assert 'patient_seg' in batch.keys(), "Results_dict preds are in original patient shape."
                results_dict['batch_dices'] = mutils.dice_per_batch_and_class(
                    results_dict['seg_preds'], batch["patient_seg"] if self.patched_patient else batch['seg'],
                    self.cf.num_seg_classes, convert_to_ohe=True)
            if self.patched_patient and self.cf.clustering == "wbc":
                wbc_input = [self.regress_flag, results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.clustering_iou, self.n_ens]
                results_dict['boxes'] = apply_wbc_to_patient(wbc_input)[0]
            elif self.patched_patient:
                nms_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.clustering_iou]
                results_dict['boxes'] = apply_nms_to_patient(nms_inputs)[0]
            if self.cf.merge_2D_to_3D_preds:
                # keep the un-merged 2D boxes around for inspection before replacing them with merged 3D boxes.
                results_dict['2D_boxes'] = results_dict['boxes']
                merge_dims_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.merge_3D_iou]
                results_dict['boxes'] = apply_2d_3d_merging_to_patient(merge_dims_inputs)[0]
        return results_dict
    def predict_test_set(self, batch_gen, return_results=True):
        """
        wrapper around test method, which loads multiple (or one) epoch parameters (temporal ensembling), loops through
        the test set and collects predictions per patient. Also flattens the results per patient and epoch
        and adds optional ground truth boxes for evaluation. Saves out the raw result list for later analysis and
        optionally consolidates and returns predictions immediately.
        :return: (optionally) list_of_results_per_patient: list over patient results. each entry is a dict with keys:
                 - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                            one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
                            (if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
                 - 'seg_preds': not implemented yet. todo evaluation of instance/semantic segmentation.
        """
        # -------------- raw predicting -----------------
        dict_of_patients_results = OrderedDict()
        set_of_result_types = set()
        self.model_index = self.model_index.sort_values(by="rank")
        # get paths of all parameter sets to be loaded for temporal ensembling. (or just one for no temp. ensembling).
        weight_paths = [os.path.join(self.cf.fold_dir, file_name) for file_name in self.model_index["file_name"]]
        for rank_ix, weight_path in enumerate(weight_paths):
            self.logger.info(('tmp ensembling over rank_ix:{} epoch:{}'.format(rank_ix, weight_path)))
            self.net.load_state_dict(torch.load(weight_path))
            self.net.eval()
            self.rank_ix = str(rank_ix)
            # randomly pick batches to produce qualitative example plots for.
            plot_batches = np.random.choice(np.arange(batch_gen['n_test']),
                                            size=min(batch_gen['n_test'], self.cf.n_test_plots), replace=False)
            with torch.no_grad():
                for i in range(batch_gen['n_test']):
                    batch = next(batch_gen['test'])
                    pid = np.unique(batch['pid'])
                    assert len(pid)==1
                    pid = pid[0]
                    if not pid in dict_of_patients_results.keys(): # store batch info in patient entry of results dict.
                        dict_of_patients_results[pid] = {}
                        dict_of_patients_results[pid]['results_dicts'] = []
                        dict_of_patients_results[pid]['patient_bb_target'] = batch['patient_bb_target']
                        for name in self.cf.roi_items:
                            dict_of_patients_results[pid]["patient_"+name] = batch["patient_"+name]
                    stime = time.time()
                    results_dict = self.predict_patient(batch) #only holds "boxes", "seg_preds"
                    # needs ohe seg probs in seg_preds entry:
                    results_dict['seg_preds'] = np.argmax(results_dict['seg_preds'], axis=1)[:,np.newaxis]
                    print("\rpredicting patient {} with weight rank {} (progress: {}/{}) took {:.2f}s".format(
                        str(pid), rank_ix, (rank_ix)*batch_gen['n_test']+(i+1), len(weight_paths)*batch_gen['n_test'],
                        time.time()-stime), end="", flush=True)
                    if i in plot_batches and (not self.patched_patient or 'patient_data' in batch.keys()):
                        try:
                            # view qualitative results of random test case
                            out_file = os.path.join(self.example_plot_dir,
                                                    'batch_example_test_{}_rank_{}.png'.format(self.cf.fold, rank_ix))
                            utils.split_off_process(plg.view_batch, self.cf, batch, results_dict,
                                                    has_colorchannels=self.cf.has_colorchannels,
                                                    show_gt_labels=True, show_seg_ids='dice' in self.cf.metrics,
                                                    get_time="test-example plot", out_file=out_file)
                        except Exception as e:
                            # plotting is best-effort; never abort the test run over a failed example plot.
                            self.logger.info("WARNING: error in view_batch: {}".format(e))
                    if 'dice' in self.cf.metrics:
                        if self.patched_patient:
                            assert 'patient_seg' in batch.keys(), "Results_dict preds are in original patient shape."
                        results_dict['batch_dices'] = mutils.dice_per_batch_and_class( results_dict['seg_preds'],
                                batch["patient_seg"] if self.patched_patient else batch['seg'],
                                self.cf.num_seg_classes, convert_to_ohe=True)
                    dict_of_patients_results[pid]['results_dicts'].append({k:v for k,v in results_dict.items()
                                                                           if k in ["boxes", "batch_dices"]})
                    # collect result types to know which ones to look for when saving
                    set_of_result_types.update(dict_of_patients_results[pid]['results_dicts'][-1].keys())
        # -------------- re-order, save raw results -----------------
        self.logger.info('finished predicting test set. starting aggregation of predictions.')
        results_per_patient = []
        for pid, p_dict in dict_of_patients_results.items():
            # dict_of_patients_results[pid]['results_list'] has length batch['n_test']
            results_dict = {}
            # collect all boxes/seg_preds of same batch_instance over temporal instances.
            b_size = len(p_dict['results_dicts'][0]["boxes"])
            for res_type in [rtype for rtype in set_of_result_types if rtype in ["boxes", "batch_dices"]]:#, "seg_preds"]]:
                if not 'batch' in res_type: #assume it's results on batch-element basis
                    results_dict[res_type] = [[item for rank_dict in p_dict['results_dicts'] for item in rank_dict[res_type][batch_instance]]
                                              for batch_instance in range(b_size)]
                else:
                    results_dict[res_type] = []
                    # NOTE(review): loop variable shadows the builtin 'dict' within this block.
                    for dict in p_dict['results_dicts']:
                        if 'dice' in res_type:
                            item = dict[res_type] #dict['batch_dices'] has shape (num_seg_classes,)
                            assert len(item) == self.cf.num_seg_classes, \
                                "{}, {}".format(len(item), self.cf.num_seg_classes)
                        else:
                            raise NotImplementedError
                        results_dict[res_type].append(item)
                    # rdict[dice] shape (n_rank_epochs (n_saved_ranks), nsegclasses)
                    # calc mean over test epochs so inline with shape from sampling
                    results_dict[res_type] = np.mean(results_dict[res_type], axis=0) #maybe error type with other than dice
            if not hasattr(self.cf, "eval_test_separately") or not self.cf.eval_test_separately:
                # add unpatched 2D or 3D (if dim==3 or merge_2D_to_3D) ground truth boxes for evaluation.
                for b in range(p_dict['patient_bb_target'].shape[0]):
                    for targ in range(len(p_dict['patient_bb_target'][b])):
                        gt_box = {'box_type': 'gt', 'box_coords':p_dict['patient_bb_target'][b][targ],
                                  'class_targets': p_dict['patient_class_targets'][b][targ]}
                        for name in self.cf.roi_items:
                            gt_box.update({name: p_dict["patient_"+name][b][targ]})
                        results_dict['boxes'][b].append(gt_box)
            results_per_patient.append([results_dict, pid])
        out_string = 'pred_results_held_out' if self.cf.hold_out_test_set else 'pred_results'
        with open(os.path.join(self.cf.fold_dir, '{}.pkl'.format(out_string)), 'wb') as handle:
            pickle.dump(results_per_patient, handle)
        if return_results:
            # -------------- results processing, clustering, etc. -----------------
            final_patient_box_results = [ (res_dict["boxes"], pid) for res_dict,pid in results_per_patient ]
            if self.cf.clustering == "wbc":
                self.logger.info('applying WBC to test-set predictions with iou = {} and n_ens = {}.'.format(
                    self.cf.clustering_iou, self.n_ens))
                mp_inputs = [[self.regress_flag, ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou, self.n_ens] for ii in final_patient_box_results]
                del final_patient_box_results
                pool = Pool(processes=self.cf.n_workers)
                final_patient_box_results = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
                pool.close()
                pool.join()
                del mp_inputs
            elif self.cf.clustering == "nms":
                self.logger.info('applying standard NMS to test-set predictions with iou = {}.'.format(self.cf.clustering_iou))
                pool = Pool(processes=self.cf.n_workers)
                mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou] for ii in final_patient_box_results]
                del final_patient_box_results
                final_patient_box_results = pool.map(apply_nms_to_patient, mp_inputs, chunksize=1)
                pool.close()
                pool.join()
                del mp_inputs
            if self.cf.merge_2D_to_3D_preds:
                self.logger.info('applying 2D-to-3D merging to test-set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
                mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in final_patient_box_results]
                del final_patient_box_results
                pool = Pool(processes=self.cf.n_workers)
                final_patient_box_results = pool.map(apply_2d_3d_merging_to_patient, mp_inputs, chunksize=1)
                pool.close()
                pool.join()
                del mp_inputs
            # final_patient_box_results holds [avg_boxes, pid] if wbc
            for ix in range(len(results_per_patient)):
                assert results_per_patient[ix][1] == final_patient_box_results[ix][1], "should be same pid"
                results_per_patient[ix][0]["boxes"] = final_patient_box_results[ix][0]
            # results_per_patient = [(res_dict["boxes"] = boxes, pid) for (boxes,pid) in final_patient_box_results]
        return results_per_patient  # holds list of (results_dict, pid)
| 1.882813 | 2 |
archive/jonesboro/__init__.py | jayktee/scrapers-us-municipal | 67 | 12761 | <filename>archive/jonesboro/__init__.py
from pupa.scrape import Jurisdiction
from legistar.ext.pupa import LegistarPeopleScraper
class Jonesboro(Jurisdiction):
    """Pupa jurisdiction definition for the Jonesboro, AR city council."""
    # OCD (Open Civic Data) division identifier for the city of Jonesboro, AR.
    division_id = 'ocd-division/country:us/state:ar/place:jonesboro'
    # OCD jurisdiction identifier: the division's government entity.
    jurisdiction_id = 'ocd-jurisdiction/country:us/state:ar/place:jonesboro/government'
    name = 'Jonesboro City Council'
    # Legistar site this jurisdiction is scraped from.
    url = 'http://jonesboro.legistar.com/'
    # scraper registry: pupa dispatches by key; only people are scraped here.
    scrapers = {
        "people": LegistarPeopleScraper,
    }
| 1.773438 | 2 |
src/config.py | La-tale/MessyTable | 32 | 12762 | <filename>src/config.py<gh_stars>10-100
import yaml
import os
def parse_config(args):
    """
    Prepare the configuration dictionary for training/evaluation.

    Loads ``models/<config_dir>/train.yaml``, resolves project-relative paths,
    fills in defaults for all optional settings, and precomputes the camera
    pair lists used for cross-view matching.

    :param args: parsed CLI arguments; only ``args.config_dir`` is used.
    :return: config dict with paths, defaults and camera-pair lists filled in.
    """
    # Resolve the directory containing this source file. The original code used
    # os.path.realpath('__file__') (a string literal), which silently resolved
    # against the current working directory instead of this file.
    file_dir = os.path.dirname(os.path.realpath(__file__))
    messytable_dir = os.path.realpath(os.path.join(file_dir, '..'))
    config_pathname = os.path.join(messytable_dir, 'models', args.config_dir, 'train.yaml')
    # safe_load avoids arbitrary python-object construction from the yaml, and the
    # 'with' block closes the handle (the original leaked it via yaml.load(open(...))).
    with open(config_pathname, 'r') as config_file:
        config = yaml.safe_load(config_file)
    config['messytable_dir'] = messytable_dir
    config['config_dir'] = os.path.join(messytable_dir, 'models', args.config_dir)
    # NOTE: either indicate data_dir in the yaml or put the data in messytable/data
    config.setdefault('data_dir', os.path.join(messytable_dir, 'data'))
    config['img_dir'] = os.path.join(config['data_dir'], 'images')
    config['train_label_pathname'] = os.path.join(config['data_dir'], 'labels', config['train_json'])
    # defaults for optional settings (only applied when absent from the yaml).
    config.setdefault('num_workers', 16)
    config.setdefault('milestones', [60, 80])
    config.setdefault('split_samples_in_func', True)
    config.setdefault('loss_func', 'ERROR_LOSS_FUNC')
    config.setdefault('triplet_margin', 0.3)
    config.setdefault('data_augmentation', False)
    config['cropped_img_size'] = (config['cropped_height'], config['cropped_width'])
    config['original_img_size'] = (config['img_height'], config['img_width'])
    config.setdefault('scene_ratio', 1.0)
    config.setdefault('cam_selected_num', 8)
    config.setdefault('triplet_sampling_ratio', [0.5, 0.3, 0.2])
    config.setdefault('image_pairs_per_batch', 24)
    if 'triplet_batch_size' not in config:
        config['triplet_batch_size'] = config['batch_size']
    config['learning_rate'] = float(config['learning_rate'])
    config['zoomout_crop_num'] = 'single_crop' if len(config['zoomout_ratio']) == 1 else 'multi_crops'
    # all ordered camera pairs (i, j) with i < j, cameras named '1'..'9'.
    test_cam_pairs = [(str(i), str(j)) for i in range(1, 9) for j in range(i + 1, 10)]
    # training additionally uses the reversed orderings of every pair.
    reversed_cam_pairs = [(j, i) for i, j in test_cam_pairs]
    config['test_cam_pairs'] = test_cam_pairs
    config['train_cam_pairs'] = test_cam_pairs + reversed_cam_pairs
    config['cam_list'] = [str(i) for i in range(1, 10)]
    return config
| 2.3125 | 2 |
tests/settings.py | josemarimanio/django-adminlte2-templates | 10 | 12763 | <filename>tests/settings.py
import os

# Project root: two levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Placeholder key for the test suite ONLY -- never use in production.
# (The original line was corrupted by redaction and was not valid Python.)
SECRET_KEY = 'test-only-insecure-secret-key'  # nosec

INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'adminlte2_templates',
    'tests',
]

MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
]

ROOT_URLCONF = 'tests.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [os.path.join(BASE_DIR, 'tests/templates')],
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'adminlte2_templates.context_processors.template',
            ],
        },
    },
]

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Fast (deliberately weak) hasher keeps the test runs quick.
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.MD5PasswordHasher',
]
| 1.703125 | 2 |
two-variables-function-fitting/fxy_gen.py | ettoremessina/fitting-with-mlp-using-tensorflow | 9 | 12764 | <filename>two-variables-function-fitting/fxy_gen.py
import argparse
import numpy as np
import csv
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='fxy_gen.py generates a synthetic dataset file calling a two-variables real function on a rectangle')
parser.add_argument('--dsout',
type=str,
dest='ds_output_filename',
required=True,
help='dataset output file (csv format)')
parser.add_argument('--fxy',
type=str,
dest='func_xy_body',
required=True,
help='f(x, y) body (lamba format)')
parser.add_argument('--rxbegin',
type=float,
dest='range_xbegin',
required=False,
default=-5.0,
help='begin x range (default:-5.0)')
parser.add_argument('--rxend',
type=float,
dest='range_xend',
required=False,
default=+5.0,
help='end x range (default:+5.0)')
parser.add_argument('--rybegin',
type=float,
dest='range_ybegin',
required=False,
default=-5.0,
help='begin y range (default:-5.0)')
parser.add_argument('--ryend',
type=float,
dest='range_yend',
required=False,
default=+5.0,
help='end y range (default:+5.0)')
parser.add_argument('--rstep',
type=float,
dest='range_step',
required=False,
default=0.01,
help='step range (default: 0.01)')
args = parser.parse_args()
print("#### Started {} {} ####".format(__file__, args));
x_values = np.arange(args.range_xbegin, args.range_xend, args.range_step, dtype=float)
y_values = np.arange(args.range_ybegin, args.range_yend, args.range_step, dtype=float)
func_xy = eval('lambda x, y: ' + args.func_xy_body)
csv_ds_output_file = open(args.ds_output_filename, 'w')
with csv_ds_output_file:
writer = csv.writer(csv_ds_output_file, delimiter=',')
for i in range(0, x_values.size):
for j in range(0, y_values.size):
writer.writerow([x_values[i], y_values[j], func_xy(x_values[i], y_values[j])])
print("#### Terminated {} ####".format(__file__));
| 3.0625 | 3 |
custom/icds_reports/dashboard_utils.py | tstalka/commcare-hq | 0 | 12765 | <filename>custom/icds_reports/dashboard_utils.py<gh_stars>0
from corehq.apps.locations.util import location_hierarchy_config
from custom.icds_reports.utils import icds_pre_release_features
def get_dashboard_template_context(domain, couch_user):
    """Assemble the template context for the ICDS dashboard view.

    :param domain: name of the domain the dashboard is rendered for
    :param couch_user: the requesting user object
    :return: dict of context variables consumed by the dashboard template
    """
    # Build the location queryset once and reuse it for both lookups below.
    locations = couch_user.get_sql_locations(domain)
    context = {
        'location_hierarchy': location_hierarchy_config(domain),
        'user_location_id': couch_user.get_location_id(domain),
        'all_user_location_id': list(locations.location_ids()),
        # True when the user is assigned to at least one state-level location.
        'state_level_access': 'state' in {
            loc.location_type.code for loc in locations
        },
        'have_access_to_features': icds_pre_release_features(couch_user),
        'have_access_to_all_locations': couch_user.has_permission(
            domain, 'access_all_locations'
        ),
    }
    if context['have_access_to_all_locations']:
        # Domain-wide access: no single anchor location for the user.
        context['user_location_id'] = None
    if couch_user.is_web_user():
        context['is_web_user'] = True
    return context
| 1.898438 | 2 |
data_input.py | zpcore/OnePass | 0 | 12766 | import json
import string, sys
from random import *
class Token:
    """A single credential record (account, site, email, username, password)."""

    def __init__(self):
        # All fields start unset; get_input() fills them interactively.
        self.company, self.website, self.email, self.username, self.password = None, None, None, None, None

    def get_input(self):
        """Interactively populate the token fields from stdin.

        Account association, username and password are mandatory and are
        re-prompted until non-empty; website and email may be left blank.
        """
        while self.company in (None, ''):
            self.company = input('Account Association:')
            if self.company in (None, ''):
                print('Account Association cannot be null, try again.')
        self.website = input('Website linked to the account:')
        self.email = input('Email linked to the account:')
        while self.username in (None, ''):
            self.username = input('Username:')
            if self.username in (None, ''):
                print('Username cannot be null, try again.')
        while self.password in (None, ''):
            select = input('Random generate a password for you? Type Y or N. ').strip().lower()
            if select in ('y', 'yes'):
                # Use the cryptographically secure ``secrets`` RNG for
                # password material; ``random`` is predictable and unsafe
                # for credentials.
                import secrets
                characters = string.ascii_letters + string.punctuation + string.digits
                low_bound, up_bound = 10, 20
                # Length uniform in [low_bound, up_bound], like randint().
                length = low_bound + secrets.randbelow(up_bound - low_bound + 1)
                self.password = "".join(secrets.choice(characters) for _ in range(length))
                print('auto generated password:' + self.password)
            elif select in ('n', 'no'):
                self.password = input('Password:')
                if self.password in (None, ''):
                    print('Password cannot be null, try again.')
            else:
                print('Incorrect choice. Try again.')
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes Token instances via their __dict__."""

    def default(self, obj):
        if isinstance(obj, Token):
            return obj.__dict__
        # Anything else: defer to the base class (which raises TypeError).
        return super().default(obj)
# tok = Token()
# tok.get_input()
# print(json.dumps(tok, cls=MyEncoder)) | 3.46875 | 3 |
examples/python/test_dict.py | SmartEconomyWorkshop/workshop | 79 | 12767 | <gh_stars>10-100
from boa_test.tests.boa_test import BoaTest
from boa.compiler import Compiler
from neo.Settings import settings
from neo.Prompt.Commands.BuildNRun import TestBuild
class TestContract(BoaTest):
    """Compile-and-run smoke tests for the DictTest example contracts."""

    def _compile_and_run(self, example):
        """Compile *example* (relative to boa_test/example) and run it.

        Returns ``(compiler_output, results)`` where ``results`` is the
        evaluation stack left behind by executing the script.
        """
        output = Compiler.instance().load('%s/boa_test/example/%s' % (TestContract.dirname, example)).default
        out = output.write()
        tx, results, total_ops, engine = TestBuild(out, [], self.GetWallet1(), '', '02')
        return output, results

    def test_dict1(self):
        _, results = self._compile_and_run('DictTest1.py')
        self.assertEqual(len(results), 1)
        self.assertIsInstance(results[0].GetMap(), dict)
        self.assertEqual(results[0].GetBoolean(), True)

    def test_dict2(self):
        _, results = self._compile_and_run('DictTest2.py')
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].GetBigInteger(), 7)

    def test_dict3(self):
        output, results = self._compile_and_run('DictTest3.py')
        # Regression check: the compiled module can be pretty-printed.
        string_output = output.to_s()
        self.assertGreater(len(string_output), 0)
        self.assertEqual(len(results), 1)
        self.assertIsInstance(results[0].GetMap(), dict)
| 2.203125 | 2 |
deployment_classifier/setup.py | m-santh/VayuAnukulani | 1 | 12768 | from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['tensorflow==1.8.0','pandas==0.23.1','setuptools==38.7.0','numpy==1.14.1','Keras==2.1.4','scikit_learn==0.19.1','h5py']
setup(
name='classifier',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='My training application package.',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
zip_safe=False
)
| 1.328125 | 1 |
day5.py | zsmoore/Advent-Of-Code-2017 | 0 | 12769 | <filename>day5.py<gh_stars>0
import sys
import copy
def main():
    """Read one jump offset per line from the file named in argv[1] and
    print the part-two escape step count (AoC 2017, day 5)."""
    # ``with`` guarantees the file is closed (the original leaked the handle).
    with open(sys.argv[1], 'r') as in_file:
        jumps = [int(line.strip()) for line in in_file]
    print(compute_exit2(jumps))
def compute_exit(jump_list):
    """Follow the jump offsets, incrementing each visited offset by one,
    and return the number of steps taken before escaping the list.

    Mutates ``jump_list`` in place.
    """
    pos = 0
    steps = 0
    while 0 <= pos < len(jump_list):
        offset = jump_list[pos]
        jump_list[pos] = offset + 1
        pos += offset
        steps += 1
    return steps
def compute_exit2(jump_list):
    """Like :func:`compute_exit`, but offsets of three or more are
    decremented after use instead of incremented (AoC 2017 day 5, part two).

    Mutates ``jump_list`` in place and returns the step count.
    """
    current_ind = 0
    step_num = 0
    while 0 <= current_ind < len(jump_list):
        step = jump_list[current_ind]
        # Offsets >= 3 decrease by one; every other offset increases by one.
        # (The original had a redundant ``elif step <= -3`` branch whose body
        # duplicated the else -- folded here without changing behavior.)
        jump_list[current_ind] += -1 if step >= 3 else 1
        current_ind += step
        step_num += 1
    return step_num
if __name__ == "__main__":
main()
| 3.28125 | 3 |
pycudasirecon/_recon_params.py | tlambert03/pycudasirecon | 2 | 12770 | import os
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from typing import Optional, Sequence
from pydantic import BaseModel, Field, FilePath
@contextmanager
def temp_config(**kwargs):
    """A context manager that creates a temporary config file for SIMReconstructor.

    ``**kwargs`` should be valid keyword arguments for :class:`ReconParams`.
    Yields the (closed) temporary file object; the file is removed on exit.
    """
    config_bytes = ReconParams(**kwargs).to_config().encode()
    # delete=False so the file can be re-opened by name on all platforms;
    # we remove it ourselves in the finally block.
    handle = NamedTemporaryFile(delete=False)
    handle.file.write(config_bytes)
    handle.close()
    try:
        yield handle
    finally:
        os.unlink(handle.name)
class ReconParams(BaseModel):
    """Validated parameter set for SIM reconstruction.

    Each field maps to one ``key=value`` line of the reconstruction config
    file produced by :meth:`to_config`; field names use underscores where
    the config keys use dashes.
    """

    otf_file: Optional[FilePath] = Field(None, description="OTF file")
    usecorr: bool = Field(
        False, description="use the flat-field correction file provided"
    )
    ndirs: int = Field(default=3, description="number of directions")
    nphases: int = Field(default=5, description="number of phases per direction")
    nordersout: int = Field(
        0, description="number of output orders; must be <= norders"
    )
    angle0: float = Field(1.648, description="angle of the first direction in radians")
    ls: float = Field(0.172, description="line spacing of SIM pattern in microns")
    na: float = Field(1.42, description="Detection numerical aperture")
    nimm: float = Field(1.515, description="refractive index of immersion medium")
    zoomfact: float = Field(2, description="lateral oversampling factor")
    explodefact: float = Field(
        1,
        description="artificially exploding the reciprocal-space "
        "distance between orders by this factor",
    )
    zzoom: int = Field(1, description="axial zoom factor")
    nofilteroverlaps: bool = Field(
        False,
        description="do not filter the overlaping region between bands "
        "usually used in trouble shooting",
    )
    background: float = Field(0, description="camera readout background")
    wiener: float = Field(0.01, description="Wiener constant")
    forcemodamp: Optional[Sequence[float]] = Field(
        None, description="modamps forced to these values"
    )
    k0angles: Optional[Sequence[float]] = Field(
        None, description="user given pattern vector k0 angles for all directions"
    )
    otfRA: bool = Field(True, description="using rotationally averaged OTF")
    otfPerAngle: bool = Field(True, description="using one OTF per SIM angle")
    fastSI: bool = Field(
        True,
        description="SIM data is organized in Z->Angle->Phase order; "
        "default being Angle->Z->Phase",
    )
    k0searchAll: bool = Field(False, description="search for k0 at all time points")
    norescale: bool = Field(False, description="bleach correcting for z")  # TODO
    equalizez: bool = Field(True, description="bleach correcting for z")
    equalizet: bool = Field(True, description="bleach correcting for time")
    dampenOrder0: bool = Field(True, description="dampen order-0 in final assembly")
    nosuppress: bool = Field(
        False,
        description="do not suppress DC singularity in final assembly "
        "(good idea for 2D/TIRF data)",
    )
    nokz0: bool = Field(
        True, description="do not use kz=0 plane of the 0th order in the final assembly"
    )
    gammaApo: float = Field(
        1, description="output apodization gamma; 1.0 means triangular apo"
    )
    bessel: bool = Field(False, description="bessel-SIM data")
    besselExWave: float = Field(
        0.488, description="Bessel SIM excitation wavelength in microns"
    )
    besselNA: float = Field(0.144, description="Bessel SIM excitation NA)")
    deskew: float = Field(
        0,
        description="Deskew angle; if not 0.0 then perform deskewing before processing",
    )
    deskewshift: int = Field(
        0,
        description="If deskewed, the output image's extra shift in X (positive->left)",
    )
    noRecon: bool = Field(
        False,
        description="No reconstruction will be performed; "
        "useful when combined with --deskew",
    )
    cropXY: int = Field(
        0, description="Crop the XY dimension to this number; 0 means no cropping"
    )
    xyres: float = Field(0.1, description="XY pixel size")
    zres: float = Field(0.2, description="Z step size")
    zresPSF: float = Field(0.15, description="Z step size of the PSF")
    wavelength: int = Field(530, description="emission wavelength in nanometers")
    writeTitle: bool = Field(
        False,
        description="Write command line to image header "
        "(may cause issues with bioformats)",
    )

    def to_config(self, exclude_unset=True):
        """Serialize the set parameters to the ``key=value`` config format.

        Underscores in field names become dashes; booleans are written as
        0/1; ``k0angles`` is joined into a comma-separated list.
        """
        lines = []
        for k, v in self.dict(exclude_unset=exclude_unset).items():
            if k == "k0angles":
                v = ",".join(str(x) for x in v)
            # NOTE(review): ``forcemodamp`` is also a sequence but is NOT
            # comma-joined like k0angles, so it would render as a Python
            # list repr -- confirm the config parser accepts that form.
            if isinstance(v, bool):
                v = int(v)
            lines.append(f'{k.replace("_", "-")}={v}')
        return "\n".join(lines)
| 2.359375 | 2 |
tesseract_converters/tesseract_to_sa_converter.py | superannotateai/annotateonline-input-converters | 10 | 12771 | <reponame>superannotateai/annotateonline-input-converters
import os
import json
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
help='Path to input files or folder\
with tesseract dict format.\
File name structure \
[IMAGE_NAME]___tess.json',
required=True
)
parser.add_argument(
'--output',
help='Path to output folder.\
File name structure \
[IMAGE_NAME]___objects.json'
)
parser.add_argument(
'--verbose',
default='0',
choices=['0', '1', '2'],
help="0 -- Doesn't print anything,\
1 -- Prints number of converted files,\
2 -- Prints number of converted files and unconverted files path."
)
args = parser.parse_args()
input_files_list = get_input_list(args.input)
file_name = [os.path.basename(file) for file in input_files_list]
output_files_list = []
if args.output == None:
output_files_list = get_output_list(file_name)
else:
output_files_list = get_output_list(file_name, args.output)
converter(input_files_list, output_files_list, args.verbose)
def get_input_list(pathname):
input_files_list = []
try:
if os.path.isfile(pathname):
input_files_list.append(os.path.abspath(pathname))
else:
list_files = os.listdir(pathname)
abs_path = os.path.abspath(pathname)
for file in list_files:
input_files_list.append(os.path.join(abs_path, file))
except IOError:
print("ERROR: '%s' file or folder doesn't exist!" % (pathname))
return input_files_list
def get_output_list(input_list, pathname='./output'):
if os.path.exists(pathname):
abs_path = os.path.abspath(pathname)
else:
os.makedirs(pathname)
abs_path = os.path.abspath(pathname)
output_files_list = []
for file in input_list:
output_files_list.append(
os.path.join(abs_path,
file.split("___")[0] + "___objects.json")
)
return output_files_list
def converter(input_files_list, output_files_list, verbose=0):
converted = 0
for file_in, file_out in zip(input_files_list, output_files_list):
try:
file_json = json.load(open(file_in))
output = []
for i in range(len(file_json['level'])):
if file_json["text"][i] != "" and file_json["text"][i] != " ":
dd = {
"type": "bbox",
"points":
{
"x1":
file_json["left"][i],
"y1":
file_json["top"][i],
"x2":
file_json["left"][i] +
file_json["width"][i],
"y2":
file_json["top"][i] + file_json["height"][i]
},
"className": "Text",
"classId": 2031,
"pointLabels": {
"0": file_json["text"][i]
},
"attributes": [],
"probability": 100,
"locked": False,
"visible": True,
"groupId": 0,
"imageId": 0
}
output.append(dd)
json.dump(output, open(file_out, "w"), indent=2)
converted += 1
except ValueError:
if verbose == '2':
print("WARNING: '%s' file is not json format!" % (file_in))
if int(verbose) > 0:
print(
"Converted to sa format: %d of %d" %
(converted, len(input_files_list))
)
if __name__ == '__main__':
main() | 2.75 | 3 |
fhirclient/r4models/contract_tests.py | cspears-mitre/CapStatement | 1 | 12772 | <filename>fhirclient/r4models/contract_tests.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 on 2018-12-20.
# 2018, SMART Health IT.
import os
import io
import unittest
import json
from . import contract
from .fhirdate import FHIRDate
class ContractTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Contract", js["resourceType"])
return contract.Contract(js)
    def testContract1(self):
        """Round-trip pcd-example-notOrg.json through the model and back."""
        inst = self.instantiate_from("pcd-example-notOrg.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract1(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract1(inst2)

    def implContract1(self, inst):
        """Assert field-level expectations for the pcd-example-notOrg resource."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notOrg")
        self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.issued.as_json(), "2015-11-18")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].offer.text, "Withhold this order and any results or related objects from any provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-from")
        self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data from specified actor entity.")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
    def testContract2(self):
        """Round-trip contract-example-ins-policy.json through the model and back."""
        inst = self.instantiate_from("contract-example-ins-policy.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract2(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract2(inst2)

    def implContract2(self, inst):
        """Assert field-level expectations for the insurance-policy example."""
        self.assertEqual(inst.applies.start.date, FHIRDate("2017-01-01").date)
        self.assertEqual(inst.applies.start.as_json(), "2017-01-01")
        self.assertEqual(inst.id, "INS-101")
        self.assertEqual(inst.identifier[0].system, "http://xyz-insurance.com/forms")
        self.assertEqual(inst.identifier[0].value, "YCSCWLN(01-2017)")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.term[0].asset[0].period[0].start.date, FHIRDate("2017-06-01").date)
        self.assertEqual(inst.term[0].asset[0].period[0].start.as_json(), "2017-06-01")
        self.assertEqual(inst.term[0].asset[0].subtype[0].text, "sample")
        self.assertEqual(inst.term[0].asset[0].type[0].coding[0].code, "RicardianContract")
        self.assertEqual(inst.term[0].asset[0].type[0].coding[0].system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].effectiveTime.date, FHIRDate("1995").date)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].effectiveTime.as_json(), "1995")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].entityCodeableConcept.text, "<NAME>")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].factor, 1.0)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].identifier.system, "http://somewhere.motor-vehicle.com/vin")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].identifier.value, "XXSVT34-7665t952236")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].net.currency, "CAD")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].net.value, 200.0)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].points, 1.0)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].quantity.value, 1)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].unitPrice.currency, "CAD")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].unitPrice.value, 200.0)
        self.assertEqual(inst.term[0].group[0].offer.text, "Eligible Providers")
        self.assertEqual(inst.term[0].group[1].offer.text, "Responsibility for Payment")
        self.assertEqual(inst.term[0].group[2].group[0].group[0].offer.text, "Emergency Room Copay")
        self.assertEqual(inst.term[0].group[2].group[0].group[1].offer.text, "Professional Visit Copay")
        self.assertEqual(inst.term[0].group[2].group[0].offer.text, "Copays")
        self.assertEqual(inst.term[0].group[2].group[1].offer.text, "Calendar Year Deductible")
        self.assertEqual(inst.term[0].group[2].group[2].offer.text, "Out-Of-Pocket Maximum")
        self.assertEqual(inst.term[0].group[2].group[3].group[0].offer.text, "Ambulance Services")
        self.assertEqual(inst.term[0].group[2].group[3].group[1].offer.text, "Dental Services")
        self.assertEqual(inst.term[0].group[2].group[3].group[2].offer.text, "Diagnostic Services")
        self.assertEqual(inst.term[0].group[2].group[3].group[3].offer.text, "Emergency Room Services")
        self.assertEqual(inst.term[0].group[2].group[3].group[4].offer.text, "Hospital Inpatient Care")
        self.assertEqual(inst.term[0].group[2].group[3].offer.text, "Medical Services")
        self.assertEqual(inst.term[0].group[2].offer.text, "List of Benefits")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "healthinsurance")
        self.assertEqual(inst.type.coding[0].display, "Health Insurance")
        self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/contract-type")
    def testContract3(self):
        """Round-trip contract-example-42cfr-part2.json through the model and back."""
        inst = self.instantiate_from("contract-example-42cfr-part2.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract3(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract3(inst2)

    def implContract3(self, inst):
        """Assert field-level expectations for the 42 CFR part 2 consent example."""
        self.assertEqual(inst.applies.start.date, FHIRDate("2013-11-01T21:18:27-04:00").date)
        self.assertEqual(inst.applies.start.as_json(), "2013-11-01T21:18:27-04:00")
        self.assertEqual(inst.contentDerivative.coding[0].code, "registration")
        self.assertEqual(inst.contentDerivative.coding[0].system, "http://terminology.hl7.org/CodeSystem/contract-content-derivative")
        self.assertEqual(inst.id, "C-2121")
        self.assertEqual(inst.issued.date, FHIRDate("2013-11-01T21:18:27-04:00").date)
        self.assertEqual(inst.issued.as_json(), "2013-11-01T21:18:27-04:00")
        self.assertEqual(inst.legal[0].contentAttachment.contentType, "application/pdf")
        self.assertEqual(inst.legal[0].contentAttachment.language, "en-US")
        self.assertEqual(inst.legal[0].contentAttachment.title, "MDHHS-5515 Consent To Share Your Health Information")
        self.assertEqual(inst.legal[0].contentAttachment.url, "http://org.mihin.ecms/ConsentDirective-2121")
        self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2016-07-19T18:18:42.108-04:00").date)
        self.assertEqual(inst.meta.lastUpdated.as_json(), "2016-07-19T18:18:42.108-04:00")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.meta.versionId, "1")
        self.assertEqual(inst.signer[0].signature[0].type[0].code, "1.2.840.10065.1.12.1.1")
        self.assertEqual(inst.signer[0].signature[0].type[0].system, "urn:iso-astm:E1762-95:2013")
        self.assertEqual(inst.signer[0].signature[0].when.date, FHIRDate("2017-02-08T10:57:34+01:00").date)
        self.assertEqual(inst.signer[0].signature[0].when.as_json(), "2017-02-08T10:57:34+01:00")
        self.assertEqual(inst.signer[0].type.code, "SELF")
        self.assertEqual(inst.signer[0].type.system, "http://org.mdhhs.fhir.consent-signer-type")
        self.assertEqual(inst.status, "executed")
        self.assertEqual(inst.subType[0].coding[0].code, "hcd")
        self.assertEqual(inst.subType[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentcategorycodes")
        self.assertEqual(inst.term[0].action[0].intent.coding[0].code, "HPRGRP")
        self.assertEqual(inst.term[0].action[0].intent.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.term[0].action[0].status.text, "Sample")
        self.assertEqual(inst.term[0].action[0].subject[0].role.coding[0].code, "IR")
        self.assertEqual(inst.term[0].action[0].subject[0].role.coding[0].display, "Recipient")
        self.assertEqual(inst.term[0].action[0].subject[0].role.coding[0].system, "http://org.mdhhs.fhir.consent-actor-type")
        self.assertEqual(inst.term[0].action[0].subject[0].role.text, "Recipient of restricted health information")
        self.assertEqual(inst.term[0].action[0].subject[1].role.coding[0].code, "IS")
        self.assertEqual(inst.term[0].action[0].subject[1].role.coding[0].display, "Sender")
        self.assertEqual(inst.term[0].action[0].subject[1].role.coding[0].system, "http://org.mdhhs.fhir.consent-actor-type")
        self.assertEqual(inst.term[0].action[0].subject[1].role.text, "Sender of restricted health information")
        self.assertEqual(inst.term[0].action[0].type.coding[0].code, "action-a")
        self.assertEqual(inst.term[0].action[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/contractaction")
        self.assertEqual(inst.term[0].asset[0].period[0].end.date, FHIRDate("2019-11-01T21:18:27-04:00").date)
        self.assertEqual(inst.term[0].asset[0].period[0].end.as_json(), "2019-11-01T21:18:27-04:00")
        self.assertEqual(inst.term[0].asset[0].period[0].start.date, FHIRDate("2013-11-01T21:18:27-04:00").date)
        self.assertEqual(inst.term[0].asset[0].period[0].start.as_json(), "2013-11-01T21:18:27-04:00")
        self.assertEqual(inst.term[0].offer.decision.coding[0].code, "OPTIN")
        self.assertEqual(inst.term[0].offer.decision.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        self.assertEqual(inst.term[0].offer.text, "Can't refuse")
        self.assertEqual(inst.term[0].offer.type.coding[0].code, "statutory")
        self.assertEqual(inst.term[0].offer.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/contracttermtypecodes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "OPTIN")
        self.assertEqual(inst.type.coding[0].system, "http://org.mdhhs.fhir.consentdirective-type")
        self.assertEqual(inst.type.text, "Opt-in consent directive")
    def testContract4(self):
        """Round-trip pcd-example-notLabs.json through the model and back."""
        inst = self.instantiate_from("pcd-example-notLabs.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract4(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract4(inst2)

    def implContract4(self, inst):
        """Assert field-level expectations for the pcd-example-notLabs resource."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notLabs")
        self.assertEqual(inst.issued.date, FHIRDate("2014-08-17").date)
        self.assertEqual(inst.issued.as_json(), "2014-08-17")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].group[0].offer.text, "Withhold orders from any provider.")
        self.assertEqual(inst.term[0].group[0].subType.coding[0].code, "ServiceRequest")
        self.assertEqual(inst.term[0].group[0].subType.coding[0].system, "http://hl7.org/fhir/resource-types")
        self.assertEqual(inst.term[0].group[0].type.coding[0].code, "withhold-object-type")
        self.assertEqual(inst.term[0].group[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.term[0].group[1].offer.text, "Withhold order results from any provider.")
        self.assertEqual(inst.term[0].group[1].subType.coding[0].code, "DiagnosticReport")
        self.assertEqual(inst.term[0].group[1].subType.coding[0].system, "http://hl7.org/fhir/resource-types")
        self.assertEqual(inst.term[0].group[1].type.coding[0].code, "withhold-object-type")
        self.assertEqual(inst.term[0].group[1].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.term[0].offer.text, "sample")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
    def testContract5(self):
        """Round-trip pcd-example-notThem.json through the model and back."""
        inst = self.instantiate_from("pcd-example-notThem.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract5(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract5(inst2)

    def implContract5(self, inst):
        """Assert field-level expectations for the pcd-example-notThem resource."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notThem")
        self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.issued.as_json(), "2015-11-18")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.signer[0].signature[0].type[0].code, "1.2.840.10065.172.16.58.3")
        self.assertEqual(inst.signer[0].signature[0].type[0].system, "urn:iso-astm:E1762-95:2013")
        self.assertEqual(inst.signer[0].signature[0].when.date, FHIRDate("2013-06-08T10:57:34-07:00").date)
        self.assertEqual(inst.signer[0].signature[0].when.as_json(), "2013-06-08T10:57:34-07:00")
        self.assertEqual(inst.signer[0].type.code, "COVPTY")
        self.assertEqual(inst.signer[0].type.system, "http://terminology.hl7.org/CodeSystem/contractsignertypecodes")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].offer.text, "Withhold this order and any results or related objects from specified nurse provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-from")
        self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data from specified actor entity.")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
    def testContract6(self):
        """Round-trip pcd-example-notAuthor.json through the model and back."""
        inst = self.instantiate_from("pcd-example-notAuthor.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract6(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract6(inst2)

    def implContract6(self, inst):
        """Assert field-level expectations for the pcd-example-notAuthor resource."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notAuthor")
        self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.issued.as_json(), "2015-11-18")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].offer.text, "Withhold all data authored by Good Health provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-authored-by")
        self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data authored by specified actor entity.")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
def testContract7(self):
inst = self.instantiate_from("contract-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract7(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract7(inst2)
    def implContract7(self, inst):
        """Assert the expected field values of contract-example.json.

        Invoked twice: once on the freshly parsed instance and once on the
        instance rebuilt from its own JSON serialization.
        """
        self.assertEqual(inst.id, "C-123")
        self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/contract")
        self.assertEqual(inst.identifier[0].value, "12347")
        self.assertEqual(inst.legallyBindingAttachment.contentType, "application/pdf")
        self.assertEqual(inst.legallyBindingAttachment.url, "http://www.aws3.com/storage/doc.pdf")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.rule[0].contentAttachment.contentType, "application/txt")
        self.assertEqual(inst.rule[0].contentAttachment.url, "http://www.rfc-editor.org/bcp/bcp13.txt")
        self.assertEqual(inst.term[0].asset[0].period[0].start.date, FHIRDate("2017-06-01").date)
        self.assertEqual(inst.term[0].asset[0].period[0].start.as_json(), "2017-06-01")
        self.assertEqual(inst.term[0].asset[0].subtype[0].text, "sample")
        self.assertEqual(inst.term[0].asset[0].type[0].coding[0].code, "RicardianContract")
        self.assertEqual(inst.term[0].asset[0].type[0].coding[0].system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].effectiveTime.date, FHIRDate("1995").date)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].effectiveTime.as_json(), "1995")
        # NOTE(review): "<NAME>" looks like a redaction placeholder, not the
        # real fixture value — verify against the source JSON fixture.
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].entityCodeableConcept.text, "<NAME>")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].factor, 1.0)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].identifier.system, "http://somewhere.motor-vehicle.com/vin")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].identifier.value, "XXSVT34-7665t952236")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].net.currency, "CAD")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].net.value, 200.0)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].points, 1.0)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].quantity.value, 1)
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].unitPrice.currency, "CAD")
        self.assertEqual(inst.term[0].asset[0].valuedItem[0].unitPrice.value, 200.0)
        self.assertEqual(inst.term[0].offer.text, "Can't refuse")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the contract</div>")
        self.assertEqual(inst.text.status, "generated")
def testContract8(self):
inst = self.instantiate_from("pcd-example-notThis.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract8(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract8(inst2)
    def implContract8(self, inst):
        """Assert the expected field values of pcd-example-notThis.json.

        Invoked twice: once on the freshly parsed instance and once on the
        instance rebuilt from its own JSON serialization.
        """
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notThis")
        self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.issued.as_json(), "2015-11-18")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].applies.start.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.term[0].applies.start.as_json(), "2015-11-18")
        self.assertEqual(inst.term[0].identifier.system, "http://example.org/fhir/term-items")
        self.assertEqual(inst.term[0].identifier.value, "3347689")
        self.assertEqual(inst.term[0].issued.date, FHIRDate("2015-11-01").date)
        self.assertEqual(inst.term[0].issued.as_json(), "2015-11-01")
        self.assertEqual(inst.term[0].offer.text, "Withhold this order and any results or related objects from any provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-identified-object-and-related")
        self.assertEqual(inst.term[0].type.coding[0].display, "Withhold the identified object and any other resources that are related to this object.")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
| 2.421875 | 2 |
evaluation/evaluation.py | Ennosigaeon/xautoml | 4 | 12773 | import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_ind
from sklearn.preprocessing import LabelEncoder
def load_data():
    """Read the questionnaire, requirement-ranking and task-result tables.

    Likert answers in the questionnaire are normalised (stripped,
    lower-cased) and encoded as ordinals 0..4; columns that are not
    Likert answers are left untouched.

    Returns:
        Tuple of (questionnaire, requirements, tasks) DataFrames.
    """
    questionnaire = pd.read_excel('XAutoML.xlsx')

    likert = LabelEncoder()
    likert.classes_ = np.array(['strongly disagree', 'disagree', 'neutral', 'agree', 'strongly agree'])

    for column in questionnaire.columns:
        try:
            questionnaire.loc[:, column] = questionnaire.loc[:, column].str.strip().str.lower()
            questionnaire.loc[:, column] = likert.transform(questionnaire.loc[:, column])
        except (AttributeError, ValueError):
            # Not a Likert column (numeric or free text): keep as is.
            pass
    questionnaire.columns = questionnaire.columns.str.strip()

    requirements = pd.read_excel('task_results.ods', sheet_name='Requirements', skiprows=1)
    requirements = requirements.drop(index=[24], columns=['Unnamed: 1']).T
    requirements.columns = requirements.iloc[0]
    requirements = requirements[1:]

    tasks = pd.read_excel('task_results.ods', sheet_name=0)
    tasks = tasks.dropna(axis=1, how='all').dropna(axis=0, how='all')
    tasks.index = tasks.iloc[:, 0]
    tasks.drop(columns=tasks.columns[:2], inplace=True)

    return questionnaire, requirements, tasks
def calculate_sus(df: pd.DataFrame):
    """Compute and print the System Usability Scale (SUS) score.

    Expects the ten SUS items as columns with ordinal answers 0..4.
    Every second item is negatively worded and therefore inverted
    before each item is scaled to the 0..10 range.

    Works on a copy: the original version mutated the caller's frame
    in place (a SettingWithCopy hazard when passed a sliced frame).

    Args:
        df: one row per participant, ten SUS item columns.

    Returns:
        pd.Series: per-participant SUS score in [0, 100].
    """
    df = df.copy()
    invert = [False, False, True, False, True, False, True, False, True, True]
    for column, inverted in zip(df.columns, invert):
        if inverted:
            df[column] = 4 - df[column]
        df[column] = df[column] * 2.5
    score = df.sum(axis=1)

    print('###### System Usability Score ######')
    print(df.mean(axis=0))
    print(score.mean(), score.std())
    print('\n\n')
    return score
def print_visual_design(df: pd.DataFrame):
    """Print a LaTeX-ready table of mean visual-design ratings per user group.

    Column order: domain expert, data scientist, automl researcher, all.
    Ratings are shifted from the 0-based encoding back to 1..5.
    """
    group_means = [
        df[df['Role'] == 'domain expert'].mean() + 1,
        df[df['Role'] == 'data scientist'].mean() + 1,
        df[df['Role'] == 'automl researcher'].mean() + 1,
        df.mean() + 1,
    ]
    data = pd.DataFrame(group_means).T

    print('###### Visual Design ######')
    for _, row in data.iterrows():
        print(f'\\({row[0]:.2f}\\)\t& \\({row[1]:.2f}\\)\t& \\({row[2]:.2f}\\)\t& \\({row[3]:.2f}\\) \\\\')
    print('\n\n')
def print_previous_knowledge(df: pd.DataFrame):
    """Print a LaTeX-ready table of mean prior-knowledge ratings per user group.

    Column order: domain expert, data scientist, automl researcher, all.
    Ratings are shifted from the 0-based encoding back to 1..5.
    """
    group_means = [
        df[df['Role'] == 'domain expert'].mean() + 1,
        df[df['Role'] == 'data scientist'].mean() + 1,
        df[df['Role'] == 'automl researcher'].mean() + 1,
        df.mean() + 1,
    ]
    data = pd.DataFrame(group_means).T

    print('###### Previous Knowledge ######')
    for _, row in data.iterrows():
        print(f'\\({row[0]:.2f}\\)\t& \\({row[1]:.2f}\\)\t& \\({row[2]:.2f}\\)\t& \\({row[3]:.2f}\\) \\\\')
    print('\n\n')
def plot_priority_distribution(df: pd.DataFrame, group=False):
    """Plot the distribution of requirement-card rankings as split violins.

    Args:
        df: one column per requirement card, indexed by participant ids of
            the form '<Role>.<n>'; values are the rank each participant
            assigned to the card (lower = more important).
        group: if True, merge related cards into coarser categories before
            plotting.

    Side effects: prints LaTeX-ready summary tables and significance tests,
    shows the figure and writes ``requirement_cards.pdf``.
    """
    def calc_user_group(value: str):
        # Participant ids look like '<Role>.<n>'; keep only the role part.
        return value.strip().split('.')[0]
    # Flatten the table into long form: one (card, rank, role) row per answer.
    x = []
    y = []
    m = []
    for col in df:
        y.append(df[col].to_list())
        x.append([col] * df.shape[0])
        m.append(df[col].index.map(calc_user_group))
    x = np.array(x).flatten()
    # Invert ranks so that a larger y means "more important" (24 cards total).
    y = 24 - np.array(y).flatten()
    m = np.array(m).flatten()
    data = pd.DataFrame({'x': x, 'y': y, 'role': m})
    mean = data.groupby(by=['x', 'role']).mean().reset_index()
    # Undo the inversion again so the printed table shows average ranks.
    mean = pd.DataFrame({
        'Domain Expert': 24 - mean.loc[mean['role'] == 'Domain Expert', 'y'].reset_index(drop=True),
        'Data Scientist': 24 - mean.loc[mean['role'] == 'Data Scientist', 'y'].reset_index(drop=True),
        'AutoML Researcher': 24 - mean.loc[mean['role'] == 'AutoML Researcher', 'y'].reset_index(drop=True),
        'All': 24 - data.groupby('x').mean()['y'].reset_index(drop=True)
    })
    print('Average card rank')
    for _, row in mean.iterrows():
        print(f'\\({row[0]:.1f}\\)\t& \\({row[1]:.1f}\\)\t& \\({row[2]:.1f}\\)\t& \\({row[3]:.1f}\\) \\\\')
    print('\n\n')
    if group:
        # Fold detail cards into the first card of their category.
        replacements = {
            '#01': ['#02', '#03', '#04'],
            '#05': ['#06', '#07', '#08'],
            '#09': ['#10', '#11', '#12'],
            '#15': ['#16'],
            '#19': ['#20'],
            # '#22': ['#23', '#24']
        }
        for key, values in replacements.items():
            for value in values:
                data.loc[data['x'] == value, 'x'] = key
        rename = {
            '#01': 'Input Data',
            '#05': 'Pre-Proc. Data',
            '#09': 'Feat.-Eng. Data',
            '#13': 'Complete Pipeline',
            '#14': 'Search Space',
            '#15': 'Search Strategy',
            '#17': 'Perf. Metrics',
            '#18': 'Perf. Visual.',
            '#19': 'Explanations',
            '#21': 'View Hyperparam.',
            '#22': 'Comp. Perf.',
            '#23': 'Comp. Pipelines',
            '#24': 'Comp. Hyperparam.'
        }
    else:
        rename = {
            '#01': 'R01 View Input',
            '#02': 'R02 Desc Input',
            '#03': 'R03 Input Stat',
            '#04': 'R04 Plot Input',
            '#05': 'R05 View Pre-Proc',
            '#06': 'R06 Desc Pre-Proc',
            '#07': 'R07 Pre-Proc Stat',
            '#08': 'R08 Plot Pre-Proc',
            '#09': 'R09 View Feat-Eng',
            '#10': 'R10 Feat-Eng Stat',
            '#11': 'R11 Plot Feat-Eng',
            '#12': 'R12 Desc Feat-Eng',
            '#13': 'R13 Complete Pipe',
            '#14': 'R14 Search Space',
            '#15': 'R15 Pipe Search Strat',
            '#16': 'R16 HP Search Strat',
            '#17': 'R17 View Perf Metrics',
            '#18': 'R18 Plot Perf Visual',
            '#19': 'R19 Global Expl',
            '#20': 'R20 Local Expl',
            '#21': 'R21 View HP',
            '#22': 'R22 Comp Perf',
            '#23': 'R23 Comp Pipe',
            '#24': 'R24 Comp HP'
        }
    for old, new in rename.items():
        data.loc[data['x'] == old, 'x'] = new
    # Merge AutoML researchers into the data-scientist group for the
    # two-group significance test and the split violin plot below.
    data.loc[data['role'] == 'AutoML Researcher', 'role'] = 'Data Scientist'
    print('Difference between user groups per card')
    for card in data['x'].unique():
        ds = data[(data['x'] == card) & (data['role'] == 'Data Scientist')]
        de = data[(data['x'] == card) & (data['role'] == 'Domain Expert')]
        t = ttest_ind(ds['y'].values, de['y'].values)
        # Only report cards with a significant group difference.
        if t.pvalue < 0.05:
            print(f'{card} {t.pvalue:.5f}')
    print('\n\n')
    sns.set_theme(style="whitegrid")
    fig, ax = plt.subplots(1, 1, figsize=(15, 5))
    fig.tight_layout()
    sns.violinplot(data=data, x='x', y='y', hue='role', split=True, palette='pastel', ax=ax)
    sns.despine(left=True)
    ax.set_ylim(0, 24)
    ax.set_yticklabels([])
    ax.set_ylabel(None)
    ax.set_xlabel(None)
    box = ax.get_position()
    # Layout differs slightly between grouped and ungrouped variants
    # (label rotation, legend placement, axis margins).
    if group:
        plt.xticks(rotation=15)
        fig.text(0.0125, 0.2, 'least important', rotation=90, va='bottom')
        fig.text(0.0125, 0.95, 'most important', rotation=90, va='top')
        ax.set_position([box.x0, box.y0 + box.height * 0.125, box.width, box.height * 0.875])
        ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2)
    else:
        plt.xticks(rotation=25, ha='right', rotation_mode='anchor')
        fig.text(0.025, 0.225, 'least important', rotation=90, va='bottom')
        fig.text(0.025, 0.91, 'most important', rotation=90, va='top')
        ax.set_position([box.x0 + 0.015, box.y0 + box.height * 0.15, box.width, box.height * 0.8])
        ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.13), ncol=2)
    fig.show()
    fig.savefig('requirement_cards.pdf')
def calculate_trust_result(text_df: pd.DataFrame, vis_df: pd.DataFrame):
    """Compare trust ratings: text condition vs visualisation condition.

    Runs a one-sided t-test (text < vis) per question and prints
    LaTeX-ready means, p-values and Cohen's d, overall and broken down
    by user role. Note: mutates ``vis_df.columns`` in place.
    """
    def cohen_d(x: pd.Series, y: pd.Series):
        # Cohen's d effect size using the pooled standard deviation.
        nx = len(x)
        ny = len(y)
        dof = nx + ny - 2
        return (x.mean() - y.mean()) / math.sqrt(((nx - 1) * x.std() ** 2 + (ny - 1) * y.std() ** 2) / dof)
    # Align column names so both frames can be indexed with the same labels.
    vis_df.columns = text_df.columns
    print('###### Trust ######')
    for col in text_df:
        if col == 'Role':
            continue
        text = text_df.loc[:, col]
        vis = vis_df.loc[:, col]
        # One-sided: is the text condition rated lower than the visual one?
        t = ttest_ind(text.values, vis.values, alternative='less')
        print(
            f'{col}, \({text.mean() + 1:.2f} \pm {text.std():.2f}\), \({vis.mean() + 1:.2f} \pm {vis.std():.2f}\), \(p = {t.pvalue:.2e}\), \(d = {cohen_d(text, vis):.2f}\)')
    # Per-role breakdown (domain expert / data scientist / automl researcher).
    text_de, vis_de = text_df[text_df['Role'] == 'domain expert'], vis_df[vis_df['Role'] == 'domain expert']
    text_ar, vis_ar = text_df[text_df['Role'] == 'automl researcher'], vis_df[vis_df['Role'] == 'automl researcher']
    text_ds, vis_ds = text_df[text_df['Role'] == 'data scientist'], vis_df[vis_df['Role'] == 'data scientist']
    for col in text_df:
        if col == 'Role':
            continue
        print(
            f'\\({text_de[col].mean() + 1:.2f}\\)\t& \\({text_ds[col].mean() + 1:.2f}\\)\t& \\({text_ar[col].mean() + 1:.2f}\\)\t& \\({text_df[col].mean() + 1:.2f}\\) \\\\')
        print(
            f'\\({vis_de[col].mean() + 1:.2f}\\)\t& \\({vis_ds[col].mean() + 1:.2f}\\)\t& \\({vis_ar[col].mean() + 1:.2f}\\)\t& \\({vis_df[col].mean() + 1:.2f}\\) \\\\')
    print('\n\n')
def calculate_task_success(df: pd.DataFrame):
    """Encode y/n task outcomes as 1/0 (in place) and print success rates.

    Prints the success percentage per task (row) and the overall mean.
    """
    encoder = LabelEncoder()
    encoder.classes_ = np.array(['n', 'y'])
    for column in df.columns:
        df.loc[:, column] = encoder.transform(df.loc[:, column])

    with pd.option_context('display.precision', 0):
        print('Task success percentage')
        print(df.mean(axis=1) * 100)   # per task
        print(df.mean().mean() * 100)  # overall
    print('\n\n')
def index(df: pd.DataFrame, slice_) -> pd.DataFrame:
    """Return the columns selected by *slice_* plus the 'Role' column.

    Uses an explicit ``.copy()``: the original assigned a column onto the
    result of ``df.iloc``, which triggers pandas' SettingWithCopyWarning
    and may silently fail to attach 'Role' when a view is returned.

    Args:
        df: source frame; must contain a 'Role' column.
        slice_: column slice to extract.
    """
    df2 = df.iloc[:, slice_].copy()
    df2['Role'] = df['Role']
    return df2
# --- Script entry point: run the full evaluation pipeline. ---
# NOTE(review): runs on import; consider guarding with `if __name__ == '__main__':`.
questionnaire, requirements, tasks = load_data()
# Column slices select the questionnaire sections; verify against the sheet layout.
print_visual_design(index(questionnaire, slice(27, 32)))
print_previous_knowledge(index(questionnaire, slice(6, 11)))
calculate_sus(index(questionnaire, slice(32, 42)))
plot_priority_distribution(requirements)
calculate_task_success(tasks)
calculate_trust_result(index(questionnaire, slice(14, 20)), index(questionnaire, slice(20, 26)))
print('Correlation ML expertise and understanding of ML model')
print(questionnaire.iloc[:, [6, 15]].corr())
| 3.03125 | 3 |
matplotlib/tutorials_python/colors/colors.py | gottaegbert/penter | 13 | 12774 | """
*****************
Specifying Colors
*****************
Matplotlib recognizes the following formats to specify a color:
* an RGB or RGBA (red, green, blue, alpha) tuple of float values in closed
interval ``[0, 1]`` (e.g., ``(0.1, 0.2, 0.5)`` or ``(0.1, 0.2, 0.5, 0.3)``);
* a hex RGB or RGBA string (e.g., ``'#0f0f0f'`` or ``'#0f0f0f80'``;
case-insensitive);
* a shorthand hex RGB or RGBA string, equivalent to the hex RGB or RGBA
string obtained by duplicating each character, (e.g., ``'#abc'``, equivalent
to ``'#aabbcc'``, or ``'#abcd'``, equivalent to ``'#aabbccdd'``;
case-insensitive);
* a string representation of a float value in ``[0, 1]`` inclusive for gray
level (e.g., ``'0.5'``);
* one of ``{'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'}``, they are the single
character short-hand notations for blue, green, red, cyan, magenta, yellow,
black, and white.
* a X11/CSS4 color name (case-insensitive);
* a name from the `xkcd color survey`_, prefixed with ``'xkcd:'`` (e.g.,
``'xkcd:sky blue'``; case insensitive);
* one of the Tableau Colors from the 'T10' categorical palette (the default
color cycle): ``{'tab:blue', 'tab:orange', 'tab:green', 'tab:red',
'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan'}``
(case-insensitive);
* a "CN" color spec, i.e. ``'C'`` followed by a number, which is an index into
the default property cycle (``matplotlib.rcParams['axes.prop_cycle']``); the
indexing is intended to occur at rendering time, and defaults to black if the
cycle does not include color.
.. _xkcd color survey: https://xkcd.com/color/rgb/
"Red", "Green", and "Blue" are the intensities of those colors, the combination
of which span the colorspace.
How "Alpha" behaves depends on the ``zorder`` of the Artist. Higher
``zorder`` Artists are drawn on top of lower Artists, and "Alpha" determines
whether the lower artist is covered by the higher.
If the old RGB of a pixel is ``RGBold`` and the RGB of the
pixel of the Artist being added is ``RGBnew`` with Alpha ``alpha``,
then the RGB of the pixel is updated to:
``RGB = RGBOld * (1 - Alpha) + RGBnew * Alpha``. Alpha
of 1 means the old color is completely covered by the new Artist, Alpha of 0
means that pixel of the Artist is transparent.
For more information on colors in matplotlib see
* the :doc:`/gallery/color/color_demo` example;
* the `matplotlib.colors` API;
* the :doc:`/gallery/color/named_colors` example.
"CN" color selection
--------------------
"CN" colors are converted to RGBA as soon as the artist is created. For
example,
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
th = np.linspace(0, 2*np.pi, 128)
def demo(sty):
    """Plot cosine/sine curves coloured with the first cycle colors of *sty*."""
    mpl.style.use(sty)
    fig, ax = plt.subplots(figsize=(3, 3))
    ax.set_title(f'style: {sty!r}', color='C0')
    # C1/C2 pick the second and third colors of the style's property cycle.
    for idx, curve in ((1, np.cos), (2, np.sin)):
        ax.plot(th, curve(th), f'C{idx}', label=f'C{idx}')
    ax.legend()
demo('default')
demo('seaborn')
###############################################################################
# will use the first color for the title and then plot using the second
# and third colors of each style's ``mpl.rcParams['axes.prop_cycle']``.
#
#
# .. _xkcd-colors:
#
# xkcd v X11/CSS4
# ---------------
#
# The xkcd colors are derived from a user survey conducted by the
# webcomic xkcd. `Details of the survey are available on the xkcd blog
# <https://blog.xkcd.com/2010/05/03/color-survey-results/>`__.
#
# Out of 148 colors in the CSS color list, there are 95 name collisions
# between the X11/CSS4 names and the xkcd names, all but 3 of which have
# different hex values. For example ``'blue'`` maps to ``'#0000FF'``
# whereas ``'xkcd:blue'`` maps to ``'#0343DF'``. Due to these name
# collisions all of the xkcd colors have ``'xkcd:'`` prefixed. As noted in
# the blog post, while it might be interesting to re-define the X11/CSS4 names
# based on such a survey, we do not do so unilaterally.
#
# The name collisions are shown in the table below; the color names
# where the hex values agree are shown in bold.
import matplotlib._color_data as mcd
import matplotlib.patches as mpatch
# Names present in both palettes; compare their hex values side by side.
overlap = {name for name in mcd.CSS4_COLORS
           if "xkcd:" + name in mcd.XKCD_COLORS}
fig = plt.figure(figsize=[4.8, 16])
ax = fig.add_axes([0, 0, 1, 1])
for j, n in enumerate(sorted(overlap, reverse=True)):
    weight = None
    cn = mcd.CSS4_COLORS[n]
    xkcd = mcd.XKCD_COLORS["xkcd:" + n].upper()
    # Bold the label when the X11/CSS4 and xkcd hex values agree.
    if cn == xkcd:
        weight = 'bold'
    # One swatch pair per name: left = X11/CSS4, right = xkcd.
    r1 = mpatch.Rectangle((0, j), 1, 1, color=cn)
    r2 = mpatch.Rectangle((1, j), 1, 1, color=xkcd)
    txt = ax.text(2, j+.5, ' ' + n, va='center', fontsize=10,
                  weight=weight)
    ax.add_patch(r1)
    ax.add_patch(r2)
    ax.axhline(j, color='k')
# Column headers above the last row (j is the final loop index).
ax.text(.5, j + 1.5, 'X11', ha='center', va='center')
ax.text(1.5, j + 1.5, 'xkcd', ha='center', va='center')
ax.set_xlim(0, 3)
ax.set_ylim(0, j + 2)
ax.axis('off')
| 3.21875 | 3 |
12-listComprehensions.py | pgiardiniere/notes-WhirlwindTourOfPython | 0 | 12775 | # List Comprehensions
#########################
### Basic List Comprehensions
#########################
# Comprehensions build a list in a single expression instead of a
# loop with repeated append calls.
l = []  # the loop-and-append way
for n in range(12):
    l.append(n**2)
[n ** 2 for n in range(12)]  # the comprehension way
# General Syntax:
# [ `expr` for `var` in `iterable` ]
### Multiple iteration --- nested for-clauses yield tuples
[(i, j) for i in range(2) for j in range(3)]
### Conditionals on the Iterator (filter which items are kept)
[i for i in range(20) if i % 3 > 0]  # integers in [0, 20) not divisible by 3
l = []  # equivalent old-school construction:
for val in range(20):
    if val % 3:
        l.append(val)
### Conditionals on the Value (transform each kept item)
# C has the single-line conditional (ternary) operator `?:`
#   int absval = (val < 0) ? -val : val
# Python's equivalent is the inline if-else expression:
val = -10
val if val >= 0 else -val
# Keep val when 3 does not divide it; negate val when 2 divides it.
[val if val % 2 else -val
 for val in range(20) if val % 3]
#########################
### Other comprehensions
#########################
{ n**2 for n in range(12) }    # Set comprehension
{ n:n**2 for n in range(12) }  # Dict comprehension
{ a % 3 for a in range(1000) } # a = {0, 1, 2}
# GENERATOR EXPRESSION ---- lazy, see next chapter for details
( n**2 for n in range(12) )
src/chemical_roles/export/cli.py | bgyori/chemical-roles | 5 | 12776 | # -*- coding: utf-8 -*-
"""CLI for Chemical Roles exporters."""
import os
import click
from ..constants import DATA
@click.group()
def export():
    """Export the database."""


@export.command(name='all')
@click.pass_context
def export_all(ctx):
    """Export all."""
    # Run every exporter in sequence via the click context.
    ctx.invoke(summary)
    ctx.invoke(obo)
    ctx.invoke(bel)
    ctx.invoke(indra)


# Reusable --directory option shared by the exporter commands below.
directory_option = click.option('--directory', default=DATA)
@export.command()
def summary():
    """Rewrite readme and generate new export."""
    # Imported lazily so the CLI loads without the plotting stack installed.
    from .build import rewrite_repo_readme, write_export
    import seaborn as sns
    # Global plot styling — presumably consumed by write_export()'s figures;
    # confirm against the build module.
    sns.set(font_scale=1.3, style='whitegrid')
    rewrite_repo_readme()
    write_export()
@export.command()
@directory_option
def bel(directory):
    """Write BEL export."""
    # Lazy imports keep the CLI importable without pybel installed.
    import pybel
    from .bel import get_bel
    target = os.path.join(directory, 'crog.bel.nodelink.json.gz')
    pybel.dump(get_bel(), target)
@export.command()
@directory_option
def indra(directory):
    """Write INDRA export."""
    # Lazy imports keep the CLI importable without pybel installed.
    import pybel
    from .bel import get_bel
    # Non-inferred, evidence-free graph for INDRA statement export.
    graph = get_bel(use_inferred=False, add_evidence=False)
    target = os.path.join(directory, 'crog.indra.json')
    pybel.to_indra_statements_json_file(graph, target, sort_keys=True)
@export.command()
@directory_option
def obo(directory):
    """Write OBO export."""
    from .obo import get_obo
    o = get_obo()
    # Emit both the plain-text OBO file and the gzipped obonet JSON graph.
    o.write_obo(os.path.join(directory, 'crog.obo'))
    o.write_obonet_gz(os.path.join(directory, 'crog.obonet.json.gz'))


if __name__ == '__main__':
    export()
gdsfactory/geometry/write_drc.py | jorgepadilla19/gdsfactory | 42 | 12777 | <filename>gdsfactory/geometry/write_drc.py
"""Write DRC rule decks in klayout.
TODO:
- add min area
- define derived layers (composed rules)
"""
import pathlib
from dataclasses import asdict, is_dataclass
from typing import List, Optional
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from gdsfactory.config import logger
from gdsfactory.install import get_klayout_path
from gdsfactory.types import Dict, Layer, PathType
layer_name_to_min_width: Dict[str, float]
RuleType = Literal[
"width",
"space",
"enclosing",
]
def rule_width(value: float, layer: str, angle_limit: float = 90) -> str:
    """Return a KLayout DRC statement checking the minimum width of *layer*.

    Args:
        value: minimum feature size in um.
        layer: layer variable name used in the DRC script.
        angle_limit: maximum edge angle considered by the check.
    """
    message = f"{layer} width {value}um"
    check = f"{layer}.width({value}, angle_limit({angle_limit}))"
    return f"{check}.output('{message}', '{message}')"
def rule_space(value: float, layer: str, angle_limit: float = 90) -> str:
    """Return a KLayout DRC statement checking minimum spacing within *layer*.

    Args:
        value: minimum space between shapes in um.
        layer: layer variable name used in the DRC script.
        angle_limit: maximum edge angle considered by the check.
    """
    message = f"{layer} space {value}um"
    check = f"{layer}.space({value}, angle_limit({angle_limit}))"
    return f"{check}.output('{message}', '{message}')"
def rule_separation(value: float, layer1: str, layer2: str):
    """Return a DRC statement checking min separation between two layers.

    Args:
        value: minimum separation in um.
        layer1: first layer variable name.
        layer2: second layer variable name.
    """
    message = f"min {layer1} {layer2} separation {value}um"
    return f"{layer1}.separation({layer2}, {value}).output('{message}', '{message}')"
def rule_enclosing(
    value: float, layer1: str, layer2: str, angle_limit: float = 90
) -> str:
    """Return a DRC statement checking that *layer1* encloses *layer2*.

    Emits KLayout's ``layer1.enclosing(layer2, ...)``, which flags places
    where layer1 is not bigger than layer2 by at least *value* um.

    Args:
        value: minimum enclosure in um.
        layer1: enclosing (outer) layer variable name.
        layer2: enclosed (inner) layer variable name.
        angle_limit: maximum edge angle considered by the check.
    """
    message = f"{layer1} enclosing {layer2} by {value}um"
    check = f"{layer1}.enclosing({layer2}, angle_limit({angle_limit}), {value})"
    return f"{check}.output('{message}', '{message}')"
def write_layer_definition(layer_map: Dict[str, Layer]) -> List[str]:
    """Return layer definition lines for a KLayout DRC script.

    Fixes the return annotation: the function returns a list of lines,
    not a single string (the original said ``-> str``).

    Args:
        layer_map: mapping of layer name to (layer, datatype) tuple;
            can be a dict or a dataclass of such fields.

    Returns:
        One ``name = input(layer, datatype)`` line per entry.
    """
    layer_map = asdict(layer_map) if is_dataclass(layer_map) else layer_map
    return [
        f"{key} = input({value[0]}, {value[1]})" for key, value in layer_map.items()
    ]
def write_drc_deck(rules: List[str], layer_map: Dict[str, Layer]) -> str:
    """Returns a DRC rule deck script for KLayout.

    Args:
        rules: list of DRC rule statements.
        layer_map: layer definitions; can be dict or dataclass.
    """
    # Layer definitions first, then a blank separator line, then the rules.
    lines = write_layer_definition(layer_map=layer_map)
    lines.append("\n")
    lines.extend(rules)
    return "\n".join(lines)
def write_drc_deck_macro(
    name="generic",
    filepath: Optional[PathType] = None,
    shortcut: str = "Ctrl+Shift+D",
    **kwargs,
) -> str:
    """Write a KLayout DRC macro (.lydrc) file and return its contents.

    Args:
        name: drc rule deck name.
        filepath: Optional macro path (defaults to .klayout/drc/name.lydrc).
        shortcut: keyboard shortcut registered for the macro in KLayout.

    Keyword Args:
        rules: list of rules.
        layer_map: layer definitions; can be dict or dataclass.
    """
    # KLayout macro XML wrapper around the generated DRC DSL script.
    script = f"""<?xml version="1.0" encoding="utf-8"?>
<klayout-macro>
 <description>{name} DRC</description>
 <version/>
 <category>drc</category>
 <prolog/>
 <epilog/>
 <doc/>
 <autorun>false</autorun>
 <autorun-early>false</autorun-early>
 <shortcut>{shortcut}</shortcut>
 <show-in-menu>true</show-in-menu>
 <group-name>drc_scripts</group-name>
 <menu-path>tools_menu.drc.end</menu-path>
 <interpreter>dsl</interpreter>
 <dsl-interpreter-name>drc-dsl-xml</dsl-interpreter-name>
 <text># {name} DRC

# Read about DRC scripts in the User Manual under "Design Rule Check (DRC)"
# Based on SOEN pdk https://github.com/usnistgov/SOEN-PDK/tree/master/tech/OLMAC
# http://klayout.de/doc/manual/drc_basic.html

report("generic DRC")
tiles(100)
tile_borders(2)
threads(3)

"""
    script += write_drc_deck(**kwargs)

    script += """
</text>
</klayout-macro>
"""
    # Default install location is KLayout's user macro folder.
    filepath = filepath or get_klayout_path() / "drc" / f"{name}.lydrc"
    filepath = pathlib.Path(filepath)
    filepath.write_text(script)
    logger.info(f"Wrote DRC deck to {filepath}")
    return script
if __name__ == "__main__":
    import gdsfactory as gf

    # Demo: build a small rule deck and install it as a KLayout macro.
    rules = [
        rule_width(layer="WG", value=0.2),
        rule_space(layer="WG", value=0.2),
        rule_width(layer="M1", value=1),
        rule_width(layer="M2", value=2),
        rule_space(layer="M2", value=2),
        rule_separation(layer1="HEATER", layer2="M1", value=1.0),
        rule_enclosing(layer1="M1", layer2="VIAC", value=0.2),
    ]
    drc_rule_deck = write_drc_deck_macro(rules=rules, layer_map=gf.LAYER)
    print(drc_rule_deck)
| 2.609375 | 3 |
virtus/core/migrations/0004_auto_20180417_1625.py | eltonjncorreia/gerenciar-dados-virtus | 0 | 12778 | # Generated by Django 2.0.4 on 2018-04-17 19:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Item model and updates the
    Meta options (ordering / verbose names) of Cliente, Endereco and Pedido.

    Generated by Django; do not hand-edit the operations.
    """

    dependencies = [
        ('core', '0003_auto_20180417_1613'),
    ]

    operations = [
        # New Item model with monetary fields (2 decimal places).
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codigo', models.IntegerField(verbose_name='codigo')),
                ('descricao', models.CharField(max_length=255, verbose_name='descricao')),
                ('valor', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='valor')),
                ('unitario', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Unitário')),
                ('quantidade', models.IntegerField(verbose_name='quantidade')),
            ],
            options={
                'verbose_name': 'Item',
                'verbose_name_plural': 'Itens',
                'ordering': ['codigo'],
            },
        ),
        # Metadata-only changes below (no schema impact).
        migrations.AlterModelOptions(
            name='cliente',
            options={'ordering': ['nome'], 'verbose_name': 'Cliente', 'verbose_name_plural': 'Clientes'},
        ),
        migrations.AlterModelOptions(
            name='endereco',
            options={'ordering': ['tipo'], 'verbose_name': 'Endereço', 'verbose_name_plural': 'Endereços'},
        ),
        migrations.AlterModelOptions(
            name='pedido',
            options={'ordering': ['numero'], 'verbose_name': 'Pedido', 'verbose_name_plural': 'Pedidos'},
        ),
    ]
| 1.820313 | 2 |
image_demo.py | a888999a/yolov3fusion1 | 7 | 12779 | <filename>image_demo.py<gh_stars>1-10
#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : image_demo.py
# Author : YunYang1994
# Created date: 2019-01-20 16:06:06
# Description :
#
#================================================================
import cv2
import numpy as np
import core.utils as utils
import tensorflow as tf
from PIL import Image
# Tensor names to fetch from the frozen graph: two inputs (RGB + thermal)
# and the three YOLOv3 detection heads (small/medium/large boxes).
return_elements = ["input/input_rgb:0","input/input_lwir:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0", "pred_lbbox/concat_2:0"]
pb_file = "./yolov3_coco.pb"
# NOTE(review): hard-coded absolute Windows paths — parameterize before reuse.
image_path_rgb = r"C:\Users\gary\Desktop\b09\test\JPEGImages\rgb\set06_V000_I00019.jpg"
image_path_lwir = r"C:\Users\gary\Desktop\b09\test\JPEGImages\lwir\set06_V000_I00019.jpg"
num_classes = 1
input_size = 416
graph = tf.Graph()
# Load the paired RGB / LWIR (thermal) frames; OpenCV reads BGR, convert to RGB.
original_rgb = cv2.imread(image_path_rgb)
original_lwir = cv2.imread(image_path_lwir)
original_image_rgb = cv2.cvtColor(original_rgb, cv2.COLOR_BGR2RGB)
original_image_lwir = cv2.cvtColor(original_lwir, cv2.COLOR_BGR2RGB)
original_image_size = original_image_rgb.shape[:2]
# Resize/pad both modalities to the network input size and add a batch axis.
image_rgb,image_lwir = utils.image_preporcess(np.copy(original_image_rgb),np.copy(original_image_lwir), [input_size, input_size])
image_rgb = image_rgb[np.newaxis, ...]
image_lwir = image_lwir[np.newaxis, ...]
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)
with tf.Session(graph=graph) as sess:
    # Run the three detection heads on the fused RGB+thermal input.
    pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
        [return_tensors[2], return_tensors[3], return_tensors[4]],
            feed_dict={ return_tensors[0]: image_rgb,return_tensors[1]: image_lwir})
# Flatten all heads into one (N, 5 + num_classes) array of box candidates.
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
                            np.reshape(pred_mbbox, (-1, 5 + num_classes)),
                            np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)
# Score threshold 0.3, then non-maximum suppression with IoU threshold 0.45.
bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
bboxes = utils.nms(bboxes, 0.45, method='nms')
image = utils.draw_bbox(original_image_rgb, bboxes)
image = Image.fromarray(image)
image.show()
| 2.234375 | 2 |
Sensor/main.py | mahsahadian/EdgeBenchmarkTool | 0 | 12780 | <gh_stars>0
import cv2
from datetime import *
import time
import logging
import base64
import sys
import os
import shutil
import paho.mqtt.client as mqtt
from influxdb import InfluxDBClient
import datetime
import sys
import re
from typing import NamedTuple
import json
from dotenv import load_dotenv
load_dotenv("sensor-variables.env")
log = logging.getLogger()
log.setLevel('DEBUG')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
print('Hello 1')
def on_connect(client, userdata, flags, rc):
    """MQTT callback fired when the broker acknowledges the connection.

    Logs the result code and (re)subscribes, so the subscription also
    survives reconnects.
    """
    print(f'Connected with result code {rc}')
    client.subscribe('topic')
# The callback for when a PUBLISH message is received from the server.
def save_influx(json_body, body):
    """Write *json_body* points to InfluxDB; *body* is only used to log its size.

    NOTE(review): relies on a module-level ``influx_client`` that is not
    defined in this part of the file — confirm it is initialised before use.
    """
    print(" Saving data of : ", sys.getsizeof(str(body)), ' bytes')
    influx_client.write_points(json_body)
def on_message(client, userdata, msg):
    """MQTT callback for received messages: persist payload to InfluxDB.

    Builds a single InfluxDB point from the raw payload, saves it, then
    stops and disconnects the (module-level) client so each message is
    handled by a one-shot subscriber.

    NOTE(review): `transmitdelay`, `JPGQuality` and `camera_id` are read
    from module globals, and `client.loop_stop()` below uses the module-level
    `client`, not the callback argument — confirm this is intentional.
    """
    #current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    timestamp = str(int(time.time()))
    #print(msg.topic + ' ' + str(msg.payload))
    #sensor_data = _parse_mqtt_message(msg.topic, msg.payload.decode('utf-8'))
    #if sensor_data is not None:
    #    _send_sensor_data_to_influxdb(sensor_data)
    print("a")
    #splits_ = str(msg.payload).split('XXX')
    #splits_ = str(msg.payload).split('XXX')
    #for i in range(len(splits_)):
    # NOTE(review): "transmitdelay"/"JPGQuality" sit at the point's top level,
    # not under "tags" or "fields" — InfluxDB will ignore them; verify.
    json_body = [
        {
            "measurement": "t_1_4",
            "tags": {
                "camera_id": camera_id,
            },
            #"time": timestamp,
            "transmitdelay":transmitdelay,
            "JPGQuality":JPGQuality,
            "fields": {
                "value": str(msg.payload) #str(msg.payload)
            }
        }
    ]
    save_influx(json_body, str(msg.payload))
    #print(msg.topic, str(msg.payload))
    #thinktime or sleep aftersending
    client.loop_stop()  # Stop loop
    client.disconnect()  # disconnect
    #if splits_[i] == 'refresh':
    #client.reinitialise()
    #camera = Camera(camera_id, destination_cluster_ip, JPGQuality, transmitdelay, './imagesout')
    #camera.processVideoStream()
    #time.sleep(1)
    #val = splits_[1].replace('"', '')
    #print('recieved id: ', val)
    #if int(val) == 2222:
    #    camera = Camera(camera_id, destination_cluster_ip, JPGQuality, transmitdelay, './imagesout')
    #    camera.processVideoStream()
def _init_influxdb_database():
    """Create the InfluxDB database if it does not exist, then switch to it.

    NOTE(review): depends on module-level ``influx_client`` and
    ``INFLUXDB_DATABASE``, which are not defined in this part of the file.
    """
    databases = influx_client.get_list_database()
    if len(list(filter(lambda x: x['name'] == INFLUXDB_DATABASE, databases))) == 0:
        influx_client.create_database(INFLUXDB_DATABASE)
    influx_client.switch_database(INFLUXDB_DATABASE)
def myconverter(o):
    """JSON serializer fallback: render datetime objects via str().

    Non-datetime values fall through and implicitly return None.
    """
    if isinstance(o, datetime.datetime):
        return str(o)
class Camera():
    """Decodes frames from a video file, JPEG-encodes each one and publishes
    the base64 payload to an MQTT topic (see processVideoStream)."""

    def __init__(self, camera_id, destination_cluster_ip, JPGQuality, transmitdelay, folder):
        """Store the stream configuration.

        camera_id -- identifier attached to published frames.
        destination_cluster_ip -- target cluster address (stored for callers).
        JPGQuality -- JPEG quality (0-100) handed to cv2.imwrite.
        transmitdelay -- transmission delay hint (stored for callers).
        folder -- working directory holding the temporary frame JPEGs.
        """
        self.camera_id = camera_id
        self.destination_cluster_ip = destination_cluster_ip
        self.JPGQuality = JPGQuality
        self.transmitdelay = transmitdelay
        self.folder = folder

    def cleanup(self):
        """Remove every regular file from the working folder.

        Subdirectories are left untouched; individual failures are printed
        and skipped so one bad file does not abort the sweep.
        """
        # BUG FIX: previously hard-coded './imagesout', ignoring the folder
        # passed to __init__.
        for the_file in os.listdir(self.folder):
            file_path = os.path.join(self.folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                # elif os.path.isdir(file_path): shutil.rmtree(file_path)
            except Exception as e:
                print(e)

    def processVideoStream(self, thread=0):
        """Decode 'black.mp4' frame by frame and publish each frame over MQTT.

        Every frame is written to ./imagesout as a JPEG at the configured
        quality, re-read as base64, and published on topic "topic" with a
        fresh one-shot client per frame (on_message stops/disconnects it).
        Runs until the video is exhausted, then prints throughput statistics
        and removes the temporary JPEGs.

        thread -- informational thread number used only in log output.
        """
        vidcap = cv2.VideoCapture('black.mp4')
        success, image = vidcap.read()
        count = 0
        success = True
        start = time.time()
        print('JPGQuality:', self.JPGQuality)
        while success:
            # save frame as JPEG file at the configured quality
            cv2.imwrite("./imagesout/frame%d.jpg" % count, image, [int(cv2.IMWRITE_JPEG_QUALITY), self.JPGQuality])
            imageFileNameandPath = ("./imagesout/frame%d.jpg" % count)
            image_base64 = self.convertToBase64(imageFileNameandPath)
            success, image = vidcap.read()
            print('Read a new frame: ', success, ' thread number:', thread)
            end = time.time()
            runtime_seconds = end - start
            # one short-lived MQTT client per frame; on_message tears it down
            cname = "Client" + str(count)
            client = mqtt.Client(cname)
            client.on_connect = on_connect
            client.on_message = on_message
            # BUG FIX: paho-mqtt's connect() expects an int port; os.getenv
            # returns a string.
            client.connect(os.getenv('MQTT_SERVER_IP'), int(os.getenv('MQTT_SERVER_PORT')), 60)
            client.subscribe("topic", qos=1)
            client.publish(topic="topic", payload=str(image_base64), qos=1, retain=False)
            client.loop_start()
            time.sleep(1)
            count += 1
        print('Experiment Runtime (seconds): ' + str(int(runtime_seconds)))
        print('Images written per (second): ' + str(count/runtime_seconds))
        self.cleanup()

    def convertToBase64(self, fileNameandPath):
        """Return the base64-encoded bytes of the file at fileNameandPath."""
        # Renamed the local from `str`, which shadowed the builtin.
        with open(fileNameandPath, "rb") as imageFile:
            encoded = base64.b64encode(imageFile.read())
        return encoded
# --- Module bootstrap: configuration is taken entirely from the environment. ---
camera_id = os.getenv('CAMERA_ID') # sys.argv[1] # 123
destination_cluster_ip = os.getenv('DESTINATION_CLUSTER_IP') #sys.argv[2] # '172.16.17.32'
# NOTE(review): os.getenv returns str (or None); cv2.imwrite's quality flag
# and the transmit delay are used as numbers downstream -- confirm these are
# converted (the commented-out argv versions used int()).
JPGQuality = os.getenv('JPGQUALITY')#int(sys.argv[3] ) # 20
transmitdelay = os.getenv('TRANSMITDELAY') # int(sys.argv[4]) # 10
check_looping = 0
INFLUXDB_DATABASE = os.getenv('INFLUXDB_DATABASE_NAME')
influx_client = InfluxDBClient(os.getenv('INFLUXDB_DATABASE_IP'), os.getenv('INFLUXDB_DATABASE_PORT'), database=INFLUXDB_DATABASE)
# Ensure the target database exists before any point is written.
_init_influxdb_database()
#while True:
# Start the capture/publish loop immediately on import/run.
camera = Camera(camera_id, destination_cluster_ip, JPGQuality, transmitdelay, './imagesout')
camera.processVideoStream()
| 2.640625 | 3 |
oxide/plugins/other/StartupItems.py | john-clark/rust-oxide-umod | 13 | 12781 | <reponame>john-clark/rust-oxide-umod<gh_stars>10-100
# Note:
# I add an underscore at the biginning of the variable name for example: "_variable" to prevent
# conflicts with build-in variables from Oxide.
# Use to manage the player's inventory.
import ItemManager
# Use to get player's information.
import BasePlayer
# The plug-in name should be the same as the class name and file name.
class StartupItems:
    """Oxide (Rust game server) plug-in: equips players with a per-group item
    loadout when they respawn after death.
    NOTE(review): this runs inside Oxide's embedded Python 2 runtime --
    `V`, `permission` and `self.Config` are injected by Oxide, and the
    `print False` statement below is Python-2 syntax. Keep that dialect.
    """
    # Always start with a constructor.
    def __init__(self):
        # All the variables listed below are recommended for the plug-in and developer informaton.
        self.Title = 'StartupItems'
        self.Description = 'Set default items when player respawn after dead.'
        self.Author = 'RedNinja1337'
        self.Version = V(1, 0, 5)
        self.Url = 'http://oxidemod.org/plugins/startupitems.1323/'
        self.ResourceId = 1323
    # Create the configuration file if it does not exists.
    def LoadDefaultConfig(self):
        # Add some demo data as an example on the configuration file.
        # Each group maps to a tuple of item dicts with keys
        # item_shortname / Amount / Container (Wear, Belt or Main).
        self.Config['GroupItems'] = ({
        'admin':({'item_shortname':'attire.hide.boots', 'Amount':1, 'Container':'Wear'},
        {'item_shortname':'attire.hide.pants', 'Amount':1, 'Container':'Wear'},
        {'item_shortname':'rock', 'Amount':1, 'Container':'Belt'},
        {'item_shortname':'bow.hunting', 'Amount':1, 'Container':'Belt'},
        {'item_shortname':'arrow.hv', 'Amount':25, 'Container':'Main'},),
        'moderator':({},),
        'player':({},)
        })
    # Called from BasePlayer.Respawn.
    # Called when the player spawns (specifically when they click the "Respawn" button).
    # ONLY called after the player has transitioned from dead to not-dead, so not when they're waking up.
    def OnPlayerRespawned(self, BasePlayer):
        # Check if there is any group set on the configuration file.
        if self.Config['GroupItems']:
            # If at least one group is found on the configuration file then set the variable "_GroupItems" equals the group's dictionary.
            _GroupItems = self.Config['GroupItems']
            # Set the variable "_Group" equals the list of groups the player belogs to. By default all players belog to the group "player".
            _Group = permission.GetUserGroups(BasePlayer.userID.ToString())
            # Set the variable "_SetGroup" equals the last group the user was added from Oxide.Group. By default all players belog to the group "player".
            _SetGroup = _GroupItems.get(_Group[-1])
            # Check if the group exists in the config file.
            if _SetGroup:
                try: # Catch the "KeyNotFoundException" error if "Container", "item_shortname" or "Amount" is not found on the config file.
                    if _SetGroup[0]['Container'] and _SetGroup[0]['item_shortname'] and _SetGroup[0]['Amount']:
                        # Set the variable "inv" equals the player's inventory.
                        inv = BasePlayer.inventory
                        # Empty the player's inventory.
                        inv.Strip()
                        # Iterate through the list of items for the specify group from the configuration file.
                        for item in _SetGroup:
                            # Add the items set on the configuration file to each container on the player's inventory.
                            if item['Container'].lower() == 'main':
                                inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerMain)
                            elif item['Container'].lower() == 'belt':
                                inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerBelt)
                            elif item['Container'].lower() == 'wear':
                                inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerWear)
                            else: return
                    else: print False
                # Catch the "KeyNotFoundException" error if "Container", "item_shortname" or "Amount" is not found on the config file.
                except KeyError: return
            else: return
        else: return
| 2.53125 | 3 |
error_handler.py | jrg1381/sm_asr_console | 2 | 12782 | # encoding: utf-8
""" Parameterized decorator for catching errors and displaying them in an error popup """
from enum import Enum
import npyscreen
class DialogType(Enum):
    """
    Enum defining the type of dialog.
    CONFIRM - the dialog waits until the user clicks OK
    BRIEF - the dialog appears for a few seconds and then vanishes
    """
    # NOTE(review): plain functions assigned in an Enum body are treated by
    # the enum machinery as methods, not members -- DialogType.CONFIRM
    # resolves to the raw npyscreen function here, which is what
    # error_handler calls. Confirm this is intentional (members would need
    # e.g. functools.partial wrappers).
    CONFIRM = npyscreen.notify_confirm
    BRIEF = npyscreen.notify_wait
# PythonDecorators/decorator_function_with_arguments.py
def error_handler(title, dialog_type=DialogType.CONFIRM):
    """
    Decorator for functions to catch their exceptions and display them in an error popup.
    Returns None from the wrapped call when an exception was caught.
    :param title The title of the error pop-up
    :param dialog_type A DialogType enum value (callable as fn(message, title))
    """
    from functools import wraps

    def wrap(original_function):
        @wraps(original_function)  # keep the wrapped function's name/docstring
        def wrapped_f(*args, **kwargs):
            # BUG FIX: the original wrapper only forwarded *args, silently
            # breaking any decorated function called with keyword arguments.
            try:
                return original_function(*args, **kwargs)
            except Exception as ex:  # pylint: disable=broad-except
                dialog_type(str(ex), title)
                return None
        return wrapped_f
    return wrap
| 3.1875 | 3 |
parlai/tasks/taskmaster2/agents.py | min942773/parlai_wandb | 2 | 12783 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Taskmaster-2 implementation for ParlAI.
No official train/valid/test splits are available as of 2020-05-18, so we make our own
splits.
"""
import os
import pandas as pd
import hashlib
from collections import Counter
from parlai.core.opt import Opt
from parlai.core.teachers import DialogTeacher
from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric
from parlai.utils.misc import warn_once
import json
import parlai.utils.logging as logging
from typing import Optional, Tuple
from parlai.core.message import Message
from parlai.utils.io import PathManager
import parlai.tasks.taskmaster2.build as build_
# The seven conversation domains shipped with Taskmaster-2.
DOMAINS = [
    'flights',
    'food-ordering',
    'hotels',
    'movies',
    'restaurant-search',
    'sports',
    'music',
]
# Prefix tokens marking ontology dumps, API calls and API results in
# the teacher's text/label fields.
ONTO_TOKEN = "Onto:"
CALL_TOKEN = "Call:"
RESP_TOKEN = "Result:"
class _Abstract(DialogTeacher):
    """
    Abstract data loader.
    """
    @classmethod
    def add_cmdline_args(cls, argparser):
        # Command-line flags shared by every Taskmaster-2 teacher variant.
        argparser.add_argument('--include-ontology', type=bool, default=False)
        argparser.add_argument(
            '--domains',
            nargs='+',
            default=DOMAINS,
            choices=DOMAINS,
            help='Uses last passed in configuration.',
        )
        return argparser
    def __init__(self, opt: Opt, shared=None):
        # fold is train/valid/test, taken from the datatype string
        # (e.g. "train:stream" -> "train").
        self.fold = opt['datatype'].split(':')[0]
        opt['datafile'] = self.fold
        self.dpath = os.path.join(opt['datapath'], 'taskmaster-2')
        if shared is None:
            warn_once(
                "Taskmaster2 is a beta dataset, and format may significantly change."
            )
            build_.build(opt)
        super().__init__(opt, shared)
    def _h(self, x):
        """
        Hash function.
        """
        # Deterministically buckets a conversation id into an 80/10/10
        # train/valid/test split via sha1 mod 10.
        h = int(hashlib.sha1(x.encode('utf-8')).hexdigest(), 16) % 10
        if h == 0:
            return 'valid'
        elif h == 1:
            return 'test'
        else:
            return 'train'
    def _normalize_annotation(self, anno):
        # Hook for subclasses to rewrite annotation names; identity here.
        return anno
    def _load_data(self, fold, domains):
        """Return a shuffled DataFrame of `fold` conversations restricted to
        `domains`, with each row carrying its domain's flattened ontology
        string in an 'ontology' column.
        """
        # load up the ontology
        ontology = {}
        for section in domains:
            parts = []
            fn = os.path.join(self.dpath, section + '.onto.json')
            with PathManager.open(fn, 'r') as f:
                o = json.load(f)
            assert len(o) == 1
            o = list(o.values())[0]
            for sub in o:
                prefix = sub['prefix']
                parts += [
                    self._normalize_annotation(f'{prefix}.{a}')
                    for a in sub['annotations']
                ]
            ontology[section] = ' ; '.join(parts)
        chunks = []
        for section in domains:
            with PathManager.open(os.path.join(self.dpath, section + '.json')) as f:
                subset = pd.read_json(f)
            subset['domain'] = section
            chunks.append(subset)
        chunks = pd.concat(chunks, axis=0)
        # shuffle deterministically for randomness in few-shot training
        chunks = chunks.sample(frac=1.0, random_state=42)
        chunks['fold'] = self._label_fold(chunks)
        # only the fold we need here
        chunks = chunks[chunks.fold == fold].reset_index()
        chunks['ontology'] = chunks['domain'].apply(ontology.get)
        return chunks
    def _segments2text(self, segments):
        # Flatten annotation segments into a "name = value ; ..." string plus
        # a {name: value} slot dict.
        output = []
        slots = {}
        for segment in segments:
            val = segment['text']
            for anno_ in segment['annotations']:
                anno = anno_['name']
                anno = self._normalize_annotation(anno)
                output.append(f'{anno} = {val}')
                slots[anno] = val
        return " ; ".join(output), slots
    def custom_evaluation(
        self,
        teacher_action: Message,
        labels: Optional[Tuple[str]],
        model_response: Message,
    ):
        """Track slot precision/recall/JGA for API calls and lexicalized plus
        delexicalized F1/BLEU for API responses, bucketed per domain.
        """
        if 'metrics' in model_response and 'type' in teacher_action:
            # keep copies of metrics across both api calls/responses
            prefix = teacher_action['type']
            keys = list(model_response['metrics'].keys())
            for k in keys:
                self.metrics.add(f'{prefix}_{k}', model_response['metrics'][k])
        if 'text' not in model_response or not labels or 'type' not in teacher_action:
            return
        domain = teacher_action['domain']
        if teacher_action['type'] == 'apicall':
            # also count slot accuracy
            text = model_response['text']
            slot_guesses = set(
                text.replace(CALL_TOKEN + " ", "").split(' ; ')
            )  # prevent cheating via repeated guesses
            correct = 0
            for slot_guess in slot_guesses:
                if ' = ' not in slot_guess:
                    continue
                try:
                    slot, guess = slot_guess.split(' = ')
                except ValueError:
                    continue
                if teacher_action['slots'].get(slot) == guess:
                    self.metrics.add('slot_p', AverageMetric(1))
                    self.metrics.add(f'{domain}_slot_p', AverageMetric(1))
                    correct += 1
                else:
                    self.metrics.add('slot_p', AverageMetric(0))
                    self.metrics.add(f'{domain}_slot_p', AverageMetric(0))
                    logging.debug(
                        f"Bad slot guess '{slot_guess}' != {teacher_action['slots']}"
                    )
            if teacher_action['slots']:
                self.metrics.add(
                    'slot_r', AverageMetric(correct, len(teacher_action['slots']))
                )
                self.metrics.add(
                    f'{domain}_slot_r',
                    AverageMetric(correct, len(teacher_action['slots'])),
                )
                self.metrics.add(
                    'jga', AverageMetric(correct == len(teacher_action['slots']))
                )
        elif teacher_action['type'] == 'apiresp':
            # keep track of statistics by domain
            f1_metric = F1Metric.compute(model_response['text'], labels)
            bleu_metric = BleuMetric.compute(model_response['text'], labels)
            self.metrics.add(f'{domain}_lex_f1', f1_metric)
            self.metrics.add(f'{domain}_lex_bleu', bleu_metric)
            delex_text = model_response['text']
            delex_label = labels[0]
            # compute delexicalized string metrics
            for slot, value in teacher_action['slots'].items():
                delex_text = delex_text.replace(value, slot)
                delex_label = delex_label.replace(value, slot)
            f1_metric = F1Metric.compute(delex_text, (delex_label,))
            self.metrics.add('delex_f1', f1_metric)
            self.metrics.add(f'{domain}_delex_f1', f1_metric)
            bleu_metric = BleuMetric.compute(delex_text, [delex_label])
            self.metrics.add('delex_bleu', bleu_metric)
            self.metrics.add(f'{domain}_delex_bleu', bleu_metric)
    def setup_data(self, fold):
        """Yield (message, new_episode) pairs for the requested fold, one
        apicall message per USER turn and one apiresp message per ASSISTANT
        turn.
        """
        domains = self.opt.get('domains', DOMAINS)
        chunks = self._load_data(fold, domains)
        domains_cnt = Counter()
        for _, row in chunks.iterrows():
            domains_cnt[row['domain']] += 1
            first = True
            utterances = row['utterances'][:]
            if (
                len(utterances) >= 3
                and utterances[0]['speaker'] == 'USER'
                and utterances[1]['speaker'] == 'ASSISTANT'
                and utterances[2]['speaker'] == 'ASSISTANT'
                and "help you?" in utterances[1]['text']
            ):
                # skip this one -- canned "how can I help you?" opener
                utterances.pop(1)
            if self.opt['include_ontology']:
                yield {'text': f"{ONTO_TOKEN} {row['ontology']}", 'label': ''}, True
                first = False
            while utterances:
                utt = utterances.pop(0)
                segtxt, slots = self._segments2text(utt.get('segments', []))
                if utt['speaker'] == 'USER':
                    yield {
                        'text': utt['text'],
                        'label': f'{CALL_TOKEN} {segtxt}',
                        'domain': row['domain'],
                        'slots': slots,
                        'type': 'apicall',
                    }, first
                    first = False
                elif utt['speaker'] == 'ASSISTANT':
                    yield {
                        'text': f'{RESP_TOKEN} {segtxt}',
                        'label': utt['text'],
                        'domain': row['domain'],
                        'slots': slots,
                        'type': 'apiresp',
                    }, first
                    first = False
        logging.debug(f"Fold {fold} domains: {domains_cnt}")
class DelexTeacher(_Abstract):
    """Teacher that delexicalizes turns (slot values are replaced by their
    slot names) and merges consecutive same-speaker utterances into single
    __BREAK__-joined turns."""

    def _label_fold(self, chunks):
        """Assign train/valid/test per conversation via the hash split."""
        return chunks.conversation_id.apply(self._h)

    def _delexicalize(self, text, slots):
        """Replace each slot value occurring in `text` with its slot name."""
        for key, value in slots.items():
            text = text.replace(value, key)
        return text

    def setup_data(self, fold):
        """Yield delexicalized (message, new_episode) pairs for `fold`."""
        domains_cnt = Counter()
        # BUG FIX: _load_data requires the domains list as a second argument;
        # the previous call omitted it and raised TypeError. Mirrors
        # _Abstract.setup_data.
        domains = self.opt.get('domains', DOMAINS)
        chunks = self._load_data(fold, domains)
        for _, row in chunks.iterrows():
            domains_cnt[row['domain']] += 1
            first = True
            utterances = row['utterances'][:]
            if (
                len(utterances) >= 3
                and utterances[0]['speaker'] == 'USER'
                and utterances[1]['speaker'] == 'ASSISTANT'
                and utterances[2]['speaker'] == 'ASSISTANT'
                and "help you?" in utterances[1]['text']
            ):
                # drop the canned "how can I help you?" assistant opener
                utterances.pop(1)
            user_utterances = []
            asst_utterances = []
            while utterances:
                utt = utterances.pop(0)
                _, slots = self._segments2text(utt.get('segments', []))
                if utt['speaker'] == 'USER':
                    if asst_utterances:
                        # a user turn after assistant turns closes an exchange
                        yield {
                            'text': ' __BREAK__ '.join(user_utterances),
                            'label': ' __BREAK__ '.join(asst_utterances),
                            'domain': row['domain'],
                        }, first
                        first = False
                        user_utterances = []
                        asst_utterances = []
                    user_utterances.append(self._delexicalize(utt['text'], slots))
                elif utt['speaker'] == 'ASSISTANT':
                    asst_utterances.append(self._delexicalize(utt['text'], slots))
                    if not user_utterances:
                        # assistant spoke first; stand in a silence marker
                        user_utterances.append('__SILENCE__')
            if asst_utterances:
                # flush the trailing exchange of the conversation
                yield {
                    'text': ' __BREAK__ '.join(user_utterances),
                    'label': ' __BREAK__ '.join(asst_utterances),
                    'domain': row['domain'],
                }, first
class TextOnlyTeacher(DelexTeacher):
    """Variant of DelexTeacher that keeps the raw surface text: the
    delexicalization step is overridden to be a pass-through."""

    def _delexicalize(self, text, slots):
        # Intentionally ignore `slots`; lexicalized text flows through as-is.
        return text
class FullShotTeacher(_Abstract):
    """
    The full shot teacher uses a standard 80-10-10 split, without regarding domain.
    """

    def _label_fold(self, chunks):
        # Bucket every conversation purely by its hashed conversation id.
        return chunks['conversation_id'].apply(self._h)
class FewShotTeacher(_Abstract):
    """
    Few shot teacher tests for generalization to new domains.
    """
    @classmethod
    def add_cmdline_args(cls, argparser):
        # Adds the holdout-domain and shot-budget flags on top of the shared ones.
        argparser.add_argument(
            '--holdout',
            default=DOMAINS[0],
            choices=DOMAINS,
            help='Domain which is held out from test',
        )
        argparser.add_argument(
            '--n-shot',
            default=100,
            type=int,
            help='Number of few shot examples to provide in training fold.',
        )
        return super().add_cmdline_args(argparser)
    def _label_fold(self, chunks):
        # Non-holdout domains are entirely training data. The holdout domain
        # keeps its hashed valid/test splits but contributes at most n_shot
        # training conversations; the rest are marked 'throwaway' and never
        # selected by _load_data.
        folds = []
        num_shots = 0
        for _, row in chunks.iterrows():
            if row['domain'] != self.opt['holdout']:
                # if it's not in the holdout, always mark it train
                folds.append('train')
            else:
                # keep the same valid/test sets as in fullshot, and only leak
                # a small number of the training examples (i.e. throw away the
                # vast majority of our data but keep test sets the same)
                f = self._h(row['conversation_id'])
                if f != 'train':
                    folds.append(f)
                elif num_shots < self.opt['n_shot']:
                    folds.append('train')
                    num_shots += 1
                else:
                    folds.append('throwaway')
        return folds
class DefaultTeacher(FullShotTeacher):
    # ParlAI's teacher lookup resolves to DefaultTeacher; the full-shot
    # 80-10-10 split is the default behavior for this task.
    pass
| 1.835938 | 2 |
jmeter_api/timers/__init__.py | dashawn888/jmeter_api | 11 | 12784 | <reponame>dashawn888/jmeter_api
from jmeter_api.timers.constant_throughput_timer.elements import ConstantThroughputTimer, BasedOn
from jmeter_api.timers.constant_timer.elements import ConstantTimer
from jmeter_api.timers.uniform_random_timer.elements import UniformRandTimer
| 1.265625 | 1 |
tensorfn/distributed/launch.py | rosinality/tensorfn | 13 | 12785 | <gh_stars>10-100
import os
import torch
from torch import distributed as dist
from torch import multiprocessing as mp
from tensorfn import distributed as dist_fn
def find_free_port():
    """Return a TCP port number that was free at the time of the check.

    Binds an ephemeral socket to port 0 so the OS assigns an unused port,
    records it, and releases the socket. Note the inherent race: the port
    could be claimed by another process before the caller binds it.
    """
    import socket

    # Context manager guarantees the socket is closed even if bind() raises
    # (the original leaked the descriptor on failure).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))
        return sock.getsockname()[1]
def launch(fn, n_gpu_per_machine, n_machine=1, machine_rank=0, dist_url=None, args=()):
    """Run `fn` across GPUs/machines via torch.multiprocessing + distributed.

    fn -- callable executed by every worker, invoked as fn(*args).
    n_gpu_per_machine -- processes (one per GPU) to spawn on this machine.
    n_machine -- total number of participating machines.
    machine_rank -- this machine's index in [0, n_machine).
    dist_url -- torch.distributed init method URL; "auto" picks a free local
        TCP port (single-machine jobs only).
    args -- positional arguments forwarded to fn.
    When only one process is needed, fn is simply called in-process.
    """
    world_size = n_machine * n_gpu_per_machine
    if world_size > 1:
        # Avoid oversubscribing CPU threads when many workers share a host.
        if "OMP_NUM_THREADS" not in os.environ:
            os.environ["OMP_NUM_THREADS"] = "1"
        if dist_url == "auto":
            if n_machine != 1:
                raise ValueError('dist_url="auto" not supported in multi-machine jobs')
            port = find_free_port()
            dist_url = f"tcp://127.0.0.1:{port}"
        if n_machine > 1 and dist_url.startswith("file://"):
            raise ValueError(
                "file:// is not a reliable init method in multi-machine jobs. Prefer tcp://"
            )
        mp.spawn(
            distributed_worker,
            nprocs=n_gpu_per_machine,
            args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args),
            daemon=False,
        )
    else:
        fn(*args)
def distributed_worker(
    local_rank, fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args
):
    """Per-process entry point started by mp.spawn in launch().

    Joins the NCCL process group, pins the process to its local GPU,
    registers the machine-local subgroup, then runs fn(*args).
    """
    if not torch.cuda.is_available():
        raise OSError("CUDA is not available. Please check your environments")
    # Rank unique across all machines: machines contribute contiguous blocks.
    global_rank = machine_rank * n_gpu_per_machine + local_rank
    try:
        dist.init_process_group(
            backend="NCCL",
            init_method=dist_url,
            world_size=world_size,
            rank=global_rank,
        )
    except Exception:
        raise OSError("failed to initialize NCCL groups")
    # Barrier: make sure every rank finished init before proceeding.
    dist_fn.synchronize()
    if n_gpu_per_machine > torch.cuda.device_count():
        raise ValueError(
            f"specified n_gpu_per_machine larger than available device ({torch.cuda.device_count()})"
        )
    torch.cuda.set_device(local_rank)
    if dist_fn.LOCAL_PROCESS_GROUP is not None:
        raise ValueError("torch.distributed.LOCAL_PROCESS_GROUP is not None")
    # Build one subgroup per machine so machine-local collectives can be
    # scoped; every rank must create every group, but only stores its own.
    n_machine = world_size // n_gpu_per_machine
    for i in range(n_machine):
        ranks_on_i = list(range(i * n_gpu_per_machine, (i + 1) * n_gpu_per_machine))
        pg = dist.new_group(ranks_on_i)
        if i == machine_rank:
            dist_fn.distributed.LOCAL_PROCESS_GROUP = pg
    fn(*args)
| 2.171875 | 2 |
VideoStitchingSubsystem/StereoCameraAPIs/MonoLensStream.py | AriaPahlavan/see-through-adas-core | 0 | 12786 | <reponame>AriaPahlavan/see-through-adas-core<gh_stars>0
from enum import Enum
from threading import Thread
import cv2
import time
class Resolution(Enum):
    """Common capture resolutions as (width, height) pixel tuples."""
    _32p = (64, 32)
    _96p = (128, 96)
    _120p = (160, 120)
    _144p = (256, 144)
    _240p = (360, 240)
    # NOTE(review): 480x272 is a 272-line mode despite the _288p name -- confirm.
    _288p = (480, 272)
    _360p = (480, 360)
    _480p = (720, 480)
    _576p = (720, 576)
    _Hd = (1280, 720)
class MonoLensStream:
    """Threaded wrapper around cv2.VideoCapture for a single camera/lens.

    A daemon thread continuously grabs frames in the background; read()
    hands back the most recent frame together with its capture timestamp.
    The debug* variants time each grab instead of streaming normally.
    """
    def setParam(self, param, value, name):
        # Best-effort cv2.VideoCapture.set(): logs a warning when the
        # backend rejects the property instead of raising.
        if self.stream.set(param, value):
            pass
        else:
            import logging
            log = logging.getLogger()
            log.warning("[WARN] cannot set "+name)
    def __init__(self, src=0, framerate=30, resolution=Resolution._240p.value, fourcc="MJPG", exposure=-10,
                 debugEnable=False, debugCount=1000):
        """
        initialize the video stream
        """
        # src -- camera index or stream URL handed to cv2.VideoCapture.
        # resolution -- (width, height) tuple, e.g. Resolution._240p.value.
        # debugEnable/debugCount -- switch the reader thread to the timing
        #     loop (debugUpdate) for debugCount iterations.
        self.stream = cv2.VideoCapture(src)
        # set resolution
        w, h = resolution
        self.setParam(cv2.CAP_PROP_FRAME_WIDTH, w, "width")
        self.setParam(cv2.CAP_PROP_FRAME_HEIGHT, h, "height")
        self.setParam(cv2.CAP_PROP_FPS, framerate, "fps")
        self.setParam(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc), "fourcc")
        self.setParam(cv2.CAP_PROP_EXPOSURE, exposure, "exposure")
        self.fpsDelay = 1 / framerate
        # read first frame
        (self.grabbed, self.frame) = self.stream.read()
        # frame reader thread
        if not debugEnable:
            self.frameReaderThread = Thread(target=self.update, args=())
        else:
            self.min = self.avg = self.max = 0
            self.debugCount = debugCount
            self.frameReaderThread = Thread(target=self.debugUpdate, args=())
        self.streamStopped = False
        # grabbedTime advances on every grab; read() compares it against
        # returnedTime to detect a fresh frame.
        self.grabbedTime = time.time()
        self.returnedTime = self.grabbedTime
    def start(self):
        """
        start the thread to read frames from the video stream
        :return reference to itself
        """
        self.frameReaderThread.daemon = True
        self.frameReaderThread.start()
        return self
    def update(self):
        """
        grab the next frame from the stream infinitely until the stream is stopped
        """
        while True:
            if self.streamStopped:  # done with streaming
                return
            (self.grabbed, self.frame) = self.stream.read()
            self.grabbedTime = time.time()
            # time.sleep(self.fpsDelay)
    def read(self):
        """
        :return: the current frame
        """
        # NOTE(review): busy-waits until the grab timestamp advances past the
        # last returned one; this spins a CPU core between frames -- confirm
        # acceptable (an Event/Condition would avoid it).
        while self.returnedTime == self.grabbedTime:
            continue
        self.returnedTime = self.grabbedTime
        return self.frame, self.returnedTime
    def stop(self):
        """
        stop the video stream
        """
        self.streamStopped = True
        self.frameReaderThread.join()
        self.stream.release()
    def debugUpdate(self):
        """
        **FOR DEBUGGING PURPOSES ONLY**
        grab the next frame from the stream infinitely until the stream is stopped
        """
        # Times each stream.read() in microseconds and tracks min/avg/max
        # over debugCount iterations.
        startTime = time.time() * 1000 * 1000
        (self.grabbed, self.frame) = self.stream.read()
        endTime = time.time() * 1000 * 1000
        self.max = self.min = endTime - startTime
        counter = self.debugCount
        while self.debugCount != 0:
            startTime = time.time() * 1000 * 1000
            (self.grabbed, self.frame) = self.stream.read()
            endTime = time.time() * 1000 * 1000
            ellapsedTime = endTime - startTime
            print(ellapsedTime)
            self.avg += ellapsedTime
            if self.min > ellapsedTime:
                self.min = ellapsedTime
            if self.max < ellapsedTime:
                self.max = ellapsedTime
            self.debugCount -= 1
            time.sleep(self.fpsDelay)
        self.avg = (self.avg / counter)
    def debugResults(self):
        """
        **FOR DEBUGGING PURPOSES ONLY**
        :return average, min, and max from debugging results
        """
        self.frameReaderThread.join()
        self.stream.release()
        return self.avg, self.min, self.max
| 2.515625 | 3 |
sdk/python/approzium/mysql/connector/pooling.py | UpGado/approzium | 59 | 12787 | from mysql.connector.pooling import MySQLConnectionPool
from ._connect import _parse_kwargs, _patch_MySQLConnection
class MySQLConnectionPool(MySQLConnectionPool):
    """Drop-in subclass of mysql.connector's pool that routes configuration
    and connection creation through Approzium's authentication helpers.
    NOTE(review): the subclass deliberately shadows its base class's name so
    importers transparently get the patched pool; super(MySQLConnectionPool,
    self) below therefore resolves to the *imported* base via the MRO.
    Confirm the shadowing is intended.
    """
    def set_config(self, **kwargs):
        # Rewrite kwargs (e.g. translate Approzium auth options) before the
        # stock pool validates and stores them.
        kwargs = _parse_kwargs(kwargs)
        super(MySQLConnectionPool, self).set_config(**kwargs)
    def add_connection(self, cnx=None):
        # Temporarily patch MySQLConnection so pooled connections
        # authenticate through Approzium during creation.
        with _patch_MySQLConnection(include_pooling=True):
            super().add_connection(cnx)
| 2.40625 | 2 |
app/network_x_tools/network_x_utils.py | ThembiNsele/ClimateMind-Backend | 6 | 12788 | <reponame>ThembiNsele/ClimateMind-Backend
class network_x_utils:
    """
    This class provides commonly used utils which are shared between all different types
    of NetworkX nodes (Feed Items, Solutions, Myths). For each of these, we want to be
    able to pull basic information like the IRI, Descriptions, Images, etc.
    Include any generalized NetworkX functions here.
    """

    def __init__(self):
        # Current node: a NetworkX node-attribute dict set via set_current_node.
        self.node = None

    def set_current_node(self, node):
        """We usually pull multiple node related items simultaneously. Rather
        than pass these in individually for each function, this let's us use the same
        node for all of the functions in this class.
        """
        self.node = node

    def get_node_id(self):
        """Node IDs are the unique identifier in the IRI. This is provided to the
        front-end as a reference for the feed, but is never shown to the user.
        Example http://webprotege.stanford.edu/R8znJBKduM7l8XDXMalSWSl
        """
        offset = 4  # skip "edu/" after the match to reach the unique id
        full_iri = self.node["iri"]
        pos = full_iri.find("edu") + offset
        return full_iri[pos:]

    def get_description(self):
        """Long Descriptions are used by the front-end to display explanations of the
        climate effects shown in user feeds. Falls back to a placeholder when
        the property is missing or empty.
        """
        # Narrowed from a bare except: only a missing/empty/malformed
        # properties structure should trigger the fallback.
        try:
            return self.node["properties"]["schema_longDescription"][0]
        except (KeyError, IndexError, TypeError):
            return "No long desc available at present"

    def get_short_description(self):
        """Short Descriptions are used by the front-end to display explanations of the
        climate effects shown in user feeds. Falls back to a placeholder when
        the property is missing or empty.
        """
        try:
            return self.node["properties"]["schema_shortDescription"][0]
        except (KeyError, IndexError, TypeError):
            return "No short desc available at present"

    def get_image_url(self):
        """Images are displayed to the user in the climate feed to accompany an explanation
        of the climate effects. The front-end is provided with the URL and then requests
        these images from our server.
        """
        try:
            return self.node["properties"]["schema_image"][0]
        except (KeyError, IndexError, TypeError):
            # Default image url if no image is attached to the node
            return "https://yaleclimateconnections.org/wp-content/uploads/2018/04/041718_child_factories.jpg"

    def get_image_url_or_none(self):
        """Like get_image_url, but returns None instead of the default image
        when the node has no schema_image property.
        """
        try:
            return self.node["properties"]["schema_image"][0]
        except (KeyError, IndexError, TypeError):
            return None

    def get_causal_sources(self):
        """Sources are displayed to the user in the sources tab of the impacts overlay page.
        Returns the list of source urls attached to the edges that cause this
        node (ancestor edge sources are not included). Always returns a list;
        [] when the node has no usable causal sources.
        """
        if "causal sources" in self.node and len(self.node["causal sources"]) > 0:
            return self.node["causal sources"]
        # BUG FIX: previously fell through with an implicit None on this
        # path, while callers expect a list.
        return []

    def get_solution_sources(self):
        """Returns a flattened list of custom solution source values, or []
        when the node has none.
        """
        try:
            return self.node["solution sources"]
        except (KeyError, TypeError):
            return []

    def get_is_possibly_local(self, node):
        """Returns whether it's possible that a node effects a particular user based on
        their location (1/0). Note that here we need to pass in the node directly,
        rather than using one set by the class, as the node comes from
        localised_acyclic_graph.py rather than the standard graph.
        """
        # .get covers both "key missing" and "value falsy" in one step.
        return 1 if node.get("isPossiblyLocal") else 0

    def get_co2_eq_reduced(self):
        """
        Returns the solution's CO2 Equivalent Reduced / Sequestered (2020-2050) in Gigatons.
        Values taken from Project Drawdown scenario 2. Defaults to 0 when absent.
        """
        return self.node["data_properties"].get("CO2_eq_reduced", 0)
| 2.921875 | 3 |
setup.py | Faust-Wang/vswarm | 21 | 12789 | <filename>setup.py
# Do not manually invoke this setup.py, use catkin instead!
from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Pull package metadata (name, version, maintainer, ...) from package.xml via
# catkin and declare that the 'vswarm' Python package lives under src/.
setup_args = generate_distutils_setup(
    packages=['vswarm'],
    package_dir={'': 'src'}
)
setup(**setup_args)
| 1.359375 | 1 |
实例学习Numpy与Matplotlib/创建 numpy.array.py | shao1chuan/pythonbook | 95 | 12790 |
import numpy as np
# 1-D array of the integers 0..9 built from a list comprehension.
nparr = np.array([i for i in range(10)])
# Ten zeros; np.zeros defaults to dtype float64.
a = np.zeros(10)
# Same as above with the dtype spelled out explicitly.
f = np.zeros(10,dtype=float)
# 3x5 array where every element is 44.
n = np.full((3,5),44)
# 3x5 random integers drawn uniformly from [0, 100).
r = np.random.randint(0,100,size=(3,5))
# 3x5 random floats drawn uniformly from [0, 1).
r2 = np.random.random((3,5))
# 50 evenly spaced values from 0 to 100 inclusive.
x = np.linspace(0,100,50)
print(nparr,a,f,n,r,r2,x)
examples/DecryptLoginExamples/crawlers/weibomonitor/weibomonitor.py | hedou/DecryptLogin | 0 | 12791 | <reponame>hedou/DecryptLogin
'''
Function:
微博监控
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import re
import time
from DecryptLogin import login
'''微博监控'''
class WeiboMonitor():
    """Interactively pick a followed Weibo user and poll their mobile-web
    feed, logging every new post. Chinese prompt/log strings are user-facing
    runtime output and are left untouched.
    """
    def __init__(self, username, password, time_interval=30):
        # Log in once via DecryptLogin and keep the authenticated session.
        _, self.session = self.login(username, password)
        self.headers = {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
            'Host': 'm.weibo.cn',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
        }
        # Template: uid, uid again as value, and the feed containerid.
        self.api_url = 'https://m.weibo.cn/api/container/getIndex?uid={}&luicode=10000011&lfid=231093_-_selffollowed&type=uid&value={}&containerid={}'
        # Seconds to sleep between successive feed polls.
        self.time_interval = time_interval
    # Start monitoring: list followed users, let the user pick one by number.
    '''开始监控'''
    def run(self):
        followed = self.getFollowed()
        self.logging('请选择一位您关注列表中的用户进行监控:')
        self.logging('-' * 40)
        for idx, each in enumerate(sorted(followed.keys())):
            self.logging('[%d]. %s' % (idx+1, each))
        self.logging('-' * 40)
        while True:
            user_choice = input('请选择您想要监控的用户编号(例如1):')
            try:
                profile_url = followed[sorted(followed.keys())[int(user_choice)-1]]
                user_id = re.findall(r'uid=(\d+)&', profile_url)[0]
                break
            except:
                self.logging('您的输入有误, 请重新输入.', 'Warning')
        self.monitor(user_id, profile_url)
    # Monitor a user's home feed: snapshot current post ids, then poll forever.
    '''监控用户主页'''
    def monitor(self, user_id, profile_url):
        user_name, containerid = self.getContainerid(user_id, profile_url)
        response = self.session.get(self.api_url.format(user_id, user_id, containerid))
        weibo_ids = []
        cards = response.json()['data']['cards']
        for card in cards:
            # card_type 9 marks an actual post (mblog) card.
            if card['card_type'] == 9:
                weibo_ids.append(str(card['mblog']['id']))
        while True:
            weibo_ids = self.checkUpdate(user_id, profile_url, weibo_ids)
            time.sleep(self.time_interval)
    # Check whether the user published posts not yet in weibo_ids; log them.
    '''检查用户是否有新的微博'''
    def checkUpdate(self, user_id, profile_url, weibo_ids):
        user_name, containerid = self.getContainerid(user_id, profile_url)
        response = self.session.get(self.api_url.format(user_id, user_id, containerid))
        cards = response.json()['data']['cards']
        flag = False
        for card in cards:
            if card['card_type'] == 9:
                if str(card['mblog']['id']) not in weibo_ids:
                    flag = True
                    weibo_ids.append(str(card['mblog']['id']))
                    self.logging(f'用户{user_name}发布了新微博')
                    # Collect attached picture urls joined by '||'.
                    pics = []
                    if card['mblog'].get('pics'):
                        for i in card['mblog']['pics']: pics.append(i['url'])
                    pics = '||'.join(pics)
                    self.logging(card)
        if not flag: self.logging(f'用户{user_name}未发布新微博')
        return weibo_ids
    # Resolve the user's display name and weibo-tab containerid.
    '''获取containerid'''
    def getContainerid(self, user_id, profile_url):
        # Visiting the profile first sets the fid cookie the API needs.
        self.session.get(profile_url)
        containerid = re.findall(r'fid%3D(\d+)%26', str(self.session.cookies))[0]
        response = self.session.get(self.api_url.format(user_id, user_id, containerid))
        user_name = self.decode(re.findall(r'"screen_name":"(.*?)"', response.text)[0])
        for i in response.json()['data']['tabsInfo']['tabs']:
            if i['tab_type'] == 'weibo':
                containerid = i['containerid']
        return user_name, containerid
    # Page through the followed-users container; returns {screen_name: url}.
    '''获取关注列表'''
    def getFollowed(self):
        data = {}
        page = 0
        while True:
            page += 1
            response = self.session.get('https://m.weibo.cn/api/container/getIndex?containerid=231093_-_selffollowed&page={}'.format(page), headers=self.headers)
            profile_urls = re.findall(r'"profile_url":"(.*?)"', response.text)
            screen_names = re.findall(r'"screen_name":"(.*?)"', response.text)
            if len(profile_urls) == 0:
                break
            for screen_name, profile_url in zip(screen_names, profile_urls):
                data[self.decode(screen_name)] = profile_url.replace('\\', '')
        return data
    # Undo the \uXXXX escaping the raw JSON text carries for CJK names.
    '''解码'''
    def decode(self, content):
        return content.encode('latin-1').decode('unicode_escape')
    # Log in to mobile Weibo via DecryptLogin; returns (info, session).
    '''模拟登录'''
    def login(self, username, password):
        client = login.Client()
        weibo = client.weibo(reload_history=True)
        infos_return, session = weibo.login(username, password, 'mobile')
        return infos_return, session
    # Timestamped console logger; tip is the severity label.
    '''logging'''
    def logging(self, msg, tip='INFO'):
        print(f'[{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} {tip}]: {msg}')
crawlai/items/critter/base_critter.py | apockill/CreepyCrawlAI | 13 | 12792 | <reponame>apockill/CreepyCrawlAI
from godot.bindings import ResourceLoader
from crawlai.grid_item import GridItem
from crawlai.items.food import Food
from crawlai.math_utils import clamp
from crawlai.turn import Turn
from crawlai.position import Position
_critter_resource = ResourceLoader.load("res://Game/Critter/Critter.tscn")
class BaseCritter(GridItem):
    """Common behaviour shared by every critter on the grid."""

    # Health drained every tick simply for being alive.
    HEALTH_TICK_PENALTY = 1
    MAX_HEALTH = 500
    BITE_SIZE = 20

    # All legal turns: a step in each of the four directions, each with and
    # without performing an action, plus standing still without acting.
    CHOICES = [
        Turn(Position(*offset), acts)
        for offset in [(0, 1), (1, 0), (-1, 0), (0, -1)]
        for acts in (True, False)
    ] + [Turn(Position(0, 0), False)]

    def __init__(self):
        super().__init__()
        self.health: int
        self.age: int
        self._reset_stats()

    def _reset_stats(self):
        """Restore full health and restart the age counter."""
        self.health = self.MAX_HEALTH
        self.age = 0

    def _tick_stats(self):
        """Advance one tick: grow older and pay the upkeep cost."""
        self.age += 1
        self.health -= self.HEALTH_TICK_PENALTY

    def _load_instance(self):
        return _critter_resource.instance()

    def perform_action_onto(self, other: 'GridItem'):
        if isinstance(other, Food):
            # Never bite off more nutrition than we can actually absorb.
            room_left = self.MAX_HEALTH - self.health
            bite = clamp(self.BITE_SIZE, 0, room_left)
            self.health += other.take_nutrition(bite)

    @property
    def delete_queued(self):
        return self.health <= 0
| 2.609375 | 3 |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/doc_fragments/nios.py | tr3ck3r/linklight | 17 | 12793 | # -*- coding: utf-8 -*-
# Copyright: (c) 2015, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragment describing the NIOS provider options."""

    # Standard files documentation fragment
    # NOTE(review): the YAML indentation below was reconstructed; verify it
    # against the upstream community.general fragment before relying on it.
    DOCUMENTATION = r'''
options:
  provider:
    description:
      - A dict object containing connection details.
    type: dict
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            instance of NIOS WAPI over REST
          - Value can also be specified using C(INFOBLOX_HOST) environment
            variable.
        type: str
        required: true
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote instance of NIOS.
          - Value can also be specified using C(INFOBLOX_USERNAME) environment
            variable.
        type: str
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote instance of NIOS.
          - Value can also be specified using C(INFOBLOX_PASSWORD) environment
            variable.
        type: str
      validate_certs:
        description:
          - Boolean value to enable or disable verifying SSL certificates
          - Value can also be specified using C(INFOBLOX_SSL_VERIFY) environment
            variable.
        type: bool
        default: no
        aliases: [ ssl_verify ]
      http_request_timeout:
        description:
          - The amount of time before to wait before receiving a response
          - Value can also be specified using C(INFOBLOX_HTTP_REQUEST_TIMEOUT) environment
            variable.
        type: int
        default: 10
      max_retries:
        description:
          - Configures the number of attempted retries before the connection
            is declared usable
          - Value can also be specified using C(INFOBLOX_MAX_RETRIES) environment
            variable.
        type: int
        default: 3
      wapi_version:
        description:
          - Specifies the version of WAPI to use
          - Value can also be specified using C(INFOBLOX_WAP_VERSION) environment
            variable.
          - Until ansible 2.8 the default WAPI was 1.4
        type: str
        default: '2.1'
      max_results:
        description:
          - Specifies the maximum number of objects to be returned,
            if set to a negative number the appliance will return an error when the
            number of returned objects would exceed the setting.
          - Value can also be specified using C(INFOBLOX_MAX_RESULTS) environment
            variable.
        type: int
        default: 1000
notes:
  - "This module must be run locally, which can be achieved by specifying C(connection: local)."
  - Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
'''
| 1.804688 | 2 |
torchrec/metrics/rec_metric.py | xing-liu/torchrec | 814 | 12794 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import abc
import math
from collections import defaultdict, deque
from dataclasses import dataclass
from enum import Enum
from typing import (
Any,
Callable,
cast,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import torch
import torch.distributed as dist
import torch.nn as nn
from torchmetrics import Metric
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import (
compose_metric_key,
MetricNameBase,
MetricNamespaceBase,
MetricPrefix,
)
# A model output is either a single tensor or a mapping from task name to tensor.
RecModelOutput = Union[torch.Tensor, Dict[str, torch.Tensor]]


@dataclass(frozen=True)
class MetricComputationReport:
    """A single computed metric value, tagged with its name and key prefix."""

    name: MetricNameBase
    metric_prefix: MetricPrefix
    value: torch.Tensor


DefaultValueT = TypeVar("DefaultValueT")
# Items yielded by RecMetric._tasks_iter: (task, metric name, value, prefix).
ComputeIterType = Iterator[
    Tuple[RecTaskInfo, MetricNameBase, torch.Tensor, MetricPrefix]
]

# Upper bound on how many per-update states a WindowBuffer may retain.
MAX_BUFFER_COUNT = 1000
class RecMetricException(Exception):
    """Base exception type raised for rec metric errors."""
    pass
class WindowBuffer:
    """Rolling window over per-update metric states.

    Keeps a bounded queue of the most recent update tensors together with
    the number of samples each one covered, and maintains a running
    ``window_state`` tensor equal to the sum of the retained updates.
    Old updates are evicted once the total sample count exceeds
    ``max_size`` or the queue exceeds ``max_buffer_count`` entries.
    """

    def __init__(self, max_size: int, max_buffer_count: int) -> None:
        self._max_size: int = max_size
        self._max_buffer_count: int = max_buffer_count

        self._buffers: Deque[torch.Tensor] = deque(maxlen=max_buffer_count)
        self._used_sizes: Deque[int] = deque(maxlen=max_buffer_count)
        self._window_used_size = 0

    def _evict_oldest(self, window_state: torch.Tensor) -> None:
        """Drop the oldest buffered update and subtract it from the window."""
        window_state -= self._buffers.popleft()
        self._window_used_size -= self._used_sizes.popleft()

    def aggregate_state(
        self, window_state: torch.Tensor, curr_state: torch.Tensor, size: int
    ) -> None:
        """Fold one update into the window, evicting stale entries as needed."""
        # Make room before appending if the deque is already at capacity.
        if len(self._buffers) == self._buffers.maxlen:
            self._evict_oldest(window_state)

        self._buffers.append(curr_state)
        self._used_sizes.append(size)
        window_state += curr_state
        self._window_used_size += size

        # Shrink until the window covers at most _max_size samples.
        while self._window_used_size > self._max_size:
            self._evict_oldest(window_state)

    @property
    def buffers(self) -> Deque[torch.Tensor]:
        return self._buffers
class RecMetricComputation(Metric, abc.ABC):
    r"""The internal computation class template.

    A metric implementation should overwrite update() and compute(). These two
    APIs focus on the actual mathematical meaning of the metric, without the
    detail knowledge of model output and task information.

    Args:
        my_rank (int): the rank of this trainer.
        batch_size (int): batch size used by this trainer.
        n_tasks (int): the number of tasks this communication obj
            will have to compute.
        window_size (int): the window size for the window metric.
        compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if non-leader rank want to consume metrics result.
        process_group (Optional[ProcessGroup]): the process group used for the
            communication. Will use the default process group if not specified.
    """
    _batch_window_buffers: Optional[Dict[str, WindowBuffer]]

    def __init__(
        self,
        my_rank: int,
        batch_size: int,
        n_tasks: int,
        window_size: int,
        compute_on_all_ranks: bool = False,
        # pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
        process_group: Optional[dist.ProcessGroup] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        super().__init__(process_group=process_group, *args, **kwargs)

        self._my_rank = my_rank
        self._n_tasks = n_tasks
        self._batch_size = batch_size
        self._window_size = window_size
        self._compute_on_all_ranks = compute_on_all_ranks
        # Window buffers are only allocated when window metrics are enabled.
        if self._window_size > 0:
            self._batch_window_buffers = {}
        else:
            self._batch_window_buffers = None
        # Tracks, per task, whether at least one update carried valid weights.
        self._add_state(
            "has_valid_update",
            torch.zeros(self._n_tasks, dtype=torch.uint8),
            add_window_state=False,
            dist_reduce_fx=lambda x: torch.any(x, dim=0).byte(),
            persistent=True,
        )

    @staticmethod
    def get_window_state_name(state_name: str) -> str:
        """Return the attribute name used for the windowed copy of a state."""
        return f"window_{state_name}"

    def get_window_state(self, state_name: str) -> torch.Tensor:
        """Return the windowed (rolling) tensor for the given state name."""
        return getattr(self, self.get_window_state_name(state_name))

    def _add_state(
        self, name: str, default: DefaultValueT, add_window_state: bool, **kwargs: Any
    ) -> None:
        """Register a metric state and, optionally, a windowed twin of it."""
        # pyre-fixme[6]: Expected `Union[List[typing.Any], torch.Tensor]` for 2nd
        #  param but got `DefaultValueT`.
        super().add_state(name, default, **kwargs)
        if add_window_state:
            if self._batch_window_buffers is None:
                raise RuntimeError(
                    "Users is adding a window state while window metric is disabled."
                )
            # The windowed copy is rebuilt from buffers, so it is never persisted.
            kwargs["persistent"] = False
            window_state_name = self.get_window_state_name(name)
            # Avoid pyre error
            assert isinstance(default, torch.Tensor)
            super().add_state(window_state_name, default.detach().clone(), **kwargs)
            self._batch_window_buffers[window_state_name] = WindowBuffer(
                max_size=self._window_size,
                max_buffer_count=MAX_BUFFER_COUNT,
            )

    def _aggregate_window_state(
        self, state_name: str, state: torch.Tensor, num_samples: int
    ) -> None:
        """Fold one update's state into the corresponding window buffer."""
        if self._batch_window_buffers is None:
            raise RuntimeError(
                "Users is adding a window state while window metric is disabled."
            )
        window_state_name = self.get_window_state_name(state_name)
        assert self._batch_window_buffers is not None
        self._batch_window_buffers[window_state_name].aggregate_state(
            getattr(self, window_state_name), curr_state=state, size=num_samples
        )

    @abc.abstractmethod
    # pyre-fixme[14]: `update` overrides method defined in `Metric` inconsistently.
    def update(
        self,
        *,
        predictions: Optional[torch.Tensor],
        labels: torch.Tensor,
        weights: Optional[torch.Tensor],
    ) -> None:  # pragma: no cover
        """Accumulate one batch of per-task predictions/labels/weights."""
        pass

    @abc.abstractmethod
    def _compute(self) -> List[MetricComputationReport]:  # pragma: no cover
        """Produce the metric reports from the accumulated state."""
        pass

    def pre_compute(self) -> None:
        r"""If a metric need to do some work before `compute()`, the metric
        has to override this `pre_compute()`. One possible usage is to do
        some pre-processing of the local state before `compute()` as TorchMetric
        wraps `RecMetricComputation.compute()` and will do the global aggregation
        before `RecMetricComputation.compute()` is called.
        """
        return

    def compute(self) -> List[MetricComputationReport]:
        """Compute globally-aggregated reports (leader rank only, unless configured)."""
        if self._my_rank == 0 or self._compute_on_all_ranks:
            return self._compute()
        else:
            return []

    def local_compute(self) -> List[MetricComputationReport]:
        """Compute reports from this rank's local state only (no aggregation)."""
        return self._compute()
class RecMetric(nn.Module, abc.ABC):
    r"""The main class template to implement a recommendation metric.

    This class contains the recommendation tasks information (RecTaskInfo) and
    the actual computation object (RecMetricComputation). RecMetric processes
    all the information related to RecTaskInfo and models and pass the required
    signals to the computation object, allowing the implementation of
    RecMetricComputation to focus on the mathematical meaning.

    A new metric that inherits RecMetric must override the following attributes
    in its own __init__(): `_namespace` and `_metrics_computations`. No other
    methods should be overridden.

    Args:
        world_size (int): the number of trainers.
        my_rank (int): the rank of this trainer.
        batch_size (int): batch size used by this trainer.
        tasks (List[RecTaskInfo]): the information of the model tasks.
        compute_mode (RecComputeMode): the computation mode. See RecComputeMode.
        window_size (int): the window size for the window metric.
        fused_update_limit (int): the maximum number of updates to be fused.
        compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if non-leader rank want to consume global metrics result.
        process_group (Optional[ProcessGroup]): the process group used for the
            communication. Will use the default process group if not specified.

    Call Args:
        Not supported.

    Returns:
        Not supported.

    Example::

        ne = NEMetric(
            world_size=4,
            my_rank=0,
            batch_size=128,
            tasks=DefaultTaskInfo,
        )
    """

    _computation_class: Type[RecMetricComputation]
    _namespace: MetricNamespaceBase
    _metrics_computations: nn.ModuleList

    _tasks: List[RecTaskInfo]
    _window_size: int
    _tasks_iter: Callable[[str], ComputeIterType]
    _update_buffers: Dict[str, List[RecModelOutput]]
    _default_weights: Dict[Tuple[int, ...], torch.Tensor]

    # Keys of _update_buffers used for the fused-update path.
    PREDICTIONS: str = "predictions"
    LABELS: str = "labels"
    WEIGHTS: str = "weights"

    def __init__(
        self,
        world_size: int,
        my_rank: int,
        batch_size: int,
        tasks: List[RecTaskInfo],
        compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION,
        window_size: int = 100,
        fused_update_limit: int = 0,
        compute_on_all_ranks: bool = False,
        process_group: Optional[dist.ProcessGroup] = None,
        **kwargs: Any,
    ) -> None:
        # TODO(stellaya): consider to inherit from TorchMetrics.Metric or
        # TorchMetrics.MetricCollection.
        if (
            compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION
            and fused_update_limit > 0
        ):
            raise ValueError(
                "The fused tasks computation and the fused update cannot be set at the same time"
            )
        super().__init__()
        self._world_size = world_size
        self._my_rank = my_rank
        # Each rank tracks its share of the global window.
        self._window_size = math.ceil(window_size / world_size)
        self._batch_size = batch_size
        self._tasks = tasks
        self._compute_mode = compute_mode
        self._fused_update_limit = fused_update_limit
        self._default_weights = {}
        self._update_buffers = {
            self.PREDICTIONS: [],
            self.LABELS: [],
            self.WEIGHTS: [],
        }
        # Fused mode uses a single computation over all tasks; unfused mode
        # uses one computation per task.
        if compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
            n_metrics = 1
            task_per_metric = len(self._tasks)
            self._tasks_iter = self._fused_tasks_iter
        else:
            n_metrics = len(self._tasks)
            task_per_metric = 1
            self._tasks_iter = self._unfused_tasks_iter

        self._metrics_computations: nn.ModuleList = nn.ModuleList(
            [
                # This Pyre error seems to be Pyre's bug as it can be inferred by mypy
                # according to https://github.com/python/mypy/issues/3048.
                # pyre-fixme[45]: Cannot instantiate abstract class `RecMetricCoputation`.
                self._computation_class(
                    my_rank,
                    batch_size,
                    task_per_metric,
                    self._window_size,
                    compute_on_all_ranks,
                    process_group,
                    **kwargs,
                )
                for _ in range(n_metrics)
            ]
        )

    # TODO(stellaya): Refactor the _[fused, unfused]_tasks_iter methods and replace the
    # compute_scope str input with an enum
    def _fused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
        """Yield per-task metric values from the single fused computation."""
        assert len(self._metrics_computations) == 1
        self._metrics_computations[0].pre_compute()
        for metric_report in getattr(
            self._metrics_computations[0], compute_scope + "compute"
        )():
            for task, metric_value, has_valid_update in zip(
                self._tasks,
                metric_report.value,
                self._metrics_computations[0].has_valid_update,
            ):
                # The attribute has_valid_update is a tensor whose length equals to the
                # number of tasks. Each value in it is corresponding to whether a task
                # has valid updates or not.
                # If for a task there's no valid updates, the calculated metric_value
                # will be meaningless, so we mask it with the default value, i.e. 0.
                valid_metric_value = (
                    metric_value
                    if has_valid_update > 0
                    else torch.zeros_like(metric_value)
                )
                yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value

    def _unfused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
        """Yield per-task metric values, one computation object per task."""
        for task, metric_computation in zip(self._tasks, self._metrics_computations):
            metric_computation.pre_compute()
            for metric_report in getattr(
                metric_computation, compute_scope + "compute"
            )():
                # The attribute has_valid_update is a tensor with only 1 value
                # corresponding to whether the task has valid updates or not.
                # If there's no valid update, the calculated metric_report.value
                # will be meaningless, so we mask it with the default value, i.e. 0.
                valid_metric_value = (
                    metric_report.value
                    if metric_computation.has_valid_update[0] > 0
                    else torch.zeros_like(metric_report.value)
                )
                yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value

    def _fuse_update_buffers(self) -> Dict[str, RecModelOutput]:
        """Concatenate and clear the buffered updates accumulated so far."""
        def fuse(outputs: List[RecModelOutput]) -> RecModelOutput:
            assert len(outputs) > 0
            if isinstance(outputs[0], torch.Tensor):
                return torch.cat(cast(List[torch.Tensor], outputs))
            else:
                task_outputs: Dict[str, List[torch.Tensor]] = defaultdict(list)
                for output in outputs:
                    assert isinstance(output, dict)
                    for task_name, tensor in output.items():
                        task_outputs[task_name].append(tensor)
                return {
                    name: torch.cat(tensors) for name, tensors in task_outputs.items()
                }

        ret: Dict[str, RecModelOutput] = {}
        for key, output_list in self._update_buffers.items():
            if len(output_list) > 0:
                ret[key] = fuse(output_list)
            else:
                # Only weights may legitimately be absent from the buffers.
                assert key == self.WEIGHTS
            output_list.clear()
        return ret

    def _check_fused_update(self, force: bool) -> None:
        """Flush buffered updates when the limit is reached (or when forced)."""
        if self._fused_update_limit <= 0:
            return
        if len(self._update_buffers[self.PREDICTIONS]) == 0:
            return
        if (
            not force
            and len(self._update_buffers[self.PREDICTIONS]) < self._fused_update_limit
        ):
            return
        fused_arguments = self._fuse_update_buffers()
        self._update(
            predictions=fused_arguments[self.PREDICTIONS],
            labels=fused_arguments[self.LABELS],
            weights=fused_arguments.get(self.WEIGHTS, None),
        )

    def _create_default_weights(self, predictions: torch.Tensor) -> torch.Tensor:
        """Return (and cache, keyed by shape) an all-ones weight tensor."""
        weights = self._default_weights.get(predictions.size(), None)
        if weights is None:
            weights = torch.ones_like(predictions)
            self._default_weights[predictions.size()] = weights
        return weights

    def _check_nonempty_weights(self, weights: torch.Tensor) -> torch.Tensor:
        """Per task, True iff at least one weight in the batch is non-zero."""
        return torch.gt(torch.count_nonzero(weights, dim=-1), 0)

    def _update(
        self,
        *,
        predictions: RecModelOutput,
        labels: RecModelOutput,
        weights: Optional[RecModelOutput],
    ) -> None:
        """Forward one (possibly fused) batch to the computation object(s)."""
        with torch.no_grad():
            if self._compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
                assert isinstance(predictions, torch.Tensor)
                # Reshape the predictions to size([len(self._tasks), self._batch_size])
                predictions = predictions.view(-1, self._batch_size)
                assert isinstance(labels, torch.Tensor)
                labels = labels.view(-1, self._batch_size)
                if weights is None:
                    weights = self._create_default_weights(predictions)
                else:
                    assert isinstance(weights, torch.Tensor)
                    weights = weights.view(-1, self._batch_size)
                # has_valid_weights is a tensor of bool whose length equals to the number
                # of tasks. Each value in it is corresponding to whether the weights
                # are valid, i.e. are set to non-zero values for that task in this update.
                # If has_valid_weights are Falses for all the tasks, we just ignore this
                # update.
                has_valid_weights = self._check_nonempty_weights(weights)
                if torch.any(has_valid_weights):
                    self._metrics_computations[0].update(
                        predictions=predictions, labels=labels, weights=weights
                    )
                    self._metrics_computations[0].has_valid_update.logical_or_(
                        has_valid_weights
                    ).byte()
            else:
                # NOTE(review): this branch indexes predictions/labels/weights
                # by task name, so unfused mode assumes dict-typed outputs.
                for task, metric_ in zip(self._tasks, self._metrics_computations):
                    if task.name not in predictions:
                        continue
                    if torch.numel(predictions[task.name]) == 0:
                        assert torch.numel(labels[task.name]) == 0
                        assert weights is None or torch.numel(weights[task.name]) == 0
                        continue
                    # Reshape the predictions to size([1, self._batch_size])
                    task_predictions = predictions[task.name].view(1, -1)
                    task_labels = labels[task.name].view(1, -1)
                    if weights is None:
                        task_weights = self._create_default_weights(task_predictions)
                    else:
                        task_weights = weights[task.name].view(1, -1)
                    # has_valid_weights is a tensor with only 1 value corresponding to
                    # whether the weights are valid, i.e. are set to non-zero values for
                    # the task in this update.
                    # If has_valid_update[0] is False, we just ignore this update.
                    has_valid_weights = self._check_nonempty_weights(task_weights)
                    if has_valid_weights[0]:
                        metric_.update(
                            predictions=task_predictions,
                            labels=task_labels,
                            weights=task_weights,
                        )
                        metric_.has_valid_update.logical_or_(has_valid_weights).byte()

    def update(
        self,
        *,
        predictions: RecModelOutput,
        labels: RecModelOutput,
        weights: Optional[RecModelOutput],
    ) -> None:
        """Record one batch, either buffering it (fused updates) or applying it now."""
        if self._fused_update_limit > 0:
            self._update_buffers[self.PREDICTIONS].append(predictions)
            self._update_buffers[self.LABELS].append(labels)
            if weights is not None:
                self._update_buffers[self.WEIGHTS].append(weights)
            self._check_fused_update(force=False)
        else:
            self._update(predictions=predictions, labels=labels, weights=weights)

    # The implementation of compute is very similar to local_compute, but compute overwrites
    # the abstract method compute in torchmetrics.Metric, which is wrapped by _wrap_compute
    def compute(self) -> Dict[str, torch.Tensor]:
        """Return globally-aggregated metric values keyed by composed metric key."""
        self._check_fused_update(force=True)
        ret = {}
        for task, metric_name, metric_value, prefix in self._tasks_iter(""):
            metric_key = compose_metric_key(
                self._namespace, task.name, metric_name, prefix
            )
            ret[metric_key] = metric_value
        return ret

    def local_compute(self) -> Dict[str, torch.Tensor]:
        """Return this rank's local metric values keyed by composed metric key."""
        self._check_fused_update(force=True)
        ret = {}
        for task, metric_name, metric_value, prefix in self._tasks_iter("local_"):
            metric_key = compose_metric_key(
                self._namespace, task.name, metric_name, prefix
            )
            ret[metric_key] = metric_value
        return ret

    def sync(self) -> None:
        """Synchronize all underlying computations across ranks."""
        for computation in self._metrics_computations:
            computation.sync()

    def unsync(self) -> None:
        """Undo a previous sync() on every computation that is currently synced."""
        for computation in self._metrics_computations:
            if computation._is_synced:
                computation.unsync()

    def reset(self) -> None:
        """Reset the accumulated state of every underlying computation."""
        for computation in self._metrics_computations:
            computation.reset()

    def get_memory_usage(self) -> Dict[torch.Tensor, int]:
        r"""Estimates the memory of the rec metric instance's
        underlying tensors; returns the map of tensor to size
        """
        tensor_map = {}
        # Breadth-first walk over attribute values, collecting tensors.
        attributes_q = deque(self.__dict__.values())
        while attributes_q:
            attribute = attributes_q.popleft()
            if isinstance(attribute, torch.Tensor):
                tensor_map[attribute] = (
                    attribute.size().numel() * attribute.element_size()
                )
            elif isinstance(attribute, WindowBuffer):
                attributes_q.extend(attribute.buffers)
            elif isinstance(attribute, Mapping):
                attributes_q.extend(attribute.values())
            elif isinstance(attribute, Sequence) and not isinstance(attribute, str):
                attributes_q.extend(attribute)
            elif hasattr(attribute, "__dict__") and not isinstance(attribute, Enum):
                attributes_q.extend(attribute.__dict__.values())
        return tensor_map

    # pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
    def state_dict(
        self,
        destination: Optional[Dict[str, torch.Tensor]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Serialize the metric state (delegates to the computations' state)."""
        # We need to flush the cached output to ensure checkpointing correctness.
        self._check_fused_update(force=True)
        destination = super().state_dict(
            destination=destination, prefix=prefix, keep_vars=keep_vars
        )
        return self._metrics_computations.state_dict(
            destination=destination,
            prefix=f"{prefix}_metrics_computations.",
            keep_vars=keep_vars,
        )
class RecMetricList(nn.Module):
    """
    A list module to encapulate multiple RecMetric instances and provide the
    same interfaces as RecMetric.

    Args:
        rec_metrics (List[RecMetric]: the list of the input RecMetrics.

    Call Args:
        Not supported.

    Returns:
        Not supported.

    Example::

        ne = NEMetric(
            world_size=4,
            my_rank=0,
            batch_size=128,
            tasks=DefaultTaskInfo
        )
        metrics = RecMetricList([ne])
    """

    rec_metrics: nn.ModuleList

    def __init__(self, rec_metrics: List[RecMetric]) -> None:
        # TODO(stellaya): consider to inherit from TorchMetrics.MetricCollection.
        # The prequsite to use MetricCollection is that RecMetric inherits from
        # TorchMetrics.Metric or TorchMetrics.MetricCollection
        super().__init__()
        self.rec_metrics = nn.ModuleList(rec_metrics)

    def __len__(self) -> int:
        return len(self.rec_metrics)

    def __getitem__(self, idx: int) -> nn.Module:
        return self.rec_metrics[idx]

    def update(
        self,
        *,
        predictions: RecModelOutput,
        labels: RecModelOutput,
        weights: RecModelOutput,
    ) -> None:
        """Forward the batch to every contained metric."""
        for rec_metric in self.rec_metrics:
            rec_metric.update(predictions=predictions, labels=labels, weights=weights)

    def compute(self) -> Dict[str, torch.Tensor]:
        """Merge the global results of all contained metrics into one dict."""
        merged: Dict[str, torch.Tensor] = {}
        for rec_metric in self.rec_metrics:
            merged.update(rec_metric.compute())
        return merged

    def local_compute(self) -> Dict[str, torch.Tensor]:
        """Merge the local (per-rank) results of all contained metrics."""
        merged: Dict[str, torch.Tensor] = {}
        for rec_metric in self.rec_metrics:
            merged.update(rec_metric.local_compute())
        return merged

    def sync(self) -> None:
        for rec_metric in self.rec_metrics:
            rec_metric.sync()

    def unsync(self) -> None:
        for rec_metric in self.rec_metrics:
            rec_metric.unsync()

    def reset(self) -> None:
        for rec_metric in self.rec_metrics:
            rec_metric.reset()
alison.py | johanhoiness/SlothBot | 1 | 12795 | <reponame>johanhoiness/SlothBot
__author__ = 'JohnHiness'
import sys
import os
import random
import time
import string
import connection
from time import strftime
import ceq
import json, urllib2
import thread
args = sys.argv
req_files = ['filegen.py', 'connection.py', 'commands.py', 'general.py', 'automatics.py']
for filename in req_files:
if os.path.exists(filename) == False:
print "Required file \"{}\" not found. Make sure you have acquired all files.".format(filename)
sys.exit(1)
import filegen
if os.path.exists('config.py') == False:
print 'No configuration-file found. Generating config.py'
filegen.gen_config()
python = sys.executable
print str(python)+'||'+str(python)+'||'+ str(* sys.argv)
os.execl(python, python, * sys.argv)
if os.path.exists('revar.py') == False:
print 'No reconfigurable file found. Generating revar.py'
filegen.gen_revar()
python = sys.executable
print str(python)+'||'+str(python)+'||'+ str(* sys.argv)
os.execl(python, python, * sys.argv)
import config
import revar
import filegen
import commands
import general
import automatics
if not revar.channels:
revar.channels = config.channel.replace(', ', ',').replace(' ', ',').split(',')
if len(args) > 1:
if args[1].lower() == 'reconfig' or args[1].lower() == 'config':
answr = raw_input("This will have you regenerate the configuration file and all old configurations will be lost.\nAre you sure you want to do this?(y/n) ")
while answr.lower() != 'y' or answr.lower() != 'n':
answr = raw_input("You must use the letters Y or N to answer: ")
if answr.lower() == 'y':
filegen.gen_config()
sys.exit(0)
if answr.lower() == 'n':
sys.exit(0)
elif args[1].lower() == 'help':
print "Usage: python alison.py <help | reconfig | >"
sys.exit(0)
else:
print "Flag not recognized."
sys.exit(1)
def connect(server, port):
print "Connecting to {} with port {}.".format(server, port)
s = connection.s
readbuffer = ''
try:
s.connect((server, port))
except BaseException as exc:
print 'Failed to connect: ' + str(exc)
sys.exit(1)
s.send("PASS %<PASSWORD>" % config.password)
s.send("USER %s %s %s :%s\n" % (config.bot_username, config.bot_hostname, config.bot_servername, config.bot_realname))
s.send("NICK %s\n" % revar.bot_nick)
mode_found = False
while not mode_found:
readbuffer = readbuffer + s.recv(2048)
temp = string.split(readbuffer, "\n")
readbuffer = temp.pop()
for rline in temp:
rline = string.rstrip(rline)
rline = string.split(rline)
g = general
if rline[0] == "PING":
g.ssend("PONG %s\r" % rline[1])
if rline[1] == '433':
if revar.bot_nick.lower() != config.bot_nick2.lower():
revar.bot_nick = config.bot_nick2
else:
revar.bot_nick += '_'
g.ssend('NICK %s' % revar.bot_nick)
if len(rline) > 2 and rline[1] == '391':
revar.bot_nick = rline[2]
if len(rline) > 2 and rline[1].lower() == 'join':
if not rline[2].lower() in revar.channels:
revar.channels.append(rline[2].lower())
if len(rline) > 2 and rline[1].lower() == 'part':
if rline[2].lower() in revar.channels:
try:
revar.channels.append(rline[2].lower())
except:
pass
if rline[1] == 'MODE':
mode_found = True
g.ssend('JOIN %s' % ','.join(revar.channels))
general.update_user_info()
def server_responses(rline):
    """Handle protocol-level server lines (PING, numerics, JOIN/PART/NICK).

    Args:
        rline: the received line, already split into tokens.

    Returns:
        True when the line was consumed here and needs no further handling.
    """
    g = general
    if rline[0] == "PING":
        g.ssend("PONG %s\r" % rline[1])
        return True
    if len(rline) > 4 and rline[3] == '152':
        # WHO reply entry: feed it to the user-info cache.
        general.append_user_info(rline)
        return True
    if rline[1] == '433':
        # Nick already in use: try the secondary nick, then underscores.
        if revar.bot_nick.lower() != config.bot_nick2.lower():
            revar.bot_nick = config.bot_nick2
        else:
            revar.bot_nick += '_'
        g.ssend('NICK %s' % revar.bot_nick)
        return True
    if len(rline) > 2 and rline[1] == '391':
        revar.bot_nick = rline[2]
        return True
    if len(rline) > 1 and rline[1].lower() == 'pong':
        general.last_pong = time.time()
        return True
    if len(rline) > 2 and rline[1].lower() == 'join':
        if not rline[2].lower() in revar.channels:
            revar.channels.append(rline[2].lower())
        return True
    if len(rline) > 2 and rline[1].lower() == 'nick':
        general.update_user_info()
        return True
    if len(rline) > 2 and rline[1].lower() == 'part':
        # BUG FIX: PART means the channel was left, so remove it from the
        # list; the original appended the channel name again instead.
        if rline[2].lower() in revar.channels:
            try:
                revar.channels.remove(rline[2].lower())
            except ValueError:
                pass
        return True
    if len(rline) > 3 and rline[1] == '319' and rline[2].lower() == revar.bot_nick.lower():
        # 319 = WHOIS channels list; rebuild our channel list from it.
        revar.channels = ' '.join(rline[4:])[1:].replace('+', '').replace('@', '').lower().split()
        return True
    # NOTE: a second, unreachable check for numeric 391 was removed; the
    # earlier 391 branch above already returns.
    if not rline[0].find('!') != -1:
        # No '!' in the prefix => not a user message; nothing more to do.
        return True
    if len(rline) > 3 and rline[1] == '315':
        return True
    return False
def find_imdb_link(chanq, msg):
    """If *msg* contains an IMDb title URL, post that title's info to the channel."""
    lowered = msg.lower()
    marker = lowered.find('imdb.com/title/')
    if marker != -1:
        # The 9-character title id (e.g. tt0111161) follows the path prefix.
        imdb_id = lowered[marker + 15:][:9]
        g.csend(chanq, commands.imdb_info('id', imdb_id))
def botendtriggerd(chant, usert, msgt):
    """Run an operator command addressed directly to the bot and reply."""
    if not general.check_operator(usert):
        outp = 'You do not have permission to use any of these commands.'
    else:
        tokens = general.check_bottriggers(msgt).split()
        outp = commands.operator_commands(chant, tokens)
    if outp is None:
        return
    # Send the reply line by line, pausing to avoid flooding the server.
    for reply_line in outp.split('\n'):
        g.csend(chant, reply_line)
        time.sleep(1)
def work_command(chanw, userw, msgw):
    """Resolve a triggered user command and send its output back."""
    cleaned = general.check_midsentencecomment(msgw)
    cleaned, rec, notice, pm = general.checkrec(chanw, userw, cleaned)
    reply = commands.check_called(chanw, userw, cleaned)
    if reply is None:
        return
    # One message per output line, throttled to avoid flooding.
    for reply_line in reply.split('\n'):
        g.csend(chanw, reply_line, notice, pm, rec)
        time.sleep(1)
def work_line(chanl, userl, msgl):
    """Per-line housekeeping that runs for every incoming channel message."""
    # Abort a running countdown in this channel if someone says 'stop'.
    if chanl in general.countdown and msgl.lower().find('stop') != -1:
        general.countdown.remove(chanl)
    # Forward channel mentions of these keywords to the user whose nickserv
    # account is 'sloth'.
    if chanl.find('#') != -1 and (msgl.lower().find('johan') != -1 or msgl.lower().find('slut') != -1):
        for item in general.user_info:
            if item['nickserv'].lower() == 'sloth':
                general.csend(item['nick'], '{} <{}> {}'.format(chanl, userl, msgl))
    general.update_seen(chanl, userl, msgl)
    # Easter egg: reply to "deer god" at most once every 30 seconds, and only
    # while the feature flag is enabled.
    if (" "+msgl).lower().find('deer god') != -1 and time.time() - general.deer_god > 30 and revar.deer_god:
        general.deer_god = time.time()
        general.csend(chanl, "Deer God http://th07.deviantart.net/fs71/PRE/f/2011/223/3/c/deer_god_by_aubrace-d469jox.jpg")
if __name__ == '__main__':
    # Background workers: clock string, keepalive pings, weather announcements,
    # pong watchdog, and periodic WHO refresh.
    thread.start_new_thread(automatics.get_ftime, ())
    connect(config.server, config.port)
    thread.start_new_thread(automatics.autoping, ())
    thread.start_new_thread(automatics.autoweather, ())
    thread.start_new_thread(automatics.checkpongs, ())
    thread.start_new_thread(automatics.who_channel, ())
    s = connection.s
    readbuffer = ''
    # Main receive loop: accumulate socket data, split into complete lines,
    # and dispatch each line.
    while True:
        readbuffer = readbuffer + s.recv(2048)
        temp = string.split(readbuffer, "\n")
        readbuffer = temp.pop()
        for rline in temp:
            rline = string.rstrip(rline)
            rline = string.split(rline)
            g = general
            # Lines not consumed as protocol traffic are user messages.
            if not server_responses(rline) and len(rline) > 3:
                msg = ' '.join(rline[3:])[1:]
                user = rline[0][1:][:rline[0].find('!')][:-1]
                chan = rline[2]
                # Private messages: reply to the sender, not to our own nick.
                if chan.lower() == revar.bot_nick.lower():
                    chan = user
                if config.verbose:
                    print g.ftime + ' << ' + ' '.join(rline)
                else:
                    print g.ftime + ' << ' + chan + ' <{}> '.format(user) + msg
                # Direct bot-addressed commands are handled exclusively.
                if general.check_bottriggers(msg):
                    thread.start_new_thread(botendtriggerd, (chan, user, msg),)
                    break
                thread.start_new_thread(find_imdb_link, (chan, msg), )
                thread.start_new_thread(work_line, (chan, user, msg), )
                msg = general.check_midsentencetrigger(msg)
                msg = general.check_triggers(msg)
                if msg:
                    thread.start_new_thread(work_command, (chan, user, msg), )
| 2.640625 | 3 |
pyroomacoustics/experimental/tests/test_deconvolution.py | HemaZ/pyroomacoustics | 1 | 12796 | <filename>pyroomacoustics/experimental/tests/test_deconvolution.py<gh_stars>1-10
from unittest import TestCase
import numpy as np
from scipy.signal import fftconvolve
import pyroomacoustics as pra
# fix seed for repeatability
np.random.seed(0)

h_len = 30    # length of the impulse responses under test
x_len = 1000  # length of the random excitation signal
SNR = 1000.  # decibels

# Three test filters: a low-pass response, white noise, and a Hann window.
h_lp = np.fft.irfft(np.ones(5), n=h_len)
h_rand = np.random.randn(h_len)
h_hann = pra.hann(h_len, flag='symmetric')

x = np.random.randn(x_len)
# Noise matches the full convolution length x_len + h_len - 1.
noise = np.random.randn(x_len + h_len - 1)
def generate_signals(SNR, x, h, noise):
    """Convolve x with h and add white noise scaled for the requested SNR.

    Args:
        SNR: target signal-to-noise ratio in decibels.
        x: input signal.
        h: impulse response.
        noise: noise sequence of length len(x) + len(h) - 1.

    Returns:
        (y, sigma_noise): noisy output and the noise standard deviation used.
    """
    # Noise standard deviation implied by the SNR in dB.
    sigma_noise = 10 ** (-SNR / 20.)
    clean = fftconvolve(x, h)
    return clean + sigma_noise * noise, sigma_noise
class TestDeconvolution(TestCase):
    """Checks that both deconvolution methods recover a known filter."""

    def test_deconvolve_hann_noiseless(self):
        # With a (virtually) noiseless recording, plain deconvolution should
        # recover the Hann filter to within numerical precision.
        h = h_hann
        h_len = h_hann.shape[0]
        SNR = 1000.
        tol = 1e-7
        y, sigma_noise = generate_signals(SNR, x, h, noise)
        h_hat = pra.experimental.deconvolve(y, x, length=h_len)
        rmse = np.sqrt(np.linalg.norm(h_hat - h)**2 / h_len)
        print('rmse=', rmse, '(tol=', tol, ')')
        self.assertTrue(rmse < tol)

    def test_wiener_deconvolve_hann_noiseless(self):
        # Same check for the Wiener deconvolution with a matched noise variance.
        h = h_hann
        h_len = h_hann.shape[0]
        SNR = 1000.
        tol = 1e-7
        y, sigma_noise = generate_signals(SNR, x, h, noise)
        h_hat = pra.experimental.wiener_deconvolve(y, x, length=h_len, noise_variance=sigma_noise**2)
        rmse = np.sqrt(np.linalg.norm(h_hat - h)**2 / h_len)
        print('rmse=', rmse, '(tol=', tol, ')')
        self.assertTrue(rmse < tol)
if __name__ == '__main__':
    # Visual comparison of naive vs. Wiener deconvolution on the Hann filter.
    import matplotlib.pyplot as plt

    h = h_hann
    y, sigma_noise = generate_signals(SNR, x, h, noise)

    h_hat1 = pra.experimental.deconvolve(y, x, length=h_len)
    res1 = np.linalg.norm(y - fftconvolve(x, h_hat1))**2 / y.shape[0]
    mse1 = np.linalg.norm(h_hat1 - h)**2 / h_len

    h_hat2 = pra.experimental.wiener_deconvolve(y, x, length=h_len, noise_variance=sigma_noise**2, let_n_points=15)
    res2 = np.linalg.norm(y - fftconvolve(x, h_hat2))**2 / y.shape[0]
    mse2 = np.linalg.norm(h_hat2 - h)**2 / h_len

    print('MSE naive: rmse=', np.sqrt(mse1), ' res=', pra.dB(res1, power=True))
    # BUG FIX: this line previously printed res1 (the naive residual) for the
    # Wiener method; it now reports res2, which was computed but never used.
    print('MSE Wiener: rmse=', np.sqrt(mse2), ' res=', pra.dB(res2, power=True))

    plt.plot(h)
    plt.plot(h_hat1)
    plt.plot(h_hat2)
    plt.legend(['Original', 'Naive', 'Wiener'])
    plt.show()
| 2.484375 | 2 |
pthelper/img_to_txt.py | hkcountryman/veg-scanner | 0 | 12797 | import cv2 as cv
from deskew import determine_skew
import numpy as np
from PIL import Image, ImageFilter, ImageOps
from pytesseract import image_to_string
from skimage import io
from skimage.color import rgb2gray
from skimage.transform import rotate
from spellchecker import SpellChecker
import traceback
# On Windows, you need to tell it where Tesseract is installed, for example:
# pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe
# OCR Stuff
####################################################################################################
def to_text(pic):
    """
    Read and return text from an image.

    Args:
        pic: filename string, pathlib.Path object, or file object to read.

    Returns:
        Text from the image.
    """
    try:
        img = Image.open(pic)
    except FileNotFoundError:
        # str(pic) so Path/file-object arguments do not break the message.
        print("File " + str(pic) + " does not exist.")
        quit()
    except UnidentifiedImageError:
        # BUG FIX: was `PIL.UnidentifiedImageError`, but `PIL` itself was never
        # imported, which raised NameError while matching the exception.
        print("That file is not an image.")
        quit()
    except Exception:
        print("Unanticipated error:")
        traceback.print_exc()
        quit()
    # BUG FIX: the converted image was previously discarded — remove_alpha()
    # returns a new image, so the result must be reassigned before OCR.
    img = remove_alpha(img)
    text = image_to_string(img)
    return text
def valid_text(ocr, accuracy_pct, language="en", distance=2, case_sensitive=True): # this spellchecker sucks
    """
    Checks that the output of to_text() makes sense. To build your own dictionary, see
    https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#how-to-build-a-new-dictionary

    Args:
        ocr: string to analyze.
        accuracy_pct: percentage of words in ocr that should be in the dictionary.
        language: language of dictionary (default English); see
            https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#changing-language
        distance: Levenshtein distance (default 2 for shorter words); see
            https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#basic-usage
            https://en.wikipedia.org/wiki/Levenshtein_distance
        case_sensitive: whether dictionary lookups are case sensitive.

    Returns:
        Boolean indicating success of to_text():
            True: to_text() makes sense.
            False: to_text() returned nonsense.
    """
    word_list = ocr.split()  # get list of all words in input string
    if not word_list:
        # BUG FIX: the old check (`ocr == ""`) let whitespace-only strings
        # through, causing a ZeroDivisionError in the ratio below.
        return False
    spell = SpellChecker(language=language, distance=distance, case_sensitive=case_sensitive)
    misspelled = spell.unknown(word_list)  # set of unknown words from word_list
    known_ratio = (len(word_list) - len(misspelled)) / len(word_list)
    if known_ratio < accuracy_pct / 100:
        return False  # if it returned gibberish
    return True  # otherwise, all good
def parse(pic, accuracy_pct, language="en", distance=2, case_sensitive=True):
    """
    Attempts OCR with image and decides if processing is needed.

    Args:
        pic: filename string, pathlib.Path object, or file object to read.
        accuracy_pct: percentage of words in string that should be in the dictionary.
        language: language of dictionary (default English); see
            https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#changing-language
        distance: Levenshtein distance (default 2 for shorter words); see
            https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#basic-usage
            https://en.wikipedia.org/wiki/Levenshtein_distance

    Returns:
        Text from the image if OCR was successful; otherwise a failure message.
    """
    ocr_output = to_text(pic)
    looks_valid = valid_text(ocr_output, accuracy_pct, language=language,
                             distance=distance, case_sensitive=case_sensitive)
    if not looks_valid:
        return "OCR failed."  # time for processing
    return ocr_output
# Image Processing Stuff
####################################################################################################
def remove_alpha(pic):
    """
    Strip any alpha channel from an image by converting it to RGB mode.
    Necessary for OCR.

    Args:
        pic: PIL.Image object to convert.

    Returns:
        The PIL.Image object in RGB format.
    """
    return pic.convert("RGB")
def invert(pic):
    """
    Produce a color negative of the image. Useful if OCR doesn't work.

    Args:
        pic: PIL.Image object to invert.

    Returns:
        The inverted PIL.Image object.
    """
    rgb = remove_alpha(pic)  # ImageOps.invert cannot handle alpha channels
    return ImageOps.invert(rgb)
# NOTE(review): dead code — `resize` is disabled by being wrapped in a bare
# string literal, which is evaluated and discarded at import time. Left
# byte-for-byte as found; delete or re-enable once the "dpi" KeyError is fixed.
'''def resize(pic): # needs work: possible key error "dpi"
    """
    Resizes an image that is less than 300 dpi. Useful if OCR doesn't work.

    Args:
        pic: PIL.Image object to resize.

    Returns:
        The resized PIL.Image object.
    """
    pic = remove_alpha(pic)
    res = pic.info["dpi"] # fetch tuple of dpi
    lower = min(res) # get the lower of the two entries in the tuple
    factor = 300 / lower # how much should we scale?
    resized = pic.resize((round(pic.size[0]*factor), round(pic.size[1]*factor))) # scale it!
    return resized'''
def threshold(pic, gaussian=True):
    """
    Applies adaptive thresholding to the image.
    (Tesseract already tries the Otsu algorithm.)

    Args:
        pic: filename string or pathlib.Path object to read.
        gaussian: boolean:
            True: apply adaptive Gaussian thresholding.
            False: apply adaptive mean thresholding.

    Returns:
        The thresholded PIL.Image object.
    """
    # BUG FIX: previously read the hard-coded file "test2.jpg" and ignored
    # `pic` entirely — the likely reason this "didn't work". str() also
    # accepts pathlib.Path. The 0 flag loads the image as grayscale, which
    # adaptiveThreshold requires.
    img = cv.imread(str(pic), 0)
    if gaussian:  # adaptive Gaussian thresholding
        img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    else:  # adaptive mean thresholding
        img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
    return Image.fromarray(img)
def denoise(pic):
    """
    Applies non-local means denoising to reduce image noise.
    Useful if OCR doesn't work.

    Args:
        pic: filename string or pathlib.Path object to read.

    Returns:
        The denoised PIL.Image object.

    Raises:
        FileNotFoundError: if the file could not be read as an image.
    """
    # str() so pathlib.Path arguments work with OpenCV.
    img = cv.imread(str(pic))
    if img is None:
        # cv.imread signals failure by returning None instead of raising,
        # which previously surfaced as a confusing error inside OpenCV.
        raise FileNotFoundError("Could not read image: " + str(pic))
    img = cv.fastNlMeansDenoising(img)
    return Image.fromarray(img)
def dilate(pic, size):
    """
    Dilates the text (grows edges of characters) if it's against a common
    background. Useful if OCR doesn't work.

    Args:
        pic: PIL.Image object to dilate.
        size: kernel size, in pixels. Recommend starting at 1.

    Returns:
        The dilated PIL.Image object.
    """
    rgb = remove_alpha(pic)
    return rgb.filter(ImageFilter.MaxFilter(size))
def erode(pic, size):
    """
    Erodes the text (shrinks edges of characters) if it's against a common
    background. Useful if OCR doesn't work.

    Args:
        pic: PIL.Image object to erode.
        size: kernel size, in pixels. Recommend starting at 1.

    Returns:
        The eroded PIL.Image object.
    """
    rgb = remove_alpha(pic)
    return rgb.filter(ImageFilter.MinFilter(size))
def deskew(pic, output):
    """
    Straightens a skewed image and saves the result. Useful if OCR doesn't work.

    Args:
        pic: filename string, pathlib.Path object, or file object to read.
        output: string to save output as.
    """
    # Thanks to Stephane Brunner (https://github.com/sbrunner) for deskew and the code!
    image = io.imread(pic)
    gray = rgb2gray(image)
    skew_angle = determine_skew(gray)
    # rotate() returns floats in [0, 1]; rescale to 8-bit before saving.
    straightened = rotate(image, skew_angle, resize=True) * 255
    io.imsave(output, straightened.astype(np.uint8))
| 2.90625 | 3 |
scripts/training.py | tobinsouth/privacy-preserving-synthetic-mobility-data | 0 | 12798 | <reponame>tobinsouth/privacy-preserving-synthetic-mobility-data<gh_stars>0
# Training hyper-parameters.
# BUG FIX: `torch` was previously imported *after* it was used to pick the
# device, which raised NameError as soon as the script started.
import torch, numpy as np

learning_rate = 0.001
k = 0.0025   # steepness of the KL-annealing sigmoid (see loss_fn)
x0 = 2500    # midpoint, in optimizer steps, of the KL-annealing sigmoid
epochs = 4
batch_size = 16
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from tqdm import tqdm
# Get the dataloader
from dataloader import get_train_test
# 95/5 train/validation split over the 'cuebiq' dataset, shuffled each epoch.
trainStays, testStays = get_train_test(train_size=0.95, batch_size=batch_size, shuffle=True, dataset='cuebiq')
# Load and define the model
from VAE import SentenceVAE

# Model params
# Vocabulary size and max sequence length are taken from the (doubly wrapped)
# underlying dataset of the train loader.
params = dict(
    vocab_size = trainStays.dataset.dataset._vocab_size,
    max_sequence_length = trainStays.dataset.dataset._max_seq_len,
    embedding_size = 256,
    rnn_type = 'gru',
    hidden_size = 256,
    num_layers = 1,
    bidirectional = False,
    latent_size = 16,
    word_dropout = 0,
    embedding_dropout = 0.5,
    # NOTE(review): sos, eos and pad all share token index 0 here — confirm
    # that this matches the dataloader's vocabulary convention.
    sos_idx=0,
    eos_idx=0,
    pad_idx=0,
    unk_idx=1,
    device=device,
)
model = SentenceVAE(**params)
model = model.to(device) # Device is defined in VAE
# Custom loss function from paper
NLL = torch.nn.NLLLoss(ignore_index=0, reduction='sum')
def loss_fn(logp, target, mean, logv, step, k, x0):
"""The loss function used in the paper, taken from https://github.com/timbmg/Sentence-VAE"""
target = target.view(-1)
logp = logp.view(-1, logp.size(2))
# Negative Log Likelihood
NLL_loss = NLL(logp, target)
# KL Divergence
KL_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())
KL_weight = float(1/(1+np.exp(-k*(step-x0))))
return NLL_loss, KL_loss, KL_weight
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Logging with tensorboard
from torch.utils.tensorboard import SummaryWriter
LOG_DIR = "runs/cuebiq"
comment = f' batch_size = {batch_size} lr = {learning_rate} dp = False'
# Separate writers so train and validation curves show up as distinct runs.
train_writer = SummaryWriter(LOG_DIR + "/train", comment=comment)
val_writer = SummaryWriter(LOG_DIR + "/val", comment=comment)
# Run training loop
step = 0  # global optimizer-step counter, drives the KL annealing schedule
for epoch in range(epochs):
    running_loss = 0.0
    for i, batch in enumerate(tqdm(trainStays, miniters=500)):
        batch = batch.to(device)

        # Forward pass
        logp, mean, logv, z = model(batch)

        # loss calculation (NLL + annealed KL, normalized by batch size)
        NLL_loss, KL_loss, KL_weight = loss_fn(logp, batch, mean, logv, step, k, x0)
        loss = (NLL_loss + KL_weight * KL_loss) / batch_size
        loss.to(device)

        # backward + optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step += 1

        # Log the average training loss every 1000 batches.
        running_loss += loss.item()
        if i % 1000 == 999:
            train_writer.add_scalar('loss', running_loss / 1000, epoch * len(trainStays) + i)
            running_loss = 0.0

        # Periodic Validation and checkpointing
        if i % 20000 == 19999:
            model.eval()
            val_loss = 0.0
            # NOTE(review): this inner loop rebinds `batch`; no torch.no_grad()
            # is used, so gradients are still tracked during validation.
            for batch in testStays:
                batch = batch.to(device)
                logp, mean, logv, z = model(batch)
                NLL_loss, KL_loss, KL_weight = loss_fn(logp, batch, mean, logv, step, k, x0)
                loss = (NLL_loss + KL_weight * KL_loss) / batch_size
                val_loss += loss.item()
            # NOTE(review): val_loss is summed over len(testStays) batches but
            # divided by 20000 here and by 10000 in the checkpoint below —
            # presumably both should be len(testStays); confirm intent.
            val_writer.add_scalar('loss', val_loss / 20000, epoch * len(trainStays) + i)
            model.train()

            # Checkpoint model, optimizer state and hyper-params for resuming.
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': val_loss / 10000,
                'params': params,
            }, '../models/cuebiq_vae.pt')

train_writer.close()
val_writer.close()
tests/compute/planar/test_rotateZ.py | ianna/vector | 0 | 12799 | # Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import numpy
import pytest
import vector.backends.numpy_
import vector.backends.object_
def test_xy():
    """rotateZ on 2D vectors in Cartesian (x, y) coordinates, object and numpy backends."""
    vec = vector.backends.object_.VectorObject2D(
        vector.backends.object_.AzimuthalObjectXY(1, 0)
    )
    rotated = vec.rotateZ(0.1)
    assert rotated.x == pytest.approx(0.9950041652780258)
    assert rotated.y == pytest.approx(0.09983341664682815)

    array = vector.backends.numpy_.VectorNumpy2D(
        [(0, 0), (1, 0), (0, 1)], dtype=[("x", numpy.float64), ("y", numpy.float64)]
    )
    out = array.rotateZ(0.1)
    assert isinstance(out, vector.backends.numpy_.VectorNumpy2D)
    assert out.dtype.names == ("x", "y")
    assert numpy.allclose(out.x, [0, 0.9950041652780258, -0.09983341664682815])
    assert numpy.allclose(out.y, [0, 0.09983341664682815, 0.9950041652780258])
def test_rhophi():
    """rotateZ on 2D vectors in polar (rho, phi) coordinates, object and numpy backends."""
    vec = vector.backends.object_.VectorObject2D(
        vector.backends.object_.AzimuthalObjectRhoPhi(1, 0)
    )
    rotated = vec.rotateZ(0.1)
    assert rotated.rho == pytest.approx(1)
    assert rotated.phi == pytest.approx(0.1)

    array = vector.backends.numpy_.VectorNumpy2D(
        [(0, 0), (1, 0), (0, 1)], dtype=[("rho", numpy.float64), ("phi", numpy.float64)]
    )
    out = array.rotateZ(0.1)
    assert isinstance(out, vector.backends.numpy_.VectorNumpy2D)
    assert out.dtype.names == ("rho", "phi")
    assert numpy.allclose(out.rho, [0, 1, 0])
    assert numpy.allclose(out.phi, [0.1, 0.1, 1.1])
| 2.203125 | 2 |