hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c36618f3333b93cbe1b7cfd4238c38225e0559e | 8,729 | py | Python | phaselink_plot.py | jiunting/PhaseLink | 994e85ff7162869dd2a54c26eeb307e72ed1972e | [
"MIT"
] | 47 | 2019-06-13T13:47:17.000Z | 2022-03-20T05:01:51.000Z | phaselink_plot.py | jiunting/PhaseLink | 994e85ff7162869dd2a54c26eeb307e72ed1972e | [
"MIT"
] | 2 | 2020-07-01T17:06:09.000Z | 2021-05-01T02:20:45.000Z | phaselink_plot.py | jiunting/PhaseLink | 994e85ff7162869dd2a54c26eeb307e72ed1972e | [
"MIT"
] | 26 | 2019-06-13T13:44:58.000Z | 2021-09-29T10:10:33.000Z | #!/home/zross/bin/python
import numpy as np
import sys
import glob
import obspy
import pylab as plt
import json
import random
class Arrival():
    """Plain record describing a single seismic phase arrival.

    Every field defaults to None so partially-known arrivals can be
    built up incrementally.
    """
    def __init__(self, net=None, sta=None, time=None, phase=None,
                 dist=None, resid=None):
        # Store each constructor argument verbatim as an attribute.
        for field, value in (('net', net), ('sta', sta), ('time', time),
                             ('phase', phase), ('dist', dist),
                             ('resid', resid)):
            setattr(self, field, value)
class Event():
    """Container for the phase arrivals belonging to one event."""
    def __init__(self, arrivals = None):
        # Default to a fresh list per instance; a mutable default
        # argument would be shared between all Events.
        self.arrivals = [] if arrivals is None else arrivals
def get_unassociated_trigs(origin_time, triggers, trig_meta):
    """Collect triggers within a 2-minute window around an origin time.

    Args:
        origin_time: obspy.UTCDateTime of the event origin.
        triggers: numpy array of trigger times expressed as seconds
            relative to the epoch (UTCDateTime(0)).
        trig_meta: per-trigger tuples (net, sta, phase, UTCDateTime, type),
            index-aligned with `triggers`.

    Returns:
        dict mapping station code -> list of (pick UTCDateTime, trigger type)
        for all triggers from 60 s before to 60 s after the origin.
    """
    window_start = origin_time - obspy.UTCDateTime(0) - 60.0
    window_stop = window_start + 120.
    in_window = np.where((triggers >= window_start) & (triggers < window_stop))[0]
    trigs = {}
    for i in in_window:
        station = trig_meta[i][1]
        trigs.setdefault(station, []).append((trig_meta[i][3], trig_meta[i][4]))
    return trigs
def plot_seismicity(catalog, params):
    """Plot catalog epicenters, stations, and mapped faults on a basemap.

    Args:
        catalog: obspy.Catalog of located events; the first origin of each
            event supplies the plotted epicenter.
        params: config dict; uses 'fault_file', 'lat_min', 'lat_max',
            'lon_min', 'lon_max', and 'station_file'.

    Side effects: writes 'detection_map.png' and opens a matplotlib window.
    """
    import pandas as pd
    print('Reading fault file in GMT format, please wait...')
    # Preallocate flat arrays for fault vertices; 'fsegs' records which
    # fault segment each vertex belongs to.
    maxpts = 1600000 # based on number of lines in file
    flats = np.zeros(maxpts)
    flons = np.zeros(maxpts)
    fsegs = np.zeros(maxpts,dtype='int')
    nn = -1
    nseg=-1
    with open(params['fault_file']) as f:
        for line in f:
            # Header line that starts a new fault segment.
            if line.startswith('Pline'):
                nseg+=1
            # Vertex line (these files encode vertices with a leading -1).
            elif line.startswith('-1'):
                nn+=1
                lineS = line.split()
                flons[nn]=float(lineS[0])
                flats[nn]=float(lineS[1])
                fsegs[nn]=nseg
    # Convert to dataframe, trimming the unused preallocated tail.
    fault_df = pd.DataFrame()
    fault_df['flon']=flons[:nn+1]
    fault_df['flat']=flats[:nn+1]
    fault_df['fseg']=fsegs[:nn+1]
    print('Done, {:} faults read'.format(nseg+1))
    from mpl_toolkits.basemap import Basemap
    fig = plt.figure(figsize=(10,10))
    ax = plt.gca()
    lat0, lat1 = params['lat_min'], params['lat_max']
    clat = (lat0+lat1)/2.
    lon0, lon1 = params['lon_min'], params['lon_max']
    clon = (lon0+lon1)/2.
    proj = 'merc'
    epsg = 4269
    m = Basemap(llcrnrlon=lon0,llcrnrlat=lat0,urcrnrlon=lon1,urcrnrlat=lat1,
                resolution='h',projection=proj,lat_0=clat,lon_0=clon, ax=ax,
                epsg=epsg)
    m.drawcoastlines()
    m.fillcontinents(color='white', lake_color='paleturquoise')
    m.drawparallels(np.arange(32, 38, 1.), labels=[1,0,0,1])
    m.drawmeridians(np.arange(-120, -114, 1.), labels=[1,0,0,1])
    m.drawmapboundary(fill_color='paleturquoise')
    # Settings for the optional shaded-relief background; the ArcGIS request
    # is slow, so the call is left disabled by default.
    xpixels = 5000
    service = 'World_Shaded_Relief'
    #m.arcgisimage(service=service, xpixels = xpixels, verbose= False)
    # Plot only fault vertices inside the map window, one segment at a time.
    # Bracket access avoids any clash with DataFrame attribute names.
    ifaults = (fault_df['flat'] >= lat0)&(fault_df['flat'] <= lat1) & (
        fault_df['flon'] >= lon0)&(fault_df['flon'] <= lon1)
    for g, v in fault_df[ifaults].groupby('fseg'):
        m.plot(v['flon'].values,v['flat'].values,'-k',lw=1.0,latlon=True)
    # BUG FIX: iterate over the 'catalog' parameter rather than the module
    # global 'cat', so the function works with any catalog it is given.
    lon = []
    lat = []
    for event in catalog:
        lon.append(event.origins[0].longitude)
        lat.append(event.origins[0].latitude)
    m.scatter(lon, lat, 0.5, marker='o', color='r', latlon=True, zorder=10)
    # Station symbols; columns 2 and 3 of the station file are read as
    # latitude and longitude respectively.
    stla = []
    stlo = []
    with open(params["station_file"], 'r') as f:
        for line in f:
            temp = line.split()
            stla.append(float(temp[2]))
            stlo.append(float(temp[3]))
    m.scatter(stlo, stla, 50, marker='^', color='blue', latlon=True, zorder=10)
    plt.tight_layout()
    plt.savefig("detection_map.png", dpi=320)
    plt.show()
if __name__ == "__main__":
    # Usage: phaselink_plot.py control_file.json
    if len(sys.argv) != 2:
        print("phaselink_plot.py control_file.json")
        sys.exit()
    # All runtime options come from the JSON control file.
    with open(sys.argv[1], "r") as f:
        params = json.load(f)
    # Optionally read the raw (unassociated) trigger list so that triggers
    # can be overplotted on the waveforms later.
    triggers = []
    trig_meta = []
    if params['plot_unassociated']:
        print("Reading unassociated triggers...")
        with open(params['gpd_file'], 'r') as f:
            for line in f:
                net, sta, phase, time, prob, dur = line.split()
                # Skip weak triggers below the probability/duration cutoffs.
                if float(prob) < params['pr_min'] or \
                    float(dur) < params['trig_dur_min']:
                    continue
                    # NOTE(review): this assignment is unreachable (it follows
                    # 'continue'); weak triggers are dropped instead of being
                    # kept with trig_type 0 — confirm that is the intent.
                    trig_type = 0
                else:
                    trig_type = 1
                # Times stored as seconds since epoch for numpy windowing.
                triggers.append(obspy.UTCDateTime(time) - obspy.UTCDateTime(0))
                trig_meta.append((net, sta, phase, obspy.UTCDateTime(time),
                    trig_type))
        # Sort triggers and their metadata by time.
        idx = np.argsort(triggers)
        triggers = np.array([triggers[x] for x in idx])
        trig_meta = [trig_meta[x] for x in idx]
    print("Now building catalog")
    # Alternative: read a single NLLoc summary file instead of per-event files.
    #nll_summary_file = "%s/%s" % \
    #    (params['nlloc_loc_path'], params['nlloc_sum_file'])
    #cat = obspy.io.nlloc.core.read_nlloc_hyp(nll_summary_file)
    # Read every per-event NonLinLoc .hyp file into one catalog.
    nll_files = glob.glob("%s/*.*.*.*.*.hyp" % params['nlloc_loc_path'])
    cat = obspy.Catalog()
    for fname in nll_files:
        try:
            cat += obspy.read_events(fname)
        except:
            # NOTE(review): bare except silently skips unreadable .hyp files.
            continue
    random.shuffle(nll_files)
    for event in cat:
        print(event.preferred_origin().time)
    print(cat)
    print()
    if params['plot_seismicity']:
        plot_seismicity(cat, params)
    # Plot a record section for each event (in shuffled file order).
    for fname in nll_files:
        cat = obspy.read_events(fname)
        event = cat[0]
        origin = event.preferred_origin()
        origin_time = origin.time
        print(event)
        print(origin)
        if params['plot_unassociated']:
            trigs = get_unassociated_trigs(origin_time, triggers, trig_meta)
        # Group this origin's picks by station; sta_order preserves the order
        # stations were first seen so plot rows are stable.
        picks = {}
        sta_order = []
        dist_count = 0
        for arrival in origin.arrivals:
            pick = arrival.pick_id.get_referred_object()
            sta = pick.waveform_id.station_code
            phase = arrival.phase
            time = pick.time
            #if arrival.distance <= params['dist_cutoff_radius']:
            #    dist_count += 1
            # Flag picks whose travel-time residual exceeds the threshold.
            if abs(arrival.time_residual) > params['max_t_resid']:
                flag = 1
            else:
                flag = 0
            if sta not in picks:
                picks[sta] = [(time, phase, flag)]
                sta_order.append(sta)
            else:
                picks[sta].append((time, phase, flag))
        # Optional distance-based event filter (disabled):
        #if dist_count < params['dist_cutoff_n_min']:
        #    print("Skipping event, only %d phases within radius %.2f" % \
        #        (dist_count, params['dist_cutoff_radius']))
        #    continue
        # Plot results
        fig, ax = plt.subplots(1,1,figsize=(30,30))
        colors = {0: 'lime', 1: 'yellow'}
        count = 0
        for sta in sta_order:
            # Waveforms organized as wf_path/YYYY/JJJ/*.STA.* — read a
            # +/-60 s window around the origin.
            st = obspy.read("%s/%04d/%03d/*.%s.*" % \
                (params['wf_path'], origin_time.year, origin_time.julday, sta),
                starttime=origin_time-60, endtime=origin_time+60)
            st.detrend()
            st.filter(type='bandpass', freqmin=3.0, freqmax=20)
            for tr in st:
                # Normalized trace, offset vertically by the station index.
                ax.plot(np.arange(tr.data.size)*tr.stats.delta, \
                    tr.data/np.max(tr.data) + count, c='k', lw=1)
            ax.text(125, count, sta)
            if params['plot_unassociated']:
                if sta in trigs:
                    for pick, t_type in trigs[sta]:
                        # Alternative: highlight the 1-s trigger slice itself.
                        #tr_slice = tr.slice(starttime=pick,
                        #    endtime=pick+1.0)
                        #ax.plot(np.arange(tr_slice.data.size) \
                        #    * tr.stats.delta + (pick - origin_time) + 60.,
                        #    tr_slice.data/np.max(tr.data) + count,
                        #    c=colors[t_type], lw=1)
                        ax.plot(pick-tr.stats.starttime, 0,
                            marker="|", c=colors[t_type])
            # Associated picks: red vertical bars for P, blue otherwise.
            for pick, phase, flag in picks[sta]:
                if phase == 'P':
                    color = 'r'
                else:
                    color = 'b'
                #if flag:
                #    color = 'limegreen'
                ax.plot([pick-tr.stats.starttime, pick-tr.stats.starttime], [count-0.75, count+0.75], c=color)
            count += 1
        plt.show()
        print()
| 33.573077 | 114 | 0.536717 |
import numpy as np
import sys
import glob
import obspy
import pylab as plt
import json
import random
class Arrival():
def __init__(self, net=None, sta=None, time=None, phase=None,
dist=None, resid=None):
self.net = net
self.sta = sta
self.time = time
self.phase = phase
self.dist = dist
self.resid = resid
class Event():
def __init__(self, arrivals = None):
if arrivals is not None:
self.arrivals = arrivals
else:
self.arrivals = []
def get_unassociated_trigs(origin_time, triggers, trig_meta):
t_start = origin_time - obspy.UTCDateTime(0) - 60.0
t_stop = t_start + 120.
idx = np.where((triggers >= t_start) & (triggers < t_stop))[0]
trigs = {}
for x in idx:
if trig_meta[x][1] not in trigs:
trigs[trig_meta[x][1]] = []
trigs[trig_meta[x][1]].append((trig_meta[x][3], trig_meta[x][4]))
return trigs
def plot_seismicity(catalog, params):
import pandas as pd
print('Reading fault file in GMT format, please wait...')
faults = []
maxpts = 1600000
flats = np.zeros(maxpts)
flons = np.zeros(maxpts)
fsegs = np.zeros(maxpts,dtype='int')
nn = -1
nseg=-1
with open(params['fault_file']) as f:
for line in f:
if line.startswith('Pline'):
nseg+=1
elif line.startswith('-1'):
nn+=1
lineS = line.split()
flons[nn]=float(lineS[0])
flats[nn]=float(lineS[1])
fsegs[nn]=nseg
fault_df = pd.DataFrame()
fault_df['flon']=flons[:nn+1]
fault_df['flat']=flats[:nn+1]
fault_df['fseg']=fsegs[:nn+1]
print('Done, {:} faults read'.format(nseg+1))
from mpl_toolkits.basemap import Basemap, shiftgrid, cm
fig = plt.figure(figsize=(10,10))
ax = plt.gca()
lat0, lat1 = params['lat_min'], params['lat_max']
clat = (lat0+lat1)/2.
lon0, lon1 = params['lon_min'], params['lon_max']
clon = (lon0+lon1)/2.
proj = 'merc'
epsg = 4269
m = Basemap(llcrnrlon=lon0,llcrnrlat=lat0,urcrnrlon=lon1,urcrnrlat=lat1,
resolution='h',projection=proj,lat_0=clat,lon_0=clon, ax=ax,
epsg=epsg)
m.drawcoastlines()
m.fillcontinents(color='white', lake_color='paleturquoise')
m.drawparallels(np.arange(32, 38, 1.), labels=[1,0,0,1])
m.drawmeridians(np.arange(-120, -114, 1.), labels=[1,0,0,1])
m.drawmapboundary(fill_color='paleturquoise')
xpixels = 5000
service = 'World_Shaded_Relief'
ifaults = (fault_df.flat >= lat0)&(fault_df.flat <= lat1) & (
fault_df.flon >= lon0)&(fault_df.flon <= lon1)
for g, v in fault_df[ifaults].groupby('fseg'):
m.plot(v.flon.values,v.flat.values,'-k',lw=1.0,latlon=True)
lon = []
lat = []
for event in cat:
lon.append(event.origins[0].longitude)
lat.append(event.origins[0].latitude)
m.scatter(lon, lat, 0.5, marker='o', color='r', latlon=True, zorder=10)
stla = []
stlo = []
with open(params["station_file"], 'r') as f:
for line in f:
temp = line.split()
stla.append(float(temp[2]))
stlo.append(float(temp[3]))
m.scatter(stlo, stla, 50, marker='^', color='blue', latlon=True, zorder=10)
plt.tight_layout()
plt.savefig("detection_map.png", dpi=320)
plt.show()
if __name__ == "__main__":
if len(sys.argv) != 2:
print("phaselink_plot.py control_file.json")
sys.exit()
with open(sys.argv[1], "r") as f:
params = json.load(f)
triggers = []
trig_meta = []
if params['plot_unassociated']:
print("Reading unassociated triggers...")
with open(params['gpd_file'], 'r') as f:
for line in f:
net, sta, phase, time, prob, dur = line.split()
if float(prob) < params['pr_min'] or \
float(dur) < params['trig_dur_min']:
continue
trig_type = 0
else:
trig_type = 1
triggers.append(obspy.UTCDateTime(time) - obspy.UTCDateTime(0))
trig_meta.append((net, sta, phase, obspy.UTCDateTime(time),
trig_type))
idx = np.argsort(triggers)
triggers = np.array([triggers[x] for x in idx])
trig_meta = [trig_meta[x] for x in idx]
print("Now building catalog")
nll_files = glob.glob("%s/*.*.*.*.*.hyp" % params['nlloc_loc_path'])
cat = obspy.Catalog()
for fname in nll_files:
try:
cat += obspy.read_events(fname)
except:
continue
random.shuffle(nll_files)
for event in cat:
print(event.preferred_origin().time)
print(cat)
print()
if params['plot_seismicity']:
plot_seismicity(cat, params)
for fname in nll_files:
cat = obspy.read_events(fname)
event = cat[0]
origin = event.preferred_origin()
origin_time = origin.time
print(event)
print(origin)
if params['plot_unassociated']:
trigs = get_unassociated_trigs(origin_time, triggers, trig_meta)
picks = {}
sta_order = []
dist_count = 0
for arrival in origin.arrivals:
pick = arrival.pick_id.get_referred_object()
sta = pick.waveform_id.station_code
phase = arrival.phase
time = pick.time
if abs(arrival.time_residual) > params['max_t_resid']:
flag = 1
else:
flag = 0
if sta not in picks:
picks[sta] = [(time, phase, flag)]
sta_order.append(sta)
else:
picks[sta].append((time, phase, flag))
fig, ax = plt.subplots(1,1,figsize=(30,30))
colors = {0: 'lime', 1: 'yellow'}
count = 0
for sta in sta_order:
st = obspy.read("%s/%04d/%03d/*.%s.*" % \
(params['wf_path'], origin_time.year, origin_time.julday, sta),
starttime=origin_time-60, endtime=origin_time+60)
st.detrend()
st.filter(type='bandpass', freqmin=3.0, freqmax=20)
for tr in st:
ax.plot(np.arange(tr.data.size)*tr.stats.delta, \
tr.data/np.max(tr.data) + count, c='k', lw=1)
ax.text(125, count, sta)
if params['plot_unassociated']:
if sta in trigs:
for pick, t_type in trigs[sta]:
ax.plot(pick-tr.stats.starttime, 0,
marker="|", c=colors[t_type])
for pick, phase, flag in picks[sta]:
if phase == 'P':
color = 'r'
else:
color = 'b'
ax.plot([pick-tr.stats.starttime, pick-tr.stats.starttime], [count-0.75, count+0.75], c=color)
count += 1
plt.show()
print()
| true | true |
1c366196414ee266797f5e1f3e4eaf2e17fa0b9f | 1,691 | py | Python | src/graph_transpiler/webdnn/graph/operators/tile.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | src/graph_transpiler/webdnn/graph/operators/tile.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/graph/operators/tile.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | from typing import Optional
import numpy as np
from webdnn.graph.axis import AxisKeyDict
from webdnn.graph.graph import Graph
from webdnn.graph.operator import Operator
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.variable import Variable
from webdnn.graph.variables.constant_variable import ConstantVariable
class Tile(Operator):
    """Tile(name)
    Repeat input variable along every axis.
    Args:
        name (str): Operator name.
        multiplier (AxisKeyDict[int]): number of repeats per axis.
    Signature
        .. code::
            y, = op(x)
        - **x** - Input variable.
        - **y** - Output variable.
    """
    def __init__(self, name: Optional[str], multiplier: AxisKeyDict[int]):
        super().__init__(name)
        # Per-axis repeat counts, keyed by the input variable's axes.
        self.parameters["multiplier"] = multiplier
    def __call__(self, x: Variable):
        """Register x as the input and create the tiled output variable."""
        assert x.ndim == len(self.multiplier), f"""
        [Tile] Number of multiplier must be same as # of dimension of x:
            (x.ndim)={x.ndim}
            (len(self.multiplier))={len(self.multiplier)}"""
        # Output extent on each axis = multiplier * input extent, with the
        # same axis order as the input.
        y_shape = [self.multiplier[a] * x.shape_dict[a] for a in x.order.axes]
        y = Variable(y_shape, x.order)
        self.append_input("x", x)
        self.append_output("y", y)
        return y,
    @property
    def multiplier(self) -> AxisKeyDict[int]:
        """Per-axis repeat counts."""
        return self.parameters["multiplier"]
    def fold_constance(self, graph: Graph):
        """Constant-fold this op: replace the output with a precomputed
        np.tile of the constant input, then remove the operator from the
        graph. Only valid when the input is a ConstantVariable.
        (Method name keeps the framework's existing spelling.)
        """
        x = self.inputs["x"] # type: ConstantVariable
        y = self.outputs["y"]
        new_y = ConstantVariable(np.tile(x.data, [self.multiplier[a] for a in x.order.axes]), x.order)
        new_y.change_order(y.order)
        OptimizeRule.replace_variable(graph, y, new_y)
        self.remove_all()
| 28.183333 | 102 | 0.645772 | from typing import Optional
import numpy as np
from webdnn.graph.axis import AxisKeyDict
from webdnn.graph.graph import Graph
from webdnn.graph.operator import Operator
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.variable import Variable
from webdnn.graph.variables.constant_variable import ConstantVariable
class Tile(Operator):
def __init__(self, name: Optional[str], multiplier: AxisKeyDict[int]):
super().__init__(name)
self.parameters["multiplier"] = multiplier
def __call__(self, x: Variable):
assert x.ndim == len(self.multiplier), f"""
[Tile] Number of multiplier must be same as # of dimension of x:
(x.ndim)={x.ndim}
(len(self.multiplier))={len(self.multiplier)}"""
y_shape = [self.multiplier[a] * x.shape_dict[a] for a in x.order.axes]
y = Variable(y_shape, x.order)
self.append_input("x", x)
self.append_output("y", y)
return y,
@property
def multiplier(self) -> AxisKeyDict[int]:
return self.parameters["multiplier"]
def fold_constance(self, graph: Graph):
x = self.inputs["x"]
y = self.outputs["y"]
new_y = ConstantVariable(np.tile(x.data, [self.multiplier[a] for a in x.order.axes]), x.order)
new_y.change_order(y.order)
OptimizeRule.replace_variable(graph, y, new_y)
self.remove_all()
| true | true |
1c3661c53d66366f54589d4511c69a8cf99eb1fb | 1,391 | py | Python | mentoring/migrations/0002_mentee.py | aiventimptner/horizon | 6e2436bfa81cad55fefd4c0bb67df3c36a9b6deb | [
"MIT"
] | null | null | null | mentoring/migrations/0002_mentee.py | aiventimptner/horizon | 6e2436bfa81cad55fefd4c0bb67df3c36a9b6deb | [
"MIT"
] | 1 | 2021-06-10T19:59:07.000Z | 2021-06-10T19:59:07.000Z | mentoring/migrations/0002_mentee.py | aiventimptner/horizon | 6e2436bfa81cad55fefd4c0bb67df3c36a9b6deb | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-09-04 12:12
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; do not hand-edit field definitions here —
    # create a follow-up migration for schema changes instead.
    dependencies = [
        ('mentoring', '0001_initial'),
    ]
    operations = [
        # Introduces the Mentee model with contact details and a foreign key
        # to the existing Mentor model.
        migrations.CreateModel(
            name='Mentee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254, unique=True, validators=[django.core.validators.EmailValidator()])),
                # Mobile number restricted to the German '0123/456789' format.
                ('phone', models.CharField(max_length=20, validators=[django.core.validators.RegexValidator(message="Die Mobilnummer ist nur im Format '0123/456789' erlaubt.", regex='01\\d{2}\\/\\d{6,7}')])),
                # Postal address must match 'Street No., ZIP City'.
                ('address', models.CharField(max_length=255, validators=[django.core.validators.RegexValidator(message="Die Anschrift muss im Format 'Straße Nr., PLZ Stadt' angegeben werden.", regex='.*\\,\\s\\d{5}\\s\\w{3,}')])),
                ('created', models.DateTimeField(auto_now_add=True)),
                # Deleting a mentor cascades to their mentees.
                ('mentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mentoring.mentor')),
            ],
        ),
    ]
| 47.965517 | 230 | 0.635514 |
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mentoring', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Mentee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254, unique=True, validators=[django.core.validators.EmailValidator()])),
('phone', models.CharField(max_length=20, validators=[django.core.validators.RegexValidator(message="Die Mobilnummer ist nur im Format '0123/456789' erlaubt.", regex='01\\d{2}\\/\\d{6,7}')])),
('address', models.CharField(max_length=255, validators=[django.core.validators.RegexValidator(message="Die Anschrift muss im Format 'Straße Nr., PLZ Stadt' angegeben werden.", regex='.*\\,\\s\\d{5}\\s\\w{3,}')])),
('created', models.DateTimeField(auto_now_add=True)),
('mentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mentoring.mentor')),
],
),
]
| true | true |
1c366363cce7e6ca5b38f303f13a7ca147dfad65 | 996 | py | Python | tests/test_simsalabim.py | simklein/simsalabim | feade4cf0c95d89e9d845feda2b5c3693eceb5f0 | [
"MIT"
] | null | null | null | tests/test_simsalabim.py | simklein/simsalabim | feade4cf0c95d89e9d845feda2b5c3693eceb5f0 | [
"MIT"
] | null | null | null | tests/test_simsalabim.py | simklein/simsalabim | feade4cf0c95d89e9d845feda2b5c3693eceb5f0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `simsalabim` package."""
import pytest
from click.testing import CliRunner
# from simsalabim import simsalabim
from simsalabim import cli
@pytest.fixture
def response():
    """Sample pytest fixture.
    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # Placeholder fixture: currently yields None. Enable the lines below to
    # provide a real HTTP response once a test needs one.
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument."""
    # Placeholder body: the fixture is injected but nothing is asserted yet.
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
    """Test the CLI."""
    runner = CliRunner()
    # Invoking with no arguments should succeed and echo the entry point.
    result = runner.invoke(cli.main)
    assert result.exit_code == 0
    assert 'simsalabim.cli.main' in result.output
    # --help must also exit cleanly and print the usage footer.
    help_result = runner.invoke(cli.main, ['--help'])
    assert help_result.exit_code == 0
    assert '--help Show this message and exit.' in help_result.output
| 26.210526 | 78 | 0.707831 |
import pytest
from click.testing import CliRunner
from simsalabim import cli
@pytest.fixture
def response():
def test_content(response):
def test_command_line_interface():
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'simsalabim.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| true | true |
1c36646167785fc30818342279b53c475754adb1 | 22,774 | py | Python | train.py | cds-mipt/yolact | d226620495f16314ff8f5dda57bca18de54e004b | [
"MIT"
] | null | null | null | train.py | cds-mipt/yolact | d226620495f16314ff8f5dda57bca18de54e004b | [
"MIT"
] | null | null | null | train.py | cds-mipt/yolact | d226620495f16314ff8f5dda57bca18de54e004b | [
"MIT"
] | null | null | null | from data import *
from utils.augmentations import SSDAugmentation, BaseTransform
from utils.functions import MovingAverage, SavePath
from utils.logger import Log
from utils import timer
from layers.modules import MultiBoxLoss
from yolact import Yolact
import os
import sys
import time
import math, random
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
import datetime
# Oof
import eval as eval_script
def str2bool(v):
    """Interpret the strings "yes", "true", "t", and "1" (any case) as True."""
    return v.lower() in {"yes", "true", "t", "1"}
# Command-line interface. Most defaults of None mean "read the value from
# the active config (cfg)" — see the replace() calls below.
parser = argparse.ArgumentParser(
    description='Yolact Training Script')
parser.add_argument('--batch_size', default=8, type=int,
                    help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from. If this is "interrupt"'\
                         ', the model will resume training from the interrupt file.')
parser.add_argument('--start_iter', default=-1, type=int,
                    help='Resume training at this iter. If this is -1, the iteration will be'\
                         'determined from the file name.')
parser.add_argument('--num_workers', default=4, type=int,
                    help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use CUDA to train model')
parser.add_argument('--lr', '--learning_rate', default=None, type=float,
                    help='Initial learning rate. Leave as None to read this from the config.')
parser.add_argument('--momentum', default=None, type=float,
                    help='Momentum for SGD. Leave as None to read this from the config.')
parser.add_argument('--decay', '--weight_decay', default=None, type=float,
                    help='Weight decay for SGD. Leave as None to read this from the config.')
parser.add_argument('--gamma', default=None, type=float,
                    help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')
parser.add_argument('--save_folder', default='weights/',
                    help='Directory for saving checkpoint models.')
parser.add_argument('--log_folder', default='logs/',
                    help='Directory for saving logs.')
parser.add_argument('--config', default=None,
                    help='The config object to use.')
parser.add_argument('--save_interval', default=50,type=int,
                    help='The number of iterations between saving the model.')
parser.add_argument('--validation_size', default=10000,type=int,
                    help='The number of images to use for validation.')
parser.add_argument('--validation_epochs', default=4, type=int,
                    help='Output validation information every n iterations. If -1, do no validation.')
parser.add_argument('--keep_latest', dest='keep_latest', action='store_true',
                    help='Only keep the latest checkpoint instead of each one.')
parser.add_argument('--keep_latest_interval', default=100000, type=int,
                    help='When --keep_latest is on, don\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')
parser.add_argument('--dataset', default=None, type=str,
                    help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
parser.add_argument('--no_log', dest='log', action='store_false',
                    help='Don\'t log per iteration information into log_folder.')
parser.add_argument('--log_gpu', dest='log_gpu', action='store_true',
                    help='Include GPU information in the logs. Nvidia-smi tends to be slow, so set this with caution.')
parser.add_argument('--no_interrupt', dest='interrupt', action='store_false',
                    help='Don\'t save an interrupt when KeyboardInterrupt is caught.')
parser.add_argument('--batch_alloc', default=None, type=str,
                    help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')
parser.add_argument('--no_autoscale', dest='autoscale', action='store_false',
                    help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')
# Defaults for the flag-style (store_true/store_false) options above.
parser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)
args = parser.parse_args()
# Apply CLI overrides for the config object and dataset before anything
# else reads cfg.
if args.config is not None:
    set_cfg(args.config)
if args.dataset is not None:
    set_dataset(args.dataset)
# Linear-scaling rule relative to the reference batch size of 8: scale the
# learning rate up and the schedule length down by the same factor.
if args.autoscale and args.batch_size != 8:
    factor = args.batch_size / 8
    print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))
    # NOTE(review): factor is a float, so '//' yields float values for
    # max_iter and lr_steps here; confirm nothing downstream requires ints.
    cfg.lr *= factor
    cfg.max_iter //= factor
    cfg.lr_steps = [x // factor for x in cfg.lr_steps]
# Update training parameters from the config if necessary
def replace(name):
    """Copy cfg.<name> into args.<name> when the CLI flag was left unset (None).

    Reads/writes the module-level `args` and `cfg` objects.
    """
    # PEP 8: compare to the None singleton with 'is', not '=='.
    if getattr(args, name) is None:
        setattr(args, name, getattr(cfg, name))
# Fill in any hyperparameters the CLI left as None from the config.
replace('lr')
replace('decay')
replace('gamma')
replace('momentum')
# This is managed by set_lr
cur_lr = args.lr
if torch.cuda.device_count() == 0:
    print('No GPUs detected. Exiting...')
    exit(-1)
# Small per-GPU batches make batch-norm statistics unreliable, so freeze BN.
if args.batch_size // torch.cuda.device_count() < 6:
    print('Per-GPU batch size is less than the recommended limit for batch norm. Disabling batch norm.')
    cfg.freeze_bn = True
# Single-letter keys of the individual loss terms used for logging.
loss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S']
# Choose the default tensor type so new tensors land on the right device.
if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't " +
              "using CUDA.\nRun with --cuda for optimal training speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
torch.set_default_tensor_type('torch.FloatTensor')
class NetLoss(nn.Module):
    """
    A wrapper for running the network and computing the loss
    This is so we can more efficiently use DataParallel.
    """
    def __init__(self, net:Yolact, criterion:MultiBoxLoss):
        super().__init__()

        self.net = net                # Yolact model producing predictions
        self.criterion = criterion    # MultiBoxLoss applied to those predictions

    def forward(self, images, targets, masks, num_crowds):
        """Run the network on `images` and return the criterion's losses."""
        preds = self.net(images)
        return self.criterion(preds, targets, masks, num_crowds)
class CustomDataParallel(nn.DataParallel):
    """
    This is a custom version of DataParallel that works better with our training data.
    It should also be faster than the general case.
    """

    def scatter(self, inputs, kwargs, device_ids):
        # More like scatter and data prep at the same time. The point is we prep the data in such a way
        # that no scatter is necessary, and there's no need to shuffle stuff around different GPUs.
        devices = ['cuda:' + str(x) for x in device_ids]
        splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)

        # prepare_data (defined elsewhere in this module) presumably returns
        # per-field lists with one entry per device; regroup so each device
        # receives its own tuple of fields — confirm against prepare_data.
        return [[split[device_idx] for split in splits] for device_idx in range(len(devices))], \
               [kwargs] * len(devices)

    def gather(self, outputs, output_device):
        # Merge the per-GPU loss dicts: stack each named loss term onto the
        # output device so the caller can reduce over GPUs.
        out = {}
        for k in outputs[0]:
            out[k] = torch.stack([output[k].to(output_device) for output in outputs])
        return out
def train():
    """Run the full YOLACT training loop.

    Builds the dataset, network, optimizer and loss, optionally resumes from a
    checkpoint, then trains with LR warmup and stepped decay. Once per
    ``args.validation_epochs`` epochs the validation mAP is computed and a
    checkpoint is saved only when box AP@50 improves. Ctrl+C saves an
    interrupt checkpoint (unless --no_interrupt).
    """
    import json  # stdlib; imported locally so the module header stays untouched

    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    dataset = COCODetection(image_path=cfg.dataset.train_images,
                            info_file=cfg.dataset.train_info,
                            transform=SSDAugmentation(MEANS))

    if args.validation_epochs > 0:
        setup_eval()
        val_dataset = COCODetection(image_path=cfg.dataset.valid_images,
                                    info_file=cfg.dataset.valid_info,
                                    transform=BaseTransform(MEANS))

    # Per-iteration records are appended here by Log; the per-epoch validation
    # code below reads the last line back to get the current AP.
    filename_log = args.log_folder+cfg.name+'.log'
    AP_best = 0  # best box AP@50 seen so far; gates checkpoint saving

    # Parallel wraps the underlying module, but when saving and loading we don't want that
    yolact_net = Yolact()
    net = yolact_net
    net.train()

    if args.log:
        log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),
            overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)

    # I don't use the timer during training (I use a different timing method).
    # Apparently there's a race condition with multiple GPUs, so disable it just to be safe.
    timer.disable_all()

    # Both of these can set args.resume to None, so do them before the check
    if args.resume == 'interrupt':
        args.resume = SavePath.get_interrupt(args.save_folder)
    elif args.resume == 'latest':
        args.resume = SavePath.get_latest(args.save_folder, cfg.name)

    if args.resume is not None:
        print('Resuming training, loading {}...'.format(args.resume))
        yolact_net.load_weights(args.resume)

        if args.start_iter == -1:
            args.start_iter = SavePath.from_str(args.resume).iteration
    else:
        print('Initializing weights...')
        yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)

    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.decay)
    criterion = MultiBoxLoss(num_classes=cfg.num_classes,
                             pos_threshold=cfg.positive_iou_threshold,
                             neg_threshold=cfg.negative_iou_threshold,
                             negpos_ratio=cfg.ohem_negpos_ratio)

    if args.batch_alloc is not None:
        args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]
        if sum(args.batch_alloc) != args.batch_size:
            print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))
            exit(-1)

    net = CustomDataParallel(NetLoss(net, criterion))
    if args.cuda:
        net = net.cuda()

    # Initialize everything
    if not cfg.freeze_bn: yolact_net.freeze_bn() # Freeze bn so we don't kill our means
    yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())
    if not cfg.freeze_bn: yolact_net.freeze_bn(True)

    iteration = max(args.start_iter, 0)
    last_time = time.time()

    epoch_size = len(dataset) // args.batch_size
    num_epochs = math.ceil(cfg.max_iter / epoch_size)

    # Which learning rate adjustment step are we on? lr' = lr * gamma ^ step_index
    step_index = 0

    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)

    save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)
    time_avg = MovingAverage()

    global loss_types # Forms the print order
    loss_avgs = { k: MovingAverage(100) for k in loss_types }

    print('Begin training!')
    print()
    # try-except so you can use ctrl+c to save early and stop training
    try:
        for epoch in range(num_epochs):
            # BUG FIX: guard on validation being enabled -- val_dataset is only
            # created when args.validation_epochs > 0, so the previously
            # unconditional call crashed with a NameError otherwise.
            if args.validation_epochs > 0:
                compute_validation_map(epoch, iteration, yolact_net, val_dataset, None)

            # Resume from start_iter
            if (epoch+1)*epoch_size < iteration:
                continue

            for datum in data_loader:
                # Stop if we've reached an epoch if we're resuming from start_iter
                if iteration == (epoch+1)*epoch_size:
                    break

                # Stop at the configured number of iterations even if mid-epoch
                if iteration == cfg.max_iter:
                    break

                # Change a config setting if we've reached the specified iteration
                changed = False
                for change in cfg.delayed_settings:
                    if iteration >= change[0]:
                        changed = True
                        cfg.replace(change[1])

                        # Reset the loss averages because things might have changed
                        # BUG FIX: iterate .values() -- iterating the dict itself
                        # yields its string keys, which have no .reset().
                        for avg in loss_avgs.values():
                            avg.reset()

                # If a config setting was changed, remove it from the list so we don't keep checking
                if changed:
                    cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]

                # Warm up by linearly interpolating the learning rate from some smaller value
                if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:
                    set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)

                # Adjust the learning rate at the given iterations, but also if we resume from past that iteration
                while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:
                    step_index += 1
                    set_lr(optimizer, args.lr * (args.gamma ** step_index))

                # Zero the grad to get ready to compute gradients
                optimizer.zero_grad()

                # Forward Pass + Compute loss at the same time (see CustomDataParallel and NetLoss)
                losses = net(datum)

                losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel
                loss = sum([losses[k] for k in losses])
                # no_inf_mean removes some components from the loss, so make sure to backward through all of it
                # all_loss = sum([v.mean() for v in losses.values()])

                # Backprop
                loss.backward() # Do this to free up vram even if loss is not finite
                if torch.isfinite(loss).item():
                    optimizer.step()

                # Add the loss to the moving average for bookkeeping
                for k in losses:
                    loss_avgs[k].add(losses[k].item())

                cur_time = time.time()
                elapsed = cur_time - last_time
                last_time = cur_time

                # Exclude graph setup from the timing information
                if iteration != args.start_iter:
                    time_avg.add(elapsed)

                if iteration % 10 == 0:
                    eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]

                    total = sum([loss_avgs[k].get_avg() for k in losses])
                    loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])

                    print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')
                            % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)

                if args.log:
                    precision = 5
                    loss_info = {k: round(losses[k].item(), precision) for k in losses}
                    loss_info['T'] = sum([round(losses[k].item(), precision) for k in losses])

                    if args.log_gpu:
                        log.log_gpu_stats = (iteration % 10 == 0) # nvidia-smi is sloooow

                    log.log('train', loss=loss_info, epoch=epoch, iter=iteration,
                        lr=round(cur_lr, 10), elapsed=elapsed)

                    log.log_gpu_stats = args.log_gpu

                iteration += 1

            # This is done per epoch
            if args.validation_epochs > 0:
                if epoch % args.validation_epochs == 0 and epoch > 0:
                    compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)

                # NOTE(review): this assumes args.log is enabled (so the log
                # file exists) and that the last line always carries
                # data['box']['50'] -- confirm before running with --no_log.
                with open(filename_log) as f:
                    f = f.readlines()
                # The log line is JSON; json.loads natively understands
                # true/false/null/NaN/Infinity, so parse it directly instead of
                # rewriting the text and eval()-ing it (eval would execute
                # arbitrary code if the log file were ever tampered with).
                record_dict = json.loads(f[-1])
                AP = record_dict['data']['box']['50']
                if AP_best < AP:
                    AP_best = AP
                    # Save (and optionally rotate) a checkpoint only when the
                    # validation AP improves.
                    if args.keep_latest:
                        latest = SavePath.get_latest(args.save_folder, cfg.name)

                    print('Saving state, iter:', iteration)
                    yolact_net.save_weights(save_path(epoch, iteration))

                    if args.keep_latest and latest is not None:
                        if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:
                            print('Deleting old save...')
                            os.remove(latest)

        # Compute validation mAP after training is finished
        # (guarded for the same val_dataset NameError as above)
        if args.validation_epochs > 0:
            compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)
    except KeyboardInterrupt:
        if args.interrupt:
            print('Stopping early. Saving network...')

            # Delete previous copy of the interrupted network so we don't spam the weights folder
            SavePath.remove_interrupt(args.save_folder)
            yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))
        exit()

    yolact_net.save_weights(save_path(epoch, iteration))
def set_lr(optimizer, new_lr):
    """Assign *new_lr* to every parameter group of *optimizer* and mirror the
    value in the module-level ``cur_lr`` used by the training logger."""
    global cur_lr
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    cur_lr = new_lr
def gradinator(x):
    """Disable gradient tracking on *x* in place and hand the tensor back."""
    setattr(x, 'requires_grad', False)
    return x
def prepare_data(datum, devices:list=None, allocation:list=None):
    """Move one collated batch onto the target device(s) and split it per GPU.

    `datum` is (images, (targets, masks, num_crowds)) as yielded by the
    DataLoader (collated with detection_collate). Returns four parallel lists
    with one entry per device: stacked image tensors, targets, masks and
    crowd counts. ``allocation`` gives how many samples go to each device;
    it must sum to args.batch_size.
    """
    with torch.no_grad():
        if devices is None:
            devices = ['cuda:0'] if args.cuda else ['cpu']
        if allocation is None:
            # Even split, with the last device absorbing the remainder.
            allocation = [args.batch_size // len(devices)] * (len(devices) - 1)
            allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less
        images, (targets, masks, num_crowds) = datum
        # Move each sample to its assigned device and turn off grad tracking.
        cur_idx = 0
        for device, alloc in zip(devices, allocation):
            for _ in range(alloc):
                images[cur_idx] = gradinator(images[cur_idx].to(device))
                targets[cur_idx] = gradinator(targets[cur_idx].to(device))
                masks[cur_idx] = gradinator(masks[cur_idx].to(device))
                cur_idx += 1
        if cfg.preserve_aspect_ratio:
            # Choose a random size from the batch
            _, h, w = images[random.randint(0, len(images)-1)].size()
            # Force every sample in the batch to that size so they can be stacked.
            for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):
                images[idx], targets[idx], masks[idx], num_crowds[idx] \
                    = enforce_size(image, target, mask, num_crowd, w, h)
        cur_idx = 0
        split_images, split_targets, split_masks, split_numcrowds \
            = [[None for alloc in allocation] for _ in range(4)]
        # Regroup the per-sample lists into contiguous per-device chunks.
        for device_idx, alloc in enumerate(allocation):
            split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)
            split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]
            split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]
            split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]
            cur_idx += alloc
        return split_images, split_targets, split_masks, split_numcrowds
def no_inf_mean(x:torch.Tensor):
    """
    Mean of the finite entries of *x*.
    Falls back to the ordinary mean (i.e. inf) when nothing finite remains.
    """
    finite = [v for v in x if torch.isfinite(v)]
    return sum(finite) / len(finite) if finite else x.mean()
def compute_validation_loss(net, data_loader, criterion):
    """Average each loss component over roughly args.validation_size images
    and print the result, keeping the net in train mode so training-style
    losses are produced.

    NOTE(review): ``ScatterWrapper`` is not defined or imported anywhere in
    this file's visible scope -- this helper looks stale/dead code; verify
    before calling it.
    """
    global loss_types
    with torch.no_grad():
        losses = {}
        # Don't switch to eval mode because we want to get losses
        iterations = 0
        for datum in data_loader:
            images, targets, masks, num_crowds = prepare_data(datum)
            out = net(images)
            wrapper = ScatterWrapper(targets, masks, num_crowds)
            _losses = criterion(out, wrapper, wrapper.make_mask())
            # Accumulate the scalar mean of each loss term across batches.
            for k, v in _losses.items():
                v = v.mean().item()
                if k in losses:
                    losses[k] += v
                else:
                    losses[k] = v
            iterations += 1
            if args.validation_size <= iterations * args.batch_size:
                break
        # Turn the sums into per-batch averages.
        for k in losses:
            losses[k] /= iterations
        loss_labels = sum([[k, losses[k]] for k in loss_types if k in losses], [])
        print(('Validation ||' + (' %s: %.3f |' * len(losses)) + ')') % tuple(loss_labels), flush=True)
def compute_validation_map(epoch, iteration, yolact_net, dataset, log:Log=None):
    """Evaluate *yolact_net* on *dataset* via the eval script, optionally log
    the result (with timing), and put the network back into train mode."""
    with torch.no_grad():
        yolact_net.eval()

        t0 = time.time()
        print()
        print("Computing validation mAP (this may take a while)...", flush=True)
        val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)
        t1 = time.time()

        if log is not None:
            log.log('val', val_info, elapsed=(t1 - t0), epoch=epoch, iter=iteration)

        yolact_net.train()
        return 1
def setup_eval():
    """One-time configuration of the eval script: disable its progress bar
    and cap the number of validation images at args.validation_size."""
    eval_script.parse_args(['--no_bar', '--max_images='+str(args.validation_size)])
if __name__ == '__main__':
    train()
| 43.461832 | 196 | 0.60029 | from data import *
from utils.augmentations import SSDAugmentation, BaseTransform
from utils.functions import MovingAverage, SavePath
from utils.logger import Log
from utils import timer
from layers.modules import MultiBoxLoss
from yolact import Yolact
import os
import sys
import time
import math, random
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
import datetime
import eval as eval_script
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Yolact Training Script')
parser.add_argument('--batch_size', default=8, type=int,
help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
help='Checkpoint state_dict file to resume training from. If this is "interrupt"'\
', the model will resume training from the interrupt file.')
parser.add_argument('--start_iter', default=-1, type=int,
help='Resume training at this iter. If this is -1, the iteration will be'\
'determined from the file name.')
parser.add_argument('--num_workers', default=4, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use CUDA to train model')
parser.add_argument('--lr', '--learning_rate', default=None, type=float,
help='Initial learning rate. Leave as None to read this from the config.')
parser.add_argument('--momentum', default=None, type=float,
help='Momentum for SGD. Leave as None to read this from the config.')
parser.add_argument('--decay', '--weight_decay', default=None, type=float,
help='Weight decay for SGD. Leave as None to read this from the config.')
parser.add_argument('--gamma', default=None, type=float,
help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')
parser.add_argument('--save_folder', default='weights/',
help='Directory for saving checkpoint models.')
parser.add_argument('--log_folder', default='logs/',
help='Directory for saving logs.')
parser.add_argument('--config', default=None,
help='The config object to use.')
parser.add_argument('--save_interval', default=50,type=int,
help='The number of iterations between saving the model.')
parser.add_argument('--validation_size', default=10000,type=int,
help='The number of images to use for validation.')
parser.add_argument('--validation_epochs', default=4, type=int,
help='Output validation information every n iterations. If -1, do no validation.')
parser.add_argument('--keep_latest', dest='keep_latest', action='store_true',
help='Only keep the latest checkpoint instead of each one.')
parser.add_argument('--keep_latest_interval', default=100000, type=int,
help='When --keep_latest is on, don\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')
parser.add_argument('--dataset', default=None, type=str,
help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
parser.add_argument('--no_log', dest='log', action='store_false',
help='Don\'t log per iteration information into log_folder.')
parser.add_argument('--log_gpu', dest='log_gpu', action='store_true',
help='Include GPU information in the logs. Nvidia-smi tends to be slow, so set this with caution.')
parser.add_argument('--no_interrupt', dest='interrupt', action='store_false',
help='Don\'t save an interrupt when KeyboardInterrupt is caught.')
parser.add_argument('--batch_alloc', default=None, type=str,
help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')
parser.add_argument('--no_autoscale', dest='autoscale', action='store_false',
help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')
parser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)
args = parser.parse_args()
if args.config is not None:
set_cfg(args.config)
if args.dataset is not None:
set_dataset(args.dataset)
if args.autoscale and args.batch_size != 8:
factor = args.batch_size / 8
print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))
cfg.lr *= factor
cfg.max_iter //= factor
cfg.lr_steps = [x // factor for x in cfg.lr_steps]
# Update training parameters from the config if necessary
def replace(name):
    """Copy cfg.<name> into args.<name> when the CLI left the flag unset.

    Uses ``is None`` (identity) rather than ``== None``: the original equality
    test invokes ``__eq__`` and can misfire on objects with custom comparison.
    """
    if getattr(args, name) is None: setattr(args, name, getattr(cfg, name))
replace('lr')
replace('decay')
replace('gamma')
replace('momentum')
# This is managed by set_lr
cur_lr = args.lr
if torch.cuda.device_count() == 0:
print('No GPUs detected. Exiting...')
exit(-1)
if args.batch_size // torch.cuda.device_count() < 6:
print('Per-GPU batch size is less than the recommended limit for batch norm. Disabling batch norm.')
cfg.freeze_bn = True
loss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S']
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't " +
"using CUDA.\nRun with --cuda for optimal training speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
class NetLoss(nn.Module):
    """Runs the network forward pass and the loss as one module so the pair
    can be replicated by DataParallel as a single unit."""
    def __init__(self, net:Yolact, criterion:MultiBoxLoss):
        super().__init__()
        self.net = net
        self.criterion = criterion
    def forward(self, images, targets, masks, num_crowds):
        # Score the fresh predictions directly; no intermediate needed.
        return self.criterion(self.net(images), targets, masks, num_crowds)
class CustomDataParallel(nn.DataParallel):
def scatter(self, inputs, kwargs, device_ids):
devices = ['cuda:' + str(x) for x in device_ids]
splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)
return [[split[device_idx] for split in splits] for device_idx in range(len(devices))], \
[kwargs] * len(devices)
def gather(self, outputs, output_device):
out = {}
for k in outputs[0]:
out[k] = torch.stack([output[k].to(output_device) for output in outputs])
return out
def train():
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
dataset = COCODetection(image_path=cfg.dataset.train_images,
info_file=cfg.dataset.train_info,
transform=SSDAugmentation(MEANS))
if args.validation_epochs > 0:
setup_eval()
val_dataset = COCODetection(image_path=cfg.dataset.valid_images,
info_file=cfg.dataset.valid_info,
transform=BaseTransform(MEANS))
filename_log = args.log_folder+cfg.name+'.log'
AP_best = 0
# Parallel wraps the underlying module, but when saving and loading we don't want that
yolact_net = Yolact()
net = yolact_net
net.train()
if args.log:
log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),
overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)
# Apparently there's a race condition with multiple GPUs, so disable it just to be safe.
timer.disable_all()
if args.resume == 'interrupt':
args.resume = SavePath.get_interrupt(args.save_folder)
elif args.resume == 'latest':
args.resume = SavePath.get_latest(args.save_folder, cfg.name)
if args.resume is not None:
print('Resuming training, loading {}...'.format(args.resume))
yolact_net.load_weights(args.resume)
if args.start_iter == -1:
args.start_iter = SavePath.from_str(args.resume).iteration
else:
print('Initializing weights...')
yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.decay)
criterion = MultiBoxLoss(num_classes=cfg.num_classes,
pos_threshold=cfg.positive_iou_threshold,
neg_threshold=cfg.negative_iou_threshold,
negpos_ratio=cfg.ohem_negpos_ratio)
if args.batch_alloc is not None:
args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]
if sum(args.batch_alloc) != args.batch_size:
print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))
exit(-1)
net = CustomDataParallel(NetLoss(net, criterion))
if args.cuda:
net = net.cuda()
if not cfg.freeze_bn: yolact_net.freeze_bn()
yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())
if not cfg.freeze_bn: yolact_net.freeze_bn(True)
# loss counters
loc_loss = 0
conf_loss = 0
iteration = max(args.start_iter, 0)
last_time = time.time()
epoch_size = len(dataset) // args.batch_size
num_epochs = math.ceil(cfg.max_iter / epoch_size)
best_AP = 0
# Which learning rate adjustment step are we on? lr' = lr * gamma ^ step_index
step_index = 0
data_loader = data.DataLoader(dataset, args.batch_size,
num_workers=args.num_workers,
shuffle=True, collate_fn=detection_collate,
pin_memory=True)
save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)
time_avg = MovingAverage()
global loss_types
loss_avgs = { k: MovingAverage(100) for k in loss_types }
print('Begin training!')
print()
try:
for epoch in range(num_epochs):
compute_validation_map(epoch, iteration, yolact_net, val_dataset, None)
if (epoch+1)*epoch_size < iteration:
continue
for datum in data_loader:
if iteration == (epoch+1)*epoch_size:
break
if iteration == cfg.max_iter:
break
changed = False
for change in cfg.delayed_settings:
if iteration >= change[0]:
changed = True
cfg.replace(change[1])
# Reset the loss averages because things might have changed
for avg in loss_avgs:
avg.reset()
# If a config setting was changed, remove it from the list so we don't keep checking
if changed:
cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]
if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:
set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)
while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:
step_index += 1
set_lr(optimizer, args.lr * (args.gamma ** step_index))
optimizer.zero_grad()
losses = net(datum)
losses = { k: (v).mean() for k,v in losses.items() }
loss = sum([losses[k] for k in losses])
loss.backward()
if torch.isfinite(loss).item():
optimizer.step()
for k in losses:
loss_avgs[k].add(losses[k].item())
cur_time = time.time()
elapsed = cur_time - last_time
last_time = cur_time
if iteration != args.start_iter:
time_avg.add(elapsed)
if iteration % 10 == 0:
eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]
total = sum([loss_avgs[k].get_avg() for k in losses])
loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])
print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')
% tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)
if args.log:
precision = 5
loss_info = {k: round(losses[k].item(), precision) for k in losses}
loss_info['T'] = sum([round(losses[k].item(), precision) for k in losses])
if args.log_gpu:
log.log_gpu_stats = (iteration % 10 == 0)
log.log('train', loss=loss_info, epoch=epoch, iter=iteration,
lr=round(cur_lr, 10), elapsed=elapsed)
log.log_gpu_stats = args.log_gpu
iteration += 1
if args.validation_epochs > 0:
if epoch % args.validation_epochs == 0 and epoch > 0:
compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)
with open(filename_log) as f:
f = f.readlines()
record = f[-1]
record = record.replace('true','True')
record = record.replace('null','None')
record = record.replace('NaN','None')
record = record.replace('false','False')
record = record.replace('Infinity', 'np.inf')
record_dict = eval(record)
AP = record_dict['data']['box']['50']
if AP_best < AP:
AP_best = AP
if args.keep_latest:
latest = SavePath.get_latest(args.save_folder, cfg.name)
print('Saving state, iter:', iteration)
yolact_net.save_weights(save_path(epoch, iteration))
if args.keep_latest and latest is not None:
if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:
print('Deleting old save...')
os.remove(latest)
compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)
except KeyboardInterrupt:
if args.interrupt:
print('Stopping early. Saving network...')
SavePath.remove_interrupt(args.save_folder)
yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))
exit()
yolact_net.save_weights(save_path(epoch, iteration))
def set_lr(optimizer, new_lr):
    """Set every param group's learning rate to *new_lr* and record the value
    in the module-level ``cur_lr``."""
    global cur_lr
    cur_lr = new_lr
    for pg in optimizer.param_groups:
        pg['lr'] = new_lr
def gradinator(x):
    """Switch off gradient tracking on the tensor *x* and return it."""
    x.requires_grad = False
    return x
def prepare_data(datum, devices:list=None, allocation:list=None):
with torch.no_grad():
if devices is None:
devices = ['cuda:0'] if args.cuda else ['cpu']
if allocation is None:
allocation = [args.batch_size // len(devices)] * (len(devices) - 1)
allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less
images, (targets, masks, num_crowds) = datum
cur_idx = 0
for device, alloc in zip(devices, allocation):
for _ in range(alloc):
images[cur_idx] = gradinator(images[cur_idx].to(device))
targets[cur_idx] = gradinator(targets[cur_idx].to(device))
masks[cur_idx] = gradinator(masks[cur_idx].to(device))
cur_idx += 1
if cfg.preserve_aspect_ratio:
# Choose a random size from the batch
_, h, w = images[random.randint(0, len(images)-1)].size()
for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):
images[idx], targets[idx], masks[idx], num_crowds[idx] \
= enforce_size(image, target, mask, num_crowd, w, h)
cur_idx = 0
split_images, split_targets, split_masks, split_numcrowds \
= [[None for alloc in allocation] for _ in range(4)]
for device_idx, alloc in enumerate(allocation):
split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)
split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]
split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]
split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]
cur_idx += alloc
return split_images, split_targets, split_masks, split_numcrowds
def no_inf_mean(x:torch.Tensor):
    """Average the finite entries of *x*; when every entry is non-finite,
    fall back to the ordinary (inf) mean."""
    kept = [val for val in x if torch.isfinite(val)]
    if not kept:
        return x.mean()
    return sum(kept) / len(kept)
def compute_validation_loss(net, data_loader, criterion):
global loss_types
with torch.no_grad():
losses = {}
# Don't switch to eval mode because we want to get losses
iterations = 0
for datum in data_loader:
images, targets, masks, num_crowds = prepare_data(datum)
out = net(images)
wrapper = ScatterWrapper(targets, masks, num_crowds)
_losses = criterion(out, wrapper, wrapper.make_mask())
for k, v in _losses.items():
v = v.mean().item()
if k in losses:
losses[k] += v
else:
losses[k] = v
iterations += 1
if args.validation_size <= iterations * args.batch_size:
break
for k in losses:
losses[k] /= iterations
loss_labels = sum([[k, losses[k]] for k in loss_types if k in losses], [])
print(('Validation ||' + (' %s: %.3f |' * len(losses)) + ')') % tuple(loss_labels), flush=True)
def compute_validation_map(epoch, iteration, yolact_net, dataset, log:Log=None):
with torch.no_grad():
yolact_net.eval()
start = time.time()
print()
print("Computing validation mAP (this may take a while)...", flush=True)
val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)
end = time.time()
if log is not None:
log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)
yolact_net.train()
return 1
def setup_eval():
eval_script.parse_args(['--no_bar', '--max_images='+str(args.validation_size)])
if __name__ == '__main__':
train()
| true | true |
1c3664e5162aa7eecfb5a5efb953455e29eb37a7 | 453 | py | Python | src/prefect/engine/executors/local.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 8,633 | 2019-03-23T17:51:03.000Z | 2022-03-31T22:17:42.000Z | src/prefect/engine/executors/local.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 3,903 | 2019-03-23T19:11:21.000Z | 2022-03-31T23:21:23.000Z | src/prefect/engine/executors/local.py | ngriffiths13/prefect | 7f5613abcb182494b7dc12159277c3bc5f3c9898 | [
"Apache-2.0"
] | 937 | 2019-03-23T18:49:44.000Z | 2022-03-31T21:45:13.000Z | import warnings
from typing import Any
from prefect.executors import LocalExecutor as _LocalExecutor
class LocalExecutor(_LocalExecutor):
    """Backwards-compatibility shim for the old import location.

    Instantiating it warns that the class has moved to
    ``prefect.executors.LocalExecutor`` and otherwise behaves exactly like
    the relocated class (it subclasses it and only overrides ``__new__``).
    """
    def __new__(cls, *args: Any, **kwargs: Any) -> "LocalExecutor":
        # stacklevel=2 points the warning at the caller's line, not this shim.
        warnings.warn(
            "prefect.engine.executors.LocalExecutor has been moved to "
            "`prefect.executors.LocalExecutor`, please update your imports",
            stacklevel=2,
        )
        return super().__new__(cls)
| 30.2 | 76 | 0.677704 | import warnings
from typing import Any
from prefect.executors import LocalExecutor as _LocalExecutor
class LocalExecutor(_LocalExecutor):
def __new__(cls, *args: Any, **kwargs: Any) -> "LocalExecutor":
warnings.warn(
"prefect.engine.executors.LocalExecutor has been moved to "
"`prefect.executors.LocalExecutor`, please update your imports",
stacklevel=2,
)
return super().__new__(cls)
| true | true |
1c3665afa95ea066b427ed9ea62b8f525c9d89a6 | 715 | py | Python | filter_balanced.py | rymoah/ca-boolfun-construction | 9ee26fb4ac28076143a9929d40b4e6ad520e98ff | [
"MIT"
] | null | null | null | filter_balanced.py | rymoah/ca-boolfun-construction | 9ee26fb4ac28076143a9929d40b4e6ad520e98ff | [
"MIT"
] | null | null | null | filter_balanced.py | rymoah/ca-boolfun-construction | 9ee26fb4ac28076143a9929d40b4e6ad520e98ff | [
"MIT"
] | null | null | null | import os
for filename in os.listdir('experiments/enumeration_quadratic_rules'):
with open('experiments/enumeration_quadratic_rules'+filename, 'r') as fin, \
open('filtered_'+filename, 'w') as fout:
lines = fin.readlines()
for i, line in enumerate(lines):
if 'Rule' in line:
rule_ok = True
line_idx = i+1
while rule_ok and line_idx < len(lines) and 'Size' in lines[line_idx]:
if r'BAL: false' in lines[line_idx]:
rule_ok = False
line_idx += 1
if rule_ok:
fout.write(lines[i])
| 26.481481 | 86 | 0.495105 | import os
for filename in os.listdir('experiments/enumeration_quadratic_rules'):
with open('experiments/enumeration_quadratic_rules'+filename, 'r') as fin, \
open('filtered_'+filename, 'w') as fout:
lines = fin.readlines()
for i, line in enumerate(lines):
if 'Rule' in line:
rule_ok = True
line_idx = i+1
while rule_ok and line_idx < len(lines) and 'Size' in lines[line_idx]:
if r'BAL: false' in lines[line_idx]:
rule_ok = False
line_idx += 1
if rule_ok:
fout.write(lines[i])
| true | true |
1c3665dff7a844f4cbbf9449c0c621ea7723dd7a | 786 | py | Python | board_import.py | Yamnasm/takuzu | 1d5fee99c220273c229466d33126d06b18b3b188 | [
"MIT"
] | null | null | null | board_import.py | Yamnasm/takuzu | 1d5fee99c220273c229466d33126d06b18b3b188 | [
"MIT"
] | null | null | null | board_import.py | Yamnasm/takuzu | 1d5fee99c220273c229466d33126d06b18b3b188 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
class Tile:
EMPTY = 0
RED = 1
BLUE = 2
def get_board_from_html():
def get_tile_status(cell):
status = cell.div.get("class")[1]
if status == "tile-":
return Tile.EMPTY
elif status == "tile-1":
return Tile.RED
elif status == "tile-2":
return Tile.BLUE
with open("0h h1.html") as file:
data = file.read()
soup = BeautifulSoup(data, features="html.parser")
rows = soup.find("table", {"id":"grid"}).find_all("tr")
board = []
for row in rows:
cells = row.find_all("td")
data_row = [get_tile_status(cell) for cell in cells]
board.append(data_row)
return(board)
if __name__ == "__main__":
get_board_from_html() | 21.833333 | 60 | 0.571247 | from bs4 import BeautifulSoup
class Tile:
EMPTY = 0
RED = 1
BLUE = 2
def get_board_from_html():
def get_tile_status(cell):
status = cell.div.get("class")[1]
if status == "tile-":
return Tile.EMPTY
elif status == "tile-1":
return Tile.RED
elif status == "tile-2":
return Tile.BLUE
with open("0h h1.html") as file:
data = file.read()
soup = BeautifulSoup(data, features="html.parser")
rows = soup.find("table", {"id":"grid"}).find_all("tr")
board = []
for row in rows:
cells = row.find_all("td")
data_row = [get_tile_status(cell) for cell in cells]
board.append(data_row)
return(board)
if __name__ == "__main__":
get_board_from_html() | true | true |
1c366600ed4daefb88da8e0ff791d88dea78663d | 9,737 | py | Python | mkt/webapps/tests/test_tasks.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | mkt/webapps/tests/test_tasks.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | mkt/webapps/tests/test_tasks.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | import json
import os
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
import mock
from nose.tools import eq_
import amo
import amo.tests
from editors.models import RereviewQueue
from files.models import FileUpload
from users.models import UserProfile
from mkt.developers.models import ActivityLog
from mkt.webapps.models import Webapp
from mkt.webapps.tasks import update_manifests
original = {
"version": "0.1",
"name": "MozillaBall",
"description": "Exciting Open Web development action!",
"icons": {
"16": "http://test.com/icon-16.png",
"48": "http://test.com/icon-48.png",
"128": "http://test.com/icon-128.png"
},
"installs_allowed_from": [
"*",
],
}
new = {
"version": "1.0",
"name": "MozillaBall",
"description": "Exciting Open Web development action!",
"icons": {
"16": "http://test.com/icon-16.png",
"48": "http://test.com/icon-48.png",
"128": "http://test.com/icon-128.png"
},
"installs_allowed_from": [
"*",
],
}
ohash = ('sha256:'
'fc11fba25f251d64343a7e8da4dfd812a57a121e61eb53c78c567536ab39b10d')
nhash = ('sha256:'
'409fbe87dca5a4a7937e3dea27b69cb3a3d68caf39151585aef0c7ab46d8ee1e')
class TestUpdateManifest(amo.tests.TestCase):
fixtures = ('base/platforms',)
def setUp(self):
UserProfile.objects.get_or_create(id=settings.TASK_USER_ID)
self.addon = amo.tests.app_factory()
self.version = self.addon.versions.latest()
self.file = self.version.files.latest()
self.file.update(hash=ohash)
ActivityLog.objects.all().delete()
with storage.open(self.file.file_path, 'w') as fh:
fh.write(json.dumps(original))
# This is the hash to set the get_content_hash to, for showing
# that the webapp has been updated.
self._hash = nhash
self.new = new.copy()
urlopen_patch = mock.patch('urllib2.urlopen')
self.urlopen_mock = urlopen_patch.start()
self.addCleanup(urlopen_patch.stop)
self.response_mock = mock.Mock()
self.response_mock.read.return_value = self._data()
self.response_mock.headers = {
'Content-Type': 'application/x-web-app-manifest+json'}
self.urlopen_mock.return_value = self.response_mock
@mock.patch('mkt.webapps.tasks._get_content_hash')
def _run(self, _get_content_hash, **kw):
# Will run the task and will act depending upon how you've set hash.
_get_content_hash.return_value = self._hash
update_manifests(ids=(self.addon.pk,), **kw)
def _data(self):
return json.dumps(self.new)
@mock.patch('mkt.webapps.models.copy_stored_file')
def test_new_version_not_created(self, _copy_stored_file):
# Test that update_manifest doesn't create multiple versions/files.
eq_(self.addon.versions.count(), 1)
old_version = self.addon.current_version
old_file = self.addon.get_latest_file()
self._run()
new = Webapp.objects.get(pk=self.addon.pk)
version = new.current_version
file = new.get_latest_file()
# Test that our new version looks good
eq_(new.versions.count(), 1)
assert version == old_version, 'Version created'
assert file == old_file, 'File created'
path = FileUpload.objects.all()[0].path
_copy_stored_file.assert_called_with(path,
os.path.join(version.path_prefix,
file.filename))
def test_version_updated(self):
self._run()
self.new['version'] = '1.1'
self.response_mock.read.return_value = self._data()
self._hash = 'foo'
self._run()
new = Webapp.objects.get(pk=self.addon.pk)
eq_(new.versions.latest().version, '1.1')
def test_not_log(self):
self._hash = ohash
self._run()
eq_(ActivityLog.objects.for_apps(self.addon).count(), 0)
def test_log(self):
self._run()
eq_(ActivityLog.objects.for_apps(self.addon).count(), 1)
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_not_webapp(self, mock_):
self.addon.update(type=amo.ADDON_EXTENSION)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_pending(self, mock_):
self.addon.update(status=amo.STATUS_PENDING)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_disabled(self, mock_):
self.addon.update(status=amo.STATUS_DISABLED)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_packaged(self, mock_):
self.addon.update(is_packaged=True)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_get_webapp(self, mock_):
eq_(self.addon.status, amo.STATUS_PUBLIC)
call_command('process_addons', task='update_manifests')
assert mock_.called
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_update_manifest(self, retry, fetch):
def f(self):
return '{}'
fetch.side_effect = f
update_manifests(ids=(self.addon.pk,))
assert not retry.called
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_manifest_fetch_fail(self, retry, fetch):
def die(self):
raise RuntimeError()
fetch.side_effect = die
update_manifests(ids=(self.addon.pk,))
retry.assert_called_with(
args=([self.addon.pk,],),
kwargs={'check_hash': True,
'retries': {self.addon.pk: 1}},
countdown=3600)
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_manifest_fetch_3x_fail(self, retry, fetch):
def die(self):
raise RuntimeError()
fetch.side_effect = die
update_manifests(ids=(self.addon.pk,), retries={self.addon.pk: 2})
assert not retry.called
assert RereviewQueue.objects.filter(addon=self.addon).exists()
@mock.patch('mkt.webapps.tasks._open_manifest')
def test_manifest_name_change_rereview(self, open_manifest):
# Mock original manifest file lookup.
open_manifest.return_value = original
# Mock new manifest with name change.
n = new.copy()
n['name'] = 'Mozilla Ball Ultimate Edition'
response_mock = mock.Mock()
response_mock.read.return_value = json.dumps(n)
response_mock.headers = {
'Content-Type': 'application/x-web-app-manifest+json'}
self.urlopen_mock.return_value = response_mock
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps(self.addon).count(), 2)
@mock.patch.object(settings, 'SITE_URL', 'http://test')
@mock.patch('mkt.webapps.tasks._open_manifest')
def test_validation_error_logs(self, open_manifest):
self.skip_if_disabled(settings.REGION_STORES)
# Mock original manifest file lookup.
open_manifest.return_value = original
# Mock new manifest with name change.
n = new.copy()
n['locales'] = 'en-US'
response_mock = mock.Mock()
response_mock.read.return_value = json.dumps(n)
response_mock.headers = {
'Content-Type': 'application/x-web-app-manifest+json'}
self.urlopen_mock.return_value = response_mock
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
assert 'http://test/developers/upload' in ''.join(
[a._details for a in ActivityLog.objects.for_apps(self.addon)])
eq_(ActivityLog.objects.for_apps(self.addon).count(), 1)
# Test we don't add app to re-review queue twice.
self._run()
eq_(RereviewQueue.objects.count(), 1)
@mock.patch('mkt.webapps.tasks._open_manifest')
def test_force_rereview(self, open_manifest):
# Mock original manifest file lookup.
open_manifest.return_value = original
# Mock new manifest with name change.
n = new.copy()
n['name'] = 'Mozilla Ball Ultimate Edition'
response_mock = mock.Mock()
response_mock.read.return_value = json.dumps(n)
response_mock.headers = {
'Content-Type': 'application/x-web-app-manifest+json'}
self.urlopen_mock.return_value = response_mock
# We're setting the hash to the same value.
self.file.update(hash=nhash)
eq_(RereviewQueue.objects.count(), 0)
self._run(check_hash=False)
# We should still get a rereview since we bypassed the manifest check.
eq_(RereviewQueue.objects.count(), 1)
# 2 logs: 1 for manifest update, 1 for re-review trigger.
eq_(ActivityLog.objects.for_apps(self.addon).count(), 2)
| 36.062963 | 78 | 0.642087 | import json
import os
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
import mock
from nose.tools import eq_
import amo
import amo.tests
from editors.models import RereviewQueue
from files.models import FileUpload
from users.models import UserProfile
from mkt.developers.models import ActivityLog
from mkt.webapps.models import Webapp
from mkt.webapps.tasks import update_manifests
original = {
"version": "0.1",
"name": "MozillaBall",
"description": "Exciting Open Web development action!",
"icons": {
"16": "http://test.com/icon-16.png",
"48": "http://test.com/icon-48.png",
"128": "http://test.com/icon-128.png"
},
"installs_allowed_from": [
"*",
],
}
new = {
"version": "1.0",
"name": "MozillaBall",
"description": "Exciting Open Web development action!",
"icons": {
"16": "http://test.com/icon-16.png",
"48": "http://test.com/icon-48.png",
"128": "http://test.com/icon-128.png"
},
"installs_allowed_from": [
"*",
],
}
ohash = ('sha256:'
'fc11fba25f251d64343a7e8da4dfd812a57a121e61eb53c78c567536ab39b10d')
nhash = ('sha256:'
'409fbe87dca5a4a7937e3dea27b69cb3a3d68caf39151585aef0c7ab46d8ee1e')
class TestUpdateManifest(amo.tests.TestCase):
fixtures = ('base/platforms',)
def setUp(self):
UserProfile.objects.get_or_create(id=settings.TASK_USER_ID)
self.addon = amo.tests.app_factory()
self.version = self.addon.versions.latest()
self.file = self.version.files.latest()
self.file.update(hash=ohash)
ActivityLog.objects.all().delete()
with storage.open(self.file.file_path, 'w') as fh:
fh.write(json.dumps(original))
self._hash = nhash
self.new = new.copy()
urlopen_patch = mock.patch('urllib2.urlopen')
self.urlopen_mock = urlopen_patch.start()
self.addCleanup(urlopen_patch.stop)
self.response_mock = mock.Mock()
self.response_mock.read.return_value = self._data()
self.response_mock.headers = {
'Content-Type': 'application/x-web-app-manifest+json'}
self.urlopen_mock.return_value = self.response_mock
@mock.patch('mkt.webapps.tasks._get_content_hash')
def _run(self, _get_content_hash, **kw):
_get_content_hash.return_value = self._hash
update_manifests(ids=(self.addon.pk,), **kw)
def _data(self):
return json.dumps(self.new)
@mock.patch('mkt.webapps.models.copy_stored_file')
def test_new_version_not_created(self, _copy_stored_file):
# Test that update_manifest doesn't create multiple versions/files.
eq_(self.addon.versions.count(), 1)
old_version = self.addon.current_version
old_file = self.addon.get_latest_file()
self._run()
new = Webapp.objects.get(pk=self.addon.pk)
version = new.current_version
file = new.get_latest_file()
eq_(new.versions.count(), 1)
assert version == old_version, 'Version created'
assert file == old_file, 'File created'
path = FileUpload.objects.all()[0].path
_copy_stored_file.assert_called_with(path,
os.path.join(version.path_prefix,
file.filename))
def test_version_updated(self):
self._run()
self.new['version'] = '1.1'
self.response_mock.read.return_value = self._data()
self._hash = 'foo'
self._run()
new = Webapp.objects.get(pk=self.addon.pk)
eq_(new.versions.latest().version, '1.1')
def test_not_log(self):
self._hash = ohash
self._run()
eq_(ActivityLog.objects.for_apps(self.addon).count(), 0)
def test_log(self):
self._run()
eq_(ActivityLog.objects.for_apps(self.addon).count(), 1)
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_not_webapp(self, mock_):
self.addon.update(type=amo.ADDON_EXTENSION)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_pending(self, mock_):
self.addon.update(status=amo.STATUS_PENDING)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_disabled(self, mock_):
self.addon.update(status=amo.STATUS_DISABLED)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_ignore_packaged(self, mock_):
self.addon.update(is_packaged=True)
call_command('process_addons', task='update_manifests')
assert not mock_.called
@mock.patch('mkt.webapps.tasks._update_manifest')
def test_get_webapp(self, mock_):
eq_(self.addon.status, amo.STATUS_PUBLIC)
call_command('process_addons', task='update_manifests')
assert mock_.called
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_update_manifest(self, retry, fetch):
def f(self):
return '{}'
fetch.side_effect = f
update_manifests(ids=(self.addon.pk,))
assert not retry.called
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_manifest_fetch_fail(self, retry, fetch):
def die(self):
raise RuntimeError()
fetch.side_effect = die
update_manifests(ids=(self.addon.pk,))
retry.assert_called_with(
args=([self.addon.pk,],),
kwargs={'check_hash': True,
'retries': {self.addon.pk: 1}},
countdown=3600)
@mock.patch('mkt.webapps.tasks._fetch_manifest')
@mock.patch('mkt.webapps.tasks.update_manifests.retry')
def test_manifest_fetch_3x_fail(self, retry, fetch):
def die(self):
raise RuntimeError()
fetch.side_effect = die
update_manifests(ids=(self.addon.pk,), retries={self.addon.pk: 2})
assert not retry.called
assert RereviewQueue.objects.filter(addon=self.addon).exists()
@mock.patch('mkt.webapps.tasks._open_manifest')
def test_manifest_name_change_rereview(self, open_manifest):
open_manifest.return_value = original
n = new.copy()
n['name'] = 'Mozilla Ball Ultimate Edition'
response_mock = mock.Mock()
response_mock.read.return_value = json.dumps(n)
response_mock.headers = {
'Content-Type': 'application/x-web-app-manifest+json'}
self.urlopen_mock.return_value = response_mock
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
eq_(ActivityLog.objects.for_apps(self.addon).count(), 2)
@mock.patch.object(settings, 'SITE_URL', 'http://test')
@mock.patch('mkt.webapps.tasks._open_manifest')
def test_validation_error_logs(self, open_manifest):
self.skip_if_disabled(settings.REGION_STORES)
open_manifest.return_value = original
n = new.copy()
n['locales'] = 'en-US'
response_mock = mock.Mock()
response_mock.read.return_value = json.dumps(n)
response_mock.headers = {
'Content-Type': 'application/x-web-app-manifest+json'}
self.urlopen_mock.return_value = response_mock
eq_(RereviewQueue.objects.count(), 0)
self._run()
eq_(RereviewQueue.objects.count(), 1)
assert 'http://test/developers/upload' in ''.join(
[a._details for a in ActivityLog.objects.for_apps(self.addon)])
eq_(ActivityLog.objects.for_apps(self.addon).count(), 1)
self._run()
eq_(RereviewQueue.objects.count(), 1)
@mock.patch('mkt.webapps.tasks._open_manifest')
def test_force_rereview(self, open_manifest):
# Mock original manifest file lookup.
open_manifest.return_value = original
# Mock new manifest with name change.
n = new.copy()
n['name'] = 'Mozilla Ball Ultimate Edition'
response_mock = mock.Mock()
response_mock.read.return_value = json.dumps(n)
response_mock.headers = {
'Content-Type': 'application/x-web-app-manifest+json'}
self.urlopen_mock.return_value = response_mock
# We're setting the hash to the same value.
self.file.update(hash=nhash)
eq_(RereviewQueue.objects.count(), 0)
self._run(check_hash=False)
eq_(RereviewQueue.objects.count(), 1)
eq_(ActivityLog.objects.for_apps(self.addon).count(), 2)
| true | true |
1c36664dd33c8065af344d7f36342e5482578b9f | 1,327 | py | Python | qiling/examples/simple_efi_x8664.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:01.000Z | 2021-06-04T14:27:15.000Z | qiling/examples/simple_efi_x8664.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | null | null | null | qiling/examples/simple_efi_x8664.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:09.000Z | 2021-06-04T14:27:21.000Z | #!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import pickle
import sys
sys.path.append("..")
from qiling import Qiling
from qiling.const import QL_INTERCEPT, QL_VERBOSE
from qiling.os.uefi.const import EFI_SUCCESS, EFI_INVALID_PARAMETER
from qiling.os.uefi.utils import check_and_notify_protocols, signal_event
def force_notify_RegisterProtocolNotify(ql: Qiling, address: int, params):
event_id = params['Event']
if event_id in ql.loader.events:
# let's force notify
event = ql.loader.events[event_id]
event['Guid'] = params["Protocol"]
event["Set"] = False
signal_event(ql, event_id)
check_and_notify_protocols(ql, True)
return EFI_SUCCESS
return EFI_INVALID_PARAMETER
def my_onenter(ql: Qiling, address: int, params):
print(f'[onenter] CopyMem : params = {params}')
return address, params
if __name__ == "__main__":
with open("rootfs/x8664_efi/rom2_nvar.pickel", 'rb') as f:
env = pickle.load(f)
ql = Qiling(["rootfs/x8664_efi/bin/TcgPlatformSetupPolicy"], "rootfs/x8664_efi", env=env, verbose=QL_VERBOSE.DEBUG)
ql.set_api("RegisterProtocolNotify", force_notify_RegisterProtocolNotify)
ql.set_api("CopyMem", my_onenter, QL_INTERCEPT.ENTER)
ql.run()
| 28.234043 | 119 | 0.719668 |
import pickle
import sys
sys.path.append("..")
from qiling import Qiling
from qiling.const import QL_INTERCEPT, QL_VERBOSE
from qiling.os.uefi.const import EFI_SUCCESS, EFI_INVALID_PARAMETER
from qiling.os.uefi.utils import check_and_notify_protocols, signal_event
def force_notify_RegisterProtocolNotify(ql: Qiling, address: int, params):
event_id = params['Event']
if event_id in ql.loader.events:
event = ql.loader.events[event_id]
event['Guid'] = params["Protocol"]
event["Set"] = False
signal_event(ql, event_id)
check_and_notify_protocols(ql, True)
return EFI_SUCCESS
return EFI_INVALID_PARAMETER
def my_onenter(ql: Qiling, address: int, params):
print(f'[onenter] CopyMem : params = {params}')
return address, params
if __name__ == "__main__":
with open("rootfs/x8664_efi/rom2_nvar.pickel", 'rb') as f:
env = pickle.load(f)
ql = Qiling(["rootfs/x8664_efi/bin/TcgPlatformSetupPolicy"], "rootfs/x8664_efi", env=env, verbose=QL_VERBOSE.DEBUG)
ql.set_api("RegisterProtocolNotify", force_notify_RegisterProtocolNotify)
ql.set_api("CopyMem", my_onenter, QL_INTERCEPT.ENTER)
ql.run()
| true | true |
1c36690f65fbbbe58293b8db6f331e0ffc08a85f | 6,698 | py | Python | hs_core/management/commands/ingest_irods_files.py | hydroshare/hydroshare | bf9888bbe61507aff070b1dfcec2fdec1921468d | [
"BSD-3-Clause"
] | 178 | 2015-01-08T23:03:36.000Z | 2022-03-03T13:56:45.000Z | hs_core/management/commands/ingest_irods_files.py | hydroshare/hydroshare | bf9888bbe61507aff070b1dfcec2fdec1921468d | [
"BSD-3-Clause"
] | 4,125 | 2015-01-01T14:26:15.000Z | 2022-03-31T16:38:55.000Z | hs_core/management/commands/ingest_irods_files.py | hydroshare/hydroshare | bf9888bbe61507aff070b1dfcec2fdec1921468d | [
"BSD-3-Clause"
] | 53 | 2015-03-15T17:56:51.000Z | 2022-03-17T00:32:16.000Z | # -*- coding: utf-8 -*-
"""
Check synchronization between iRODS and Django
This checks that every file in IRODS corresponds to a ResourceFile in Django.
If a file in iRODS is not present in Django, it attempts to register that file in Django.
* By default, prints errors on stdout.
* Optional argument --log instead logs output to system log.
"""
from django.core.management.base import BaseCommand
from hs_core.models import BaseResource
from hs_core.management.utils import ingest_irods_files
import logging
class Command(BaseCommand):
help = "Check existence of proper Django metadata."
def add_arguments(self, parser):
# a list of resource id's, or none to check all resources
parser.add_argument('resource_ids', nargs='*', type=str)
# Named (optional) arguments
parser.add_argument(
'--log',
action='store_true', # True for presence, False for absence
dest='log', # value is options['log']
help='log errors to system log',
)
def handle(self, *args, **options):
logger = logging.getLogger(__name__)
log_errors = options['log']
echo_errors = not options['log']
if len(options['resource_ids']) > 0: # an array of resource short_id to check.
for rid in options['resource_ids']:
try:
r = BaseResource.objects.get(short_id=rid)
except BaseResource.DoesNotExist:
msg = "Resource with id {} not found in Django Resources".format(rid)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
continue # next resource
# Pabitra: Not sure why are we skipping other resource types
# Alva: cannot preserve file integrity constraints for other file types.
if r.resource_type != 'CompositeResource' and \
r.resource_type != 'GenericResource' and \
r.resource_type != 'ModelInstanceResource' and \
r.resource_type != 'ModelProgramResource':
print(("resource {} has type {}: skipping".format(r.short_id,
r.resource_type)))
else:
print("LOOKING FOR UNREGISTERED IRODS FILES FOR RESOURCE {} (current files {})"
.format(rid, str(r.files.all().count())))
# get the typed resource
try:
resource = r.get_content_model()
except Exception as e:
msg = "resource {} has no proxy resource: {}"\
.format(r.short_id, e.value)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
msg = "... affected resource {} has type {}, title '{}'"\
.format(r.short_id, r.resource_type, r.title)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
continue
_, count = ingest_irods_files(resource,
logger,
stop_on_error=False,
echo_errors=not options['log'],
log_errors=options['log'],
return_errors=False)
if count:
msg = "... affected resource {} has type {}, title '{}'"\
.format(resource.short_id, resource.resource_type,
resource.title)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
else: # check all resources
print("LOOKING FOR UNREGISTERED IRODS FILES FOR ALL RESOURCES")
for r in BaseResource.objects.all():
# Pabitra: Not sure why are we skipping other resource types
# Alva: cannot preserve file integrity constraints for other file types.
if r.resource_type == 'CompositeResource' or \
r.resource_type == 'GenericResource' or \
r.resource_type == 'ModelInstanceResource' or \
r.resource_type == 'ModelProgramResource':
print("LOOKING FOR UNREGISTERED IRODS FILES FOR RESOURCE {} (current files {})"
.format(r.short_id, str(r.files.all().count())))
try:
# get the typed resource
resource = r.get_content_model()
except Exception as e:
msg = "resource {} has no proxy resource: {}"\
.format(r.short_id, e.value)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
msg = "... affected resource {} has type {}, title '{}'"\
.format(r.short_id, r.resource_type, r.title)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
continue # next resource
_, count = ingest_irods_files(resource,
logger,
stop_on_error=False,
echo_errors=not options['log'],
log_errors=options['log'],
return_errors=False)
if count:
msg = "... affected resource {} has type {}, title '{}'"\
.format(resource.short_id, resource.resource_type, resource.title)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
else:
print("resource {} has type {}: skipping".format(r.short_id, r.resource_type))
| 46.193103 | 99 | 0.457599 |
from django.core.management.base import BaseCommand
from hs_core.models import BaseResource
from hs_core.management.utils import ingest_irods_files
import logging
class Command(BaseCommand):
help = "Check existence of proper Django metadata."
def add_arguments(self, parser):
parser.add_argument('resource_ids', nargs='*', type=str)
# Named (optional) arguments
parser.add_argument(
'--log',
action='store_true', # True for presence, False for absence
dest='log', # value is options['log']
help='log errors to system log',
)
def handle(self, *args, **options):
logger = logging.getLogger(__name__)
log_errors = options['log']
echo_errors = not options['log']
if len(options['resource_ids']) > 0: # an array of resource short_id to check.
for rid in options['resource_ids']:
try:
r = BaseResource.objects.get(short_id=rid)
except BaseResource.DoesNotExist:
msg = "Resource with id {} not found in Django Resources".format(rid)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
continue # next resource
# Pabitra: Not sure why are we skipping other resource types
# Alva: cannot preserve file integrity constraints for other file types.
if r.resource_type != 'CompositeResource' and \
r.resource_type != 'GenericResource' and \
r.resource_type != 'ModelInstanceResource' and \
r.resource_type != 'ModelProgramResource':
print(("resource {} has type {}: skipping".format(r.short_id,
r.resource_type)))
else:
print("LOOKING FOR UNREGISTERED IRODS FILES FOR RESOURCE {} (current files {})"
.format(rid, str(r.files.all().count())))
# get the typed resource
try:
resource = r.get_content_model()
except Exception as e:
msg = "resource {} has no proxy resource: {}"\
.format(r.short_id, e.value)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
msg = "... affected resource {} has type {}, title '{}'"\
.format(r.short_id, r.resource_type, r.title)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
continue
_, count = ingest_irods_files(resource,
logger,
stop_on_error=False,
echo_errors=not options['log'],
log_errors=options['log'],
return_errors=False)
if count:
msg = "... affected resource {} has type {}, title '{}'"\
.format(resource.short_id, resource.resource_type,
resource.title)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
else: # check all resources
print("LOOKING FOR UNREGISTERED IRODS FILES FOR ALL RESOURCES")
for r in BaseResource.objects.all():
# Pabitra: Not sure why are we skipping other resource types
# Alva: cannot preserve file integrity constraints for other file types.
if r.resource_type == 'CompositeResource' or \
r.resource_type == 'GenericResource' or \
r.resource_type == 'ModelInstanceResource' or \
r.resource_type == 'ModelProgramResource':
print("LOOKING FOR UNREGISTERED IRODS FILES FOR RESOURCE {} (current files {})"
.format(r.short_id, str(r.files.all().count())))
try:
# get the typed resource
resource = r.get_content_model()
except Exception as e:
msg = "resource {} has no proxy resource: {}"\
.format(r.short_id, e.value)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
msg = "... affected resource {} has type {}, title '{}'"\
.format(r.short_id, r.resource_type, r.title)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
continue # next resource
_, count = ingest_irods_files(resource,
logger,
stop_on_error=False,
echo_errors=not options['log'],
log_errors=options['log'],
return_errors=False)
if count:
msg = "... affected resource {} has type {}, title '{}'"\
.format(resource.short_id, resource.resource_type, resource.title)
if log_errors:
logger.info(msg)
if echo_errors:
print(msg)
else:
print("resource {} has type {}: skipping".format(r.short_id, r.resource_type))
| true | true |
1c3669e33b3ef46d3630161bba99f3778ca71c59 | 1,557 | py | Python | lib/googlecloudsdk/api_lib/app/flags.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/app/flags.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/app/flags.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | 1 | 2020-07-24T21:52:25.000Z | 2020-07-24T21:52:25.000Z | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module holds common flags used by the gcloud app commands."""
import argparse
from googlecloudsdk.calliope import base
SERVER_FLAG = base.Argument(
'--server',
help=argparse.SUPPRESS)
VERSION_FLAG = base.Argument(
'--version',
required=True,
help='The version of the app that you want to operate on.')
# TODO(user): Add service globbing.
MODULES_ARG = base.Argument(
'modules',
nargs='+',
help='One or more service names to perform this action on. To select the '
'default service for your app, use "default".')
MODULES_OPTIONAL_ARG = base.Argument(
'modules',
nargs='*',
help='An optional list of service names to perform this action on. To '
'select the default service for your app, use "default". If no services '
'are given, all services are used.')
IGNORE_CERTS_FLAG = base.Argument(
'--ignore-bad-certs',
action='store_true',
default=False,
help=argparse.SUPPRESS)
| 32.4375 | 79 | 0.716763 |
import argparse
from googlecloudsdk.calliope import base
SERVER_FLAG = base.Argument(
'--server',
help=argparse.SUPPRESS)
VERSION_FLAG = base.Argument(
'--version',
required=True,
help='The version of the app that you want to operate on.')
MODULES_ARG = base.Argument(
'modules',
nargs='+',
help='One or more service names to perform this action on. To select the '
'default service for your app, use "default".')
MODULES_OPTIONAL_ARG = base.Argument(
'modules',
nargs='*',
help='An optional list of service names to perform this action on. To '
'select the default service for your app, use "default". If no services '
'are given, all services are used.')
IGNORE_CERTS_FLAG = base.Argument(
'--ignore-bad-certs',
action='store_true',
default=False,
help=argparse.SUPPRESS)
| true | true |
1c366b4476d31eef747dfd6a4a9e924c5f8713e7 | 1,327 | py | Python | slicedpy/utils.py | JackKelly/slicedpy | c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62 | [
"Apache-2.0"
] | 3 | 2017-02-03T22:05:25.000Z | 2017-08-29T19:06:17.000Z | slicedpy/utils.py | JackKelly/slicedpy | c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62 | [
"Apache-2.0"
] | null | null | null | slicedpy/utils.py | JackKelly/slicedpy | c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62 | [
"Apache-2.0"
] | null | null | null | from __future__ import division, print_function
import pandas as pd
import numpy as np
def find_nearest(data, target, align='start', max_time_diff=None):
"""Finds the index of the nearest row in `data` to `target` time.
Args:
* data (pd.Series or pd.DataFrame): if `align==back` then `data` must
have an `end` column.
* target (pd.Timeseries or timestamp)
* align (str): `start` or `end`. Align with the front of the event
(as recorded by `data.index`) or the back (as recorded by `data['end']`)
* max_time_diff (datetime.timedelta or None): optional.
Returns:
int. Index into `data` for the element nearest in time to `target`.
Returns None if:
* diff is > `max_time_diff` or
* data is None or
* data.shape[0] == 0
"""
assert(align in ['start','end'])
if data is None or data.shape[0] == 0:
return None
if isinstance(target, pd.Timestamp):
target = target.to_pydatetime()
if align == 'start':
diff = data.index.to_pydatetime() - target
else:
diff = pd.to_datetime(data['end']) - target
diff = np.abs(diff)
min_diff = diff.min()
if max_time_diff is not None and min_diff > max_time_diff:
return None
else:
return diff.argmin()
| 28.847826 | 80 | 0.61266 | from __future__ import division, print_function
import pandas as pd
import numpy as np
def find_nearest(data, target, align='start', max_time_diff=None):
assert(align in ['start','end'])
if data is None or data.shape[0] == 0:
return None
if isinstance(target, pd.Timestamp):
target = target.to_pydatetime()
if align == 'start':
diff = data.index.to_pydatetime() - target
else:
diff = pd.to_datetime(data['end']) - target
diff = np.abs(diff)
min_diff = diff.min()
if max_time_diff is not None and min_diff > max_time_diff:
return None
else:
return diff.argmin()
| true | true |
1c366c0d55d5cf0b11f8a162f3fae00e8bc758b2 | 2,126 | py | Python | stdplugins/fork.py | Jayraj448/TGUserBot | 3b56fe32686ed35b8b2392f54fcaaa93f7b6d700 | [
"Apache-2.0"
] | 4 | 2019-10-03T04:59:02.000Z | 2019-10-31T00:01:33.000Z | stdplugins/fork.py | Jayraj448/TGUserBot | 3b56fe32686ed35b8b2392f54fcaaa93f7b6d700 | [
"Apache-2.0"
] | 1 | 2020-03-19T13:23:15.000Z | 2020-03-25T09:18:56.000Z | stdplugins/fork.py | Jayraj448/TGUserBot | 3b56fe32686ed35b8b2392f54fcaaa93f7b6d700 | [
"Apache-2.0"
] | 16 | 2019-09-30T17:26:32.000Z | 2020-05-21T10:23:26.000Z | """COMMAND : .fork"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
    """Animated ``.fork`` command.

    On an outgoing ``.fork`` message, repeatedly edits the message through a
    fake "forking / deploying" progress animation, one frame per second.
    """
    if event.fwd_from:
        return
    animation_interval = 1
    # animation_chars below has 14 frames and is indexed with `i % 14`.
    # The original `range(0, 13)` stopped one frame short, so the final
    # "Fork Deployed..." frame was never shown; iterate all 14 frames.
    animation_ttl = range(0, 14)
    input_str = event.pattern_match.group(1)
    if input_str == "fork":
        animation_chars = [
            "`Your bot is running\n\nTelethon version:` 1.9.0\n`Python:` 3.7.3\n`User:` @amnd33p\n`Database Status: Databases functioning normally!`",
            "`Connecting To github.com...`",
            "`Deleting Old Repo....`",
            "`Forking TGUserBot... 0%\n\n⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 0 MiB / 100.7 MiB`",
            "`Forking TGUserBot... 4%\n\n⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 4 MiB / 100.7 MiB`",
            "`Forking TGUserBot... 8%\n\n⬛️⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 8 MiB / 100.7 MiB`",
            "`Forking TGUserBot... 20%\n\n⬛️⬛️⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 20 MiB / 100.7 MiB`",
            "`Forking TGUserBot... 36%\n\n⬛️⬛️⬛️⬛️⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 36 MiB / 100.7 MiB `",
            "`Forking TGUserBot... 52%\n\n⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 52.7 MiB / 100.7 MiB `",
            "`Forking TGUserBot... 75%\n\n⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬜️⬜️⬜️⬜️\n\nFile Size: 52.7 MiB / 100.7 MiB `",
            "`Forking TGUserBot... 84%\n\n⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬜️⬜️\n\nFile Size: 84.7 MiB / 100.7 MiB `",
            "`Forking TGUserBot... 100%\n\n⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️\n\nFile Size: 100.7 MiB / 100.7 MiB\n\nTask Completed... `",
            "`Fork Deploying...`\n\n@UniBorg ( `Custom Built By` @amnd33p ) \n`Verified Account:` ☑️\n`Official Channel:` https://t.me/Xpl0iter\n\n`Python` `Loading...`\n[GCC 7.3.0]\n`Telethon` `Loading...`",
            "`Fork Deployed...`\n\n@UniBorg ( `Custom Built By` @amnd33p ) \n`Verified Account:` ✅\n`Official Channel:` https://t.me/Xpl0iter\n\n`Python` 3.6.8 [GCC 7.3.0]\n`Telethon` 1.8.0"
        ]
        for i in animation_ttl:
            await asyncio.sleep(animation_interval)
            await event.edit(animation_chars[i % 14])
| 68.580645 | 208 | 0.5127 | from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 13)
input_str = event.pattern_match.group(1)
if input_str == "fork":
animation_chars = [
"`Your bot is running\n\nTelethon version:` 1.9.0\n`Python:` 3.7.3\n`User:` @amnd33p\n`Database Status: Databases functioning normally!`",
"`Connecting To github.com...`",
"`Deleting Old Repo....`",
"`Forking TGUserBot... 0%\n\n⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 0 MiB / 100.7 MiB`",
"`Forking TGUserBot... 4%\n\n⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 4 MiB / 100.7 MiB`",
"`Forking TGUserBot... 8%\n\n⬛️⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 8 MiB / 100.7 MiB`",
"`Forking TGUserBot... 20%\n\n⬛️⬛️⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 20 MiB / 100.7 MiB`",
"`Forking TGUserBot... 36%\n\n⬛️⬛️⬛️⬛️⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 36 MiB / 100.7 MiB `",
"`Forking TGUserBot... 52%\n\n⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬜️⬜️⬜️⬜️⬜️⬜️⬜️\n\nFile Size: 52.7 MiB / 100.7 MiB `",
"`Forking TGUserBot... 75%\n\n⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬜️⬜️⬜️⬜️\n\nFile Size: 52.7 MiB / 100.7 MiB `",
"`Forking TGUserBot... 84%\n\n⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬜️⬜️\n\nFile Size: 84.7 MiB / 100.7 MiB `",
"`Forking TGUserBot... 100%\n\n⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️\n\nFile Size: 100.7 MiB / 100.7 MiB\n\nTask Completed... `",
"`Fork Deploying...`\n\n@UniBorg ( `Custom Built By` @amnd33p ) \n`Verified Account:` ☑️\n`Official Channel:` https://t.me/Xpl0iter\n\n`Python` `Loading...`\n[GCC 7.3.0]\n`Telethon` `Loading...`",
"`Fork Deployed...`\n\n@UniBorg ( `Custom Built By` @amnd33p ) \n`Verified Account:` ✅\n`Official Channel:` https://t.me/Xpl0iter\n\n`Python` 3.6.8 [GCC 7.3.0]\n`Telethon` 1.8.0"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 14])
| true | true |
1c366c8d7429b7b092acdf3f387648308f89adc4 | 1,991 | py | Python | predict2.py | ecdedios/ddfloww-site | bd582bc34a6248338abac8c2fe9c22bfbf69d79f | [
"MIT"
] | 1 | 2019-06-19T02:40:38.000Z | 2019-06-19T02:40:38.000Z | predict2.py | ecdedios/ddfloww-site | bd582bc34a6248338abac8c2fe9c22bfbf69d79f | [
"MIT"
] | 3 | 2019-12-26T17:28:17.000Z | 2022-03-21T22:17:22.000Z | predict2.py | ecdedios/ddfloww-site | bd582bc34a6248338abac8c2fe9c22bfbf69d79f | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegressionCV
from collections import OrderedDict
from sklearn.metrics import accuracy_score
df = pd.read_csv('phase2_df.csv')
x_columns = ['slap',
'threat_object',
'beaten',
'limit_family_contact',
'kick_punch',
'threat_hit',
'push_shove',
'jealous',
'life_danger'
]
X = df[x_columns]
y = df[['reassault']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .15, random_state = 0, stratify=y)
clf = LogisticRegressionCV(cv=5,
random_state=0,
solver='liblinear'
).fit(X_train, y_train)
print('Accuracy of Logistic Regression classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Logistic Regression classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
def predictorizer(feature1, feature2, feature3, feature4, feature5, feature6, feature7, feature8, feature9):
    """Predict the probability of re-assault for one new observation.

    Parameters
    ----------
    feature1..feature9 :
        The nine risk-factor indicators, in the same order as ``x_columns``
        (slap, threat_object, beaten, limit_family_contact, kick_punch,
        threat_hit, push_shove, jealous, life_danger).

    Returns
    -------
    str
        Positive-class probability formatted as a percent, e.g. ``"87.5%"``.

    Notes
    -----
    Uses the module-level fitted classifier ``clf``.
    """
    new_data = OrderedDict([
        ('slap', feature1),
        ('threat_object', feature2),
        ('beaten', feature3),
        ('limit_family_contact', feature4),
        ('kick_punch', feature5),
        ('threat_hit', feature6),
        ('push_shove', feature7),
        ('jealous', feature8),
        ('life_danger', feature9,)
    ])
    # .values.reshape(1, -1) because it must be 2-dim: a single new observation.
    new_data = pd.Series(new_data).values.reshape(1, -1)
    # predict_proba returns shape (1, 2); [0, 1] is the positive-class
    # probability.  Direct indexing replaces the original fragile round-trip
    # through the array's str() repr and bracket-stripping .replace() calls.
    probability = float(clf.predict_proba(new_data)[0, 1])
    return "{:.1%}".format(probability)
print(predictorizer('1','1','1','1','1','1','1','1','1'))
| 31.603175 | 108 | 0.612757 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegressionCV
from collections import OrderedDict
from sklearn.metrics import accuracy_score
df = pd.read_csv('phase2_df.csv')
x_columns = ['slap',
'threat_object',
'beaten',
'limit_family_contact',
'kick_punch',
'threat_hit',
'push_shove',
'jealous',
'life_danger'
]
X = df[x_columns]
y = df[['reassault']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .15, random_state = 0, stratify=y)
clf = LogisticRegressionCV(cv=5,
random_state=0,
solver='liblinear'
).fit(X_train, y_train)
print('Accuracy of Logistic Regression classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Logistic Regression classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
def predictorizer(feature1, feature2, feature3, feature4, feature5, feature6, feature7, feature8, feature9):
new_data = OrderedDict([
('slap', feature1),
('threat_object', feature2),
('beaten', feature3),
('limit_family_contact', feature4),
('kick_punch', feature5),
('threat_hit', feature6),
('push_shove', feature7),
('jealous', feature8),
('life_danger', feature9,)
])
new_data = pd.Series(new_data).values.reshape(1,-1)
prediction = str(clf.predict_proba(new_data)[[0],[1]])
prediction = prediction.replace('[','')
prediction = prediction.replace(']','')
prediction = "{:.1%}".format(float(prediction))
return prediction
print(predictorizer('1','1','1','1','1','1','1','1','1'))
| true | true |
1c36706fffdc53a4bd7f96e38244557e8292ea2b | 12,014 | py | Python | lifelines/fitters/mixins.py | GrowthJeff/lifelines | 4415be1bfeb7d15203109842926c1f6e50facaa6 | [
"MIT"
] | null | null | null | lifelines/fitters/mixins.py | GrowthJeff/lifelines | 4415be1bfeb7d15203109842926c1f6e50facaa6 | [
"MIT"
] | null | null | null | lifelines/fitters/mixins.py | GrowthJeff/lifelines | 4415be1bfeb7d15203109842926c1f6e50facaa6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import List, Optional, Dict, Any, Iterable
from textwrap import dedent, fill
from autograd import numpy as anp
import numpy as np
from pandas import DataFrame, Series
from lifelines.statistics import proportional_hazard_test, TimeTransformers
from lifelines.utils import format_p_value
from lifelines.utils.lowess import lowess
class SplineFitterMixin:
    """Shared helpers for spline-based fitters: a cubic-spline basis built
    from positive-part (ReLU-style) truncated power terms."""

    @staticmethod
    def relu(x: np.array):
        # Positive part (x)+ = max(0, x).  Uses autograd's numpy (anp) so the
        # expression stays differentiable during model fitting.
        return anp.maximum(0, x)

    def basis(self, x: np.array, knot: float, min_knot: float, max_knot: float):
        """Evaluate one cubic spline basis term at ``x`` for an interior
        ``knot``, given the boundary knots ``min_knot`` and ``max_knot``.

        The lambda_-weighted subtraction of the boundary-knot terms is the
        standard restricted ("natural") cubic spline construction, which
        constrains the fitted function beyond the boundary knots.
        """
        lambda_ = (max_knot - knot) / (max_knot - min_knot)
        return self.relu(x - knot) ** 3 - (lambda_ * self.relu(x - min_knot) ** 3 + (1 - lambda_) * self.relu(x - max_knot) ** 3)
class ProportionalHazardMixin:
    """Mixin for proportional-hazard regression fitters (e.g. a Cox model):
    provides assumption checking, hazard ratios, and follow-up-time
    hazard-ratio recomputation."""

    def check_assumptions(
        self,
        training_df: DataFrame,
        advice: bool = True,
        show_plots: bool = False,
        p_value_threshold: float = 0.01,
        plot_n_bootstraps: int = 10,
        columns: Optional[List[str]] = None,
    ) -> None:
        """
        Use this function to test the proportional hazards assumption. See usage example at
        https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html

        Parameters
        -----------
        training_df: DataFrame
            the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
        advice: bool, optional
            display advice as output to the user's screen
        show_plots: bool, optional
            display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.
            This will slow down the function significantly.
        p_value_threshold: float, optional
            the threshold to use to alert the user of violations. See note below.
        plot_n_bootstraps:
            in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
            the function significantly.
        columns: list, optional
            specify a subset of columns to test.

        Examples
        ----------
        .. code:: python

            from lifelines.datasets import load_rossi
            from lifelines import CoxPHFitter

            rossi = load_rossi()
            cph = CoxPHFitter().fit(rossi, 'week', 'arrest')

            cph.check_assumptions(rossi)

        Notes
        -------
        The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
        will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
        Similarly, when there are lots of observations, even minor deviances from the proportional hazard
        assumption will be flagged.

        With that in mind, it's best to use a combination of statistical tests and eyeball tests to
        determine the most serious violations.

        References
        -----------
        section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
        http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
        http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
        """
        # Residuals are joined back to the training frame by index below, so
        # duplicate index labels would silently mis-align rows.
        if not training_df.index.is_unique:
            raise IndexError(
                "`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index"
            )
        residuals = self.compute_residuals(training_df, kind="scaled_schoenfeld")
        # Run the proportional-hazards test under both the "rank" and "km"
        # time transforms, reusing the residuals computed above.
        test_results = proportional_hazard_test(self, training_df, time_transform=["rank", "km"], precomputed_residuals=residuals)
        residuals_and_duration = residuals.join(training_df[self.duration_col])
        counter = 0
        n = residuals_and_duration.shape[0]
        for variable in self.params_.index.intersection(columns or self.params_.index):
            # Flag the variable if its *best* (smallest) p across the time
            # transforms falls at or below the threshold.
            minumum_observed_p_value = test_results.summary.loc[variable, "p"].min()
            if np.round(minumum_observed_p_value, 2) > p_value_threshold:
                continue
            counter += 1
            if counter == 1:
                # First violation: print the general preamble and full summary once.
                if advice:
                    print(
                        fill(
                            """The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged."""
                            % p_value_threshold,
                            width=100,
                        )
                    )
                    print()
                    print(
                        fill(
                            """With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
                            width=100,
                        )
                    )
                    print()
                test_results.print_summary()
                print()
                print()
            print(
                "%d. Variable '%s' failed the non-proportional test: p-value is %s."
                % (counter, variable, format_p_value(4)(minumum_observed_p_value)),
                end="\n\n",
            )
            if advice:
                values = training_df[variable]
                value_counts = values.value_counts()
                n_uniques = value_counts.shape[0]
                # Arbitrarily chosen bounds (<= 10 unique values, each seen at
                # least 5 times) to check suitability as a strata column.
                # This should capture dichotomous / low cardinality values.
                if n_uniques <= 10 and value_counts.min() >= 5:
                    print(
                        fill(
                            "    Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(
                                n_uniques, variable
                            ),
                            width=100,
                        )
                    )
                else:
                    print(
                        fill(
                            """   Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".format(
                                var=variable
                            ),
                            width=100,
                        ),
                        end="\n\n",
                    )
                    print(
                        fill(
                            """   Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.""".format(
                                var=variable
                            ),
                            width=100,
                        ),
                        end="\n\n",
                    )
                    print(
                        fill(
                            """   Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""",
                            width=100,
                        ),
                        end="\n\n",
                    )
            if show_plots:
                # Imported lazily so matplotlib is only required when plotting.
                from matplotlib import pyplot as plt

                fig = plt.figure()
                # plot variable against all time transformations.
                for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
                    p_value = test_results.summary.loc[(variable, transform_name), "p"]
                    ax = fig.add_subplot(1, 2, i)
                    y = residuals_and_duration[variable]
                    # Transformed event times, restricted to observed events.
                    tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
                    ax.scatter(tt, y, alpha=0.75)
                    y_lowess = lowess(tt.values, y.values)
                    ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
                    # bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals
                    for _ in range(plot_n_bootstraps):
                        ix = sorted(np.random.choice(n, n))
                        tt_ = tt.values[ix]
                        y_lowess = lowess(tt_, y.values[ix])
                        ax.plot(tt_, y_lowess, color="k", alpha=0.30)
                    # Keep the x-limits from the scatter; the zero line should
                    # not widen the view.
                    best_xlim = ax.get_xlim()
                    ax.hlines(0, 0, tt.max(), linestyles="dashed", linewidths=1)
                    ax.set_xlim(best_xlim)
                    ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
                fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
                plt.tight_layout()
                plt.subplots_adjust(top=0.90)
        if advice and counter > 0:
            print(
                dedent(
                    r"""
                ---
                [A]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
                [B]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
                [C]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
                [D]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
                [E]  https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
            """
                )
            )
        if counter == 0:
            print("Proportional hazard assumption looks okay.")

    @property
    def hazard_ratios_(self):
        """Hazard ratios, i.e. ``exp(coef)`` of the fitted coefficients,
        as a Series named ``exp(coef)`` indexed like ``params_``."""
        return Series(np.exp(self.params_), index=self.params_.index, name="exp(coef)")

    def compute_followup_hazard_ratios(self, training_df: DataFrame, followup_times: Iterable) -> DataFrame:
        """
        Recompute the hazard ratio at different follow-up times (lifelines handles accounting for updated censoring and updated durations).
        This is useful because we need to remember that the hazard ratio is actually a weighted-average of period-specific hazard ratios.

        Parameters
        ----------

        training_df: pd.DataFrame
            The same dataframe used to train the model
        followup_times: Iterable
            a list/array of follow-up times to recompute the hazard ratio at.

        Returns
        -------
        pd.DataFrame
            one row per follow-up time, one column per covariate's hazard ratio.
        """
        results = {}
        for t in sorted(followup_times):
            assert t <= training_df[self.duration_col].max(), "all follow-up times must be less than max observed duration"
            df = training_df.copy()
            # if we "rollback" the df to time t, who is dead and who is censored
            df[self.event_col] = (df[self.duration_col] <= t) & df[self.event_col]
            df[self.duration_col] = np.minimum(df[self.duration_col], t)

            # Refit a fresh model of the same class with identical
            # regularization settings on the truncated data.
            model = self.__class__(penalizer=self.penalizer, l1_ratio=self.l1_ratio).fit(
                df, self.duration_col, self.event_col, weights_col=self.weights_col, entry_col=self.entry_col
            )
            results[t] = model.hazard_ratios_
        return DataFrame(results).T
| 45.335849 | 354 | 0.570168 |
from typing import List, Optional, Dict, Any, Iterable
from textwrap import dedent, fill
from autograd import numpy as anp
import numpy as np
from pandas import DataFrame, Series
from lifelines.statistics import proportional_hazard_test, TimeTransformers
from lifelines.utils import format_p_value
from lifelines.utils.lowess import lowess
class SplineFitterMixin:
@staticmethod
def relu(x: np.array):
return anp.maximum(0, x)
def basis(self, x: np.array, knot: float, min_knot: float, max_knot: float):
lambda_ = (max_knot - knot) / (max_knot - min_knot)
return self.relu(x - knot) ** 3 - (lambda_ * self.relu(x - min_knot) ** 3 + (1 - lambda_) * self.relu(x - max_knot) ** 3)
class ProportionalHazardMixin:
def check_assumptions(
self,
training_df: DataFrame,
advice: bool = True,
show_plots: bool = False,
p_value_threshold: float = 0.01,
plot_n_bootstraps: int = 10,
columns: Optional[List[str]] = None,
) -> None:
if not training_df.index.is_unique:
raise IndexError(
"`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index"
)
residuals = self.compute_residuals(training_df, kind="scaled_schoenfeld")
test_results = proportional_hazard_test(self, training_df, time_transform=["rank", "km"], precomputed_residuals=residuals)
residuals_and_duration = residuals.join(training_df[self.duration_col])
counter = 0
n = residuals_and_duration.shape[0]
for variable in self.params_.index.intersection(columns or self.params_.index):
minumum_observed_p_value = test_results.summary.loc[variable, "p"].min()
if np.round(minumum_observed_p_value, 2) > p_value_threshold:
continue
counter += 1
if counter == 1:
if advice:
print(
fill(
"""The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged."""
% p_value_threshold,
width=100,
)
)
print()
print(
fill(
"""With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
width=100,
)
)
print()
test_results.print_summary()
print()
print()
print(
"%d. Variable '%s' failed the non-proportional test: p-value is %s."
% (counter, variable, format_p_value(4)(minumum_observed_p_value)),
end="\n\n",
)
if advice:
values = training_df[variable]
value_counts = values.value_counts()
n_uniques = value_counts.shape[0]
# Arbitrary chosen 10 and 4 to check for ability to use strata col.
# This should capture dichotomous / low cardinality values.
if n_uniques <= 10 and value_counts.min() >= 5:
print(
fill(
" Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(
n_uniques, variable
),
width=100,
)
)
else:
print(
fill(
""" Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""",
width=100,
),
end="\n\n",
)
if show_plots:
from matplotlib import pyplot as plt
fig = plt.figure()
# plot variable against all time transformations.
for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
p_value = test_results.summary.loc[(variable, transform_name), "p"]
ax = fig.add_subplot(1, 2, i)
y = residuals_and_duration[variable]
tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
ax.scatter(tt, y, alpha=0.75)
y_lowess = lowess(tt.values, y.values)
ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
# bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals
for _ in range(plot_n_bootstraps):
ix = sorted(np.random.choice(n, n))
tt_ = tt.values[ix]
y_lowess = lowess(tt_, y.values[ix])
ax.plot(tt_, y_lowess, color="k", alpha=0.30)
best_xlim = ax.get_xlim()
ax.hlines(0, 0, tt.max(), linestyles="dashed", linewidths=1)
ax.set_xlim(best_xlim)
ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
plt.tight_layout()
plt.subplots_adjust(top=0.90)
if advice and counter > 0:
print(
dedent(
r"""
---
[A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
[B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
[C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
[D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
[E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
"""
)
)
if counter == 0:
print("Proportional hazard assumption looks okay.")
@property
def hazard_ratios_(self):
return Series(np.exp(self.params_), index=self.params_.index, name="exp(coef)")
def compute_followup_hazard_ratios(self, training_df: DataFrame, followup_times: Iterable) -> DataFrame:
results = {}
for t in sorted(followup_times):
assert t <= training_df[self.duration_col].max(), "all follow-up times must be less than max observed duration"
df = training_df.copy()
# if we "rollback" the df to time t, who is dead and who is censored
df[self.event_col] = (df[self.duration_col] <= t) & df[self.event_col]
df[self.duration_col] = np.minimum(df[self.duration_col], t)
model = self.__class__(penalizer=self.penalizer, l1_ratio=self.l1_ratio).fit(
df, self.duration_col, self.event_col, weights_col=self.weights_col, entry_col=self.entry_col
)
results[t] = model.hazard_ratios_
return DataFrame(results).T
| true | true |
1c3670c02a3741dceb7e217f61b810d5322434c7 | 429 | py | Python | account/migrations/0004_alter_profile_photo.py | julesc00/socialmele | abed2f4ca73203afccf5de9ddd7d576aa627cf6b | [
"MIT"
] | null | null | null | account/migrations/0004_alter_profile_photo.py | julesc00/socialmele | abed2f4ca73203afccf5de9ddd7d576aa627cf6b | [
"MIT"
] | null | null | null | account/migrations/0004_alter_profile_photo.py | julesc00/socialmele | abed2f4ca73203afccf5de9ddd7d576aa627cf6b | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-05-01 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0003_alter_profile_photo'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='photo',
field=models.ImageField(blank=True, null=True, upload_to='images/users/%Y/%m/%d'),
),
]
| 22.578947 | 94 | 0.606061 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0003_alter_profile_photo'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='photo',
field=models.ImageField(blank=True, null=True, upload_to='images/users/%Y/%m/%d'),
),
]
| true | true |
1c3671765dface0d62955322a8c4f99cc1df9d83 | 54 | py | Python | carla/recourse_methods/catalog/face/library/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 140 | 2021-08-03T21:53:32.000Z | 2022-03-20T08:52:02.000Z | carla/recourse_methods/catalog/face/library/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 54 | 2021-03-07T18:22:16.000Z | 2021-08-03T12:06:31.000Z | carla/recourse_methods/catalog/face/library/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 16 | 2021-08-23T12:14:58.000Z | 2022-03-01T00:52:58.000Z | # flake8: noqa
from .face_method import graph_search
| 13.5 | 37 | 0.796296 |
from .face_method import graph_search
| true | true |
1c3671bfc6332f3424ddea5eaeaa6013b8fc4c0b | 18,139 | py | Python | config/qtile/Managers/ScreenManager.py | dat-adi/Dotfiles | 7a541aba2bbdd88736bebc9e82f6921ab4a3e03b | [
"Apache-2.0"
] | 2 | 2021-05-06T15:58:29.000Z | 2021-10-02T14:12:08.000Z | config/qtile/Managers/ScreenManager.py | dat-adi/dotfiles | 7a541aba2bbdd88736bebc9e82f6921ab4a3e03b | [
"Apache-2.0"
] | null | null | null | config/qtile/Managers/ScreenManager.py | dat-adi/dotfiles | 7a541aba2bbdd88736bebc9e82f6921ab4a3e03b | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import os
from libqtile import bar, layout, widget, hook, qtile
from libqtile.config import Click, Drag, Group, Key, KeyChord, Match, Screen
def get_two_screens(colors):
    """Build the Screen/bar configuration for a dual-monitor qtile setup.

    Both monitors get an identical bottom bar, except that only the first
    bar carries the pacman CheckUpdates widget.  The original function
    duplicated ~140 lines of widget configuration per screen; this version
    builds each bar from one private helper so the two stay in sync.

    Parameters:
        colors: indexable palette; indices 0-7 supply bar, group-box and
            widget foreground/background colours.

    Returns:
        list of two Screen objects, one per monitor.
    """
    groupbox_defaults = dict(
        margin_y=3,
        margin_x=0,
        padding_y=7,
        padding_x=7,
        borderwidth=3,
        active=colors[2],
        inactive=colors[7],
        rounded=False,
        highlight_color=colors[1],
        highlight_method="line",
        this_current_screen_border=colors[6],
        this_screen_border=colors[4],
        other_current_screen_border=colors[6],
        other_screen_border=colors[4],
        foreground=colors[2],
        background=colors[0],
    )

    # (visible group, font, fontsize) for each group box; the per-group font
    # is needed so each group label's icon glyph renders correctly.
    group_specs = [
        (["SYS"], "FiraCode Nerd Font", 15),
        (["DEV"], "FiraCode Nerd Font", 17),
        (["WWW"], "Font Awesome 5 Free", 25),
        (["DIS"], "FiraCode Nerd Font", 16),
        (["TEAMS"], "FiraCode Nerd Font", 17),
        (["DOC"], "Font Awesome 5 Free", 20),
        (["MUS"], "Font Awesome 5 Free", 27),
        (["VID"], "Font Awesome 5 Free", 20),
        (["VBOX"], "Font Awesome 5 Free", 20),
    ]

    def _make_bar(with_updates):
        # Widget instances cannot be shared between bars, so every call
        # builds a fresh widget list.
        widgets = [
            widget.CurrentLayoutIcon(
                custom_icon_paths=[os.path.expanduser("~/.config/qtile/icons")],
                foreground=colors[0],
                background=colors[1],
                padding=0,
                scale=0.7,
            ),
            widget.Sep(
                linewidth=0,
                padding=6,
                foreground=colors[2],
                background=colors[0],
            ),
        ]
        widgets += [
            widget.GroupBox(visible_groups=groups, font=font, fontsize=fontsize, **groupbox_defaults)
            for groups, font, fontsize in group_specs
        ]
        widgets += [
            widget.Spacer(10),
            # WindowName
            widget.WindowName(
                format=" {name}",
                max_chars=80,
                background=colors[0],
                foreground=colors[6],
            ),
            widget.Chord(
                chords_colors={
                    "launch": ("#0000ff", "#ffffff"),
                },
                name_transform=lambda name: name.upper(),
            ),
            # NOTE(review): qtile supports only one Systray instance; one per
            # bar matches the original config but may warn at runtime.
            widget.Systray(icon_size=16, background=colors[0], padding=5),
            widget.Spacer(10),
            # Backlight
            widget.TextBox(text="", padding=8, foreground=colors[3], fontsize=25),
            widget.Backlight(
                foreground=colors[3],
                change_command="light -S {0}",
                backlight_name="intel_backlight",
            ),
            widget.Spacer(10),
        ]
        if with_updates:
            # Arch package updates (primary bar only); click opens pacman.
            widgets += [
                widget.CheckUpdates(
                    update_interval=1800,
                    distro="Arch",
                    display_format="{updates} Updates",
                    mouse_callbacks={
                        "Button1": lambda: qtile.cmd_spawn(
                            "alacritty -e sudo pacman -Syu"
                        )
                    },
                    foreground=colors[3],
                ),
                widget.Spacer(10),
            ]
        widgets += [
            # Volume
            widget.TextBox(text="墳", foreground=colors[3], padding=6, fontsize=23),
            widget.Volume(foreground=colors[3]),
            widget.Spacer(10),
            # Time
            widget.TextBox(text="", fontsize=21, padding=6, foreground=colors[3]),
            widget.Clock(foreground=colors[3], format="%d-%m-%Y | %a %I:%M %p"),
            widget.Spacer(10),
            # CPU
            widget.TextBox(text="", fontsize=23, padding=8, foreground=colors[3]),
            widget.CPU(format="{load_percent}%", foreground=colors[3]),
            widget.Spacer(10),
            # Battery
            widget.TextBox(text="", fontsize=14, foreground=colors[3]),
            widget.Battery(
                foreground=colors[3],
                low_foreground="d08770",
                format="{percent:2.0%}",
            ),
            widget.Spacer(10),
        ]
        return bar.Bar(widgets, 24, background=colors[0])

    return [
        Screen(bottom=_make_bar(with_updates=True)),
        Screen(bottom=_make_bar(with_updates=False)),
    ]
def get_one_screens(colors):
    """Build the Screen list for a single-monitor qtile setup.

    Args:
        colors: indexable palette. Indices used here: 0 bar background,
            1 layout-icon background / highlight, 2 active-group text,
            3 status-widget foreground, 4 other-screen border,
            6 current-screen border / window name, 7 inactive-group text.

    Returns:
        A one-element list containing the configured Screen (top bar).
    """
    groupbox_defaults = dict(
        margin_y=3,
        margin_x=0,
        padding_y=5,
        padding_x=7,
        borderwidth=3,
        active=colors[2],
        inactive=colors[7],
        rounded=False,
        highlight_color=colors[1],
        highlight_method="line",
        this_current_screen_border=colors[6],
        this_screen_border=colors[4],
        other_current_screen_border=colors[6],
        other_screen_border=colors[4],
        foreground=colors[2],
        background=colors[0],
    )
    # (group name, font, fontsize) per workspace. Using the right font is
    # vital for rendering each group's icon glyph.
    group_box_specs = [
        ("SYS", "FiraCode Nerd Font", 15),
        ("DEV", "FiraCode Nerd Font", 17),
        ("WWW", "Font Awesome 5 Free", 25),
        ("DIS", "FiraCode Nerd Font", 16),
        ("TEAMS", "FiraCode Nerd Font", 17),
        ("DOC", "Font Awesome 5 Free", 20),
        ("MUS", "Font Awesome 5 Free", 27),
        ("VID", "Font Awesome 5 Free", 20),
        ("VBOX", "Font Awesome 5 Free", 20),
    ]
    # One GroupBox per workspace, all sharing groupbox_defaults (previously
    # nine copy-pasted widget.GroupBox(...) calls).
    group_boxes = [
        widget.GroupBox(
            visible_groups=[group], font=font, fontsize=fontsize,
            **groupbox_defaults
        )
        for group, font, fontsize in group_box_specs
    ]
    widgets = [
        widget.CurrentLayoutIcon(
            custom_icon_paths=[os.path.expanduser("~/.config/qtile/icons")],
            foreground=colors[0],
            background=colors[1],
            padding=0,
            scale=0.7,
        ),
        widget.Sep(
            linewidth=0,
            padding=6,
            foreground=colors[2],
            background=colors[0],
        ),
        *group_boxes,
        widget.Spacer(500),
        # WindowName
        widget.WindowName(
            format="{name}",
            font="Ubuntu Mono Bold",
            fontsize=10,
            max_chars=80,
            background=colors[0],
            foreground=colors[6],
        ),
        widget.Chord(
            chords_colors={
                "launch": ("#0000ff", "#ffffff"),
            },
            name_transform=lambda name: name.upper(),
        ),
        widget.Systray(icon_size=16, background=colors[0], padding=5),
        widget.Spacer(10),
        # Backlight
        widget.TextBox(
            text="", padding=8, foreground=colors[3], fontsize=25
        ),
        widget.Backlight(
            foreground=colors[3],
            change_command="light -S {0}",
            backlight_name="intel_backlight",
        ),
        widget.Spacer(10),
        # Pending pacman updates; click opens an upgrade terminal.
        widget.CheckUpdates(
            update_interval=1800,
            distro="Arch",
            display_format="{updates} Updates",
            mouse_callbacks={
                "Button1": lambda: qtile.cmd_spawn(
                    "alacritty -e sudo pacman -Syu"
                )
            },
            foreground=colors[3],
        ),
        widget.Spacer(10),
        # Volume
        widget.TextBox(
            text="墳", foreground=colors[3], padding=6, fontsize=23
        ),
        widget.Volume(foreground=colors[3]),
        widget.Spacer(10),
        # Time
        widget.TextBox(
            text="", fontsize=21, padding=6, foreground=colors[3]
        ),
        widget.Clock(foreground=colors[3], format="%d-%m-%Y | %a %I:%M %p"),
        widget.Spacer(10),
        # CPU
        widget.TextBox(
            text="", fontsize=23, padding=8, foreground=colors[3]
        ),
        widget.CPU(format="{load_percent}%", foreground=colors[3]),
        widget.Spacer(10),
        # Battery
        widget.TextBox(text="", fontsize=14, foreground=colors[3]),
        widget.Battery(
            foreground=colors[3],
            low_foreground="d08770",
            format="{percent:2.0%}",
        ),
        widget.Spacer(10),
    ]
    screens = [
        Screen(
            top=bar.Bar(
                widgets,
                24,
                background=colors[0],
            ),
        ),
    ]
    return screens
| 39.177106 | 100 | 0.364077 |
import os
from libqtile import bar, layout, widget, hook, qtile
from libqtile.config import Click, Drag, Group, Key, KeyChord, Match, Screen
def get_two_screens(colors):
groupbox_defaults = dict(
margin_y=3,
margin_x=0,
padding_y=7,
padding_x=7,
borderwidth=3,
active=colors[2],
inactive=colors[7],
rounded=False,
highlight_color=colors[1],
highlight_method="line",
this_current_screen_border=colors[6],
this_screen_border=colors[4],
other_current_screen_border=colors[6],
other_screen_border=colors[4],
foreground=colors[2],
background=colors[0],
)
screens = [
Screen(
bottom=bar.Bar(
[
widget.CurrentLayoutIcon(
custom_icon_paths=[os.path.expanduser("~/.config/qtile/icons")],
foreground=colors[0],
background=colors[1],
padding=0,
scale=0.7,
),
widget.Sep(
linewidth=0,
padding=6,
foreground=colors[2],
background=colors[0],
),
widget.GroupBox(
visible_groups=["SYS"],
font="FiraCode Nerd Font",
fontsize=15,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DEV"],
font="FiraCode Nerd Font",
fontsize=17,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["WWW"],
font="Font Awesome 5 Free",
fontsize=25,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DIS"],
font="FiraCode Nerd Font",
fontsize=16,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["TEAMS"],
font="FiraCode Nerd Font",
fontsize=17,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DOC"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["MUS"],
font="Font Awesome 5 Free",
fontsize=27,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["VID"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["VBOX"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.Spacer(10),
widget.WindowName(
format=" {name}",
max_chars=80,
background=colors[0],
foreground=colors[6],
),
widget.Chord(
chords_colors={
"launch": ("#0000ff", "#ffffff"),
},
name_transform=lambda name: name.upper(),
),
widget.Systray(icon_size=16, background=colors[0], padding=5),
widget.Spacer(10),
widget.TextBox(
text="", padding=8, foreground=colors[3], fontsize=25
),
widget.Backlight(
foreground=colors[3],
change_command="light -S {0}",
backlight_name="intel_backlight",
),
widget.Spacer(10),
widget.CheckUpdates(
update_interval=1800,
distro="Arch",
display_format="{updates} Updates",
mouse_callbacks={
"Button1": lambda: qtile.cmd_spawn(
"alacritty -e sudo pacman -Syu"
)
},
foreground=colors[3],
),
widget.Spacer(10),
widget.TextBox(
text="墳", foreground=colors[3], padding=6, fontsize=23
),
widget.Volume(foreground=colors[3]),
widget.Spacer(10),
widget.TextBox(
text="", fontsize=21, padding=6, foreground=colors[3]
),
widget.Clock(foreground=colors[3], format="%d-%m-%Y | %a %I:%M %p"),
widget.Spacer(10),
widget.TextBox(
text="", fontsize=23, padding=8, foreground=colors[3]
),
widget.CPU(format="{load_percent}%", foreground=colors[3]),
widget.Spacer(10),
widget.TextBox(text="", fontsize=14, foreground=colors[3]),
widget.Battery(
foreground=colors[3],
low_foreground="d08770",
format="{percent:2.0%}",
),
widget.Spacer(10),
],
24,
background=colors[0],
),
),
Screen(
bottom=bar.Bar(
[
widget.CurrentLayoutIcon(
custom_icon_paths=[os.path.expanduser("~/.config/qtile/icons")],
foreground=colors[0],
background=colors[1],
padding=0,
scale=0.7,
),
widget.Sep(
linewidth=0,
padding=6,
foreground=colors[2],
background=colors[0],
),
widget.GroupBox(
visible_groups=["SYS"],
font="FiraCode Nerd Font",
fontsize=15,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DEV"],
font="FiraCode Nerd Font",
fontsize=17,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["WWW"],
font="Font Awesome 5 Free",
fontsize=25,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DIS"],
font="FiraCode Nerd Font",
fontsize=16,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["TEAMS"],
font="FiraCode Nerd Font",
fontsize=17,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DOC"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["MUS"],
font="Font Awesome 5 Free",
fontsize=27,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["VID"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["VBOX"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.Spacer(10),
widget.WindowName(
format=" {name}",
max_chars=80,
background=colors[0],
foreground=colors[6],
),
widget.Chord(
chords_colors={
"launch": ("#0000ff", "#ffffff"),
},
name_transform=lambda name: name.upper(),
),
widget.Systray(icon_size=16, background=colors[0], padding=5),
widget.Spacer(10),
widget.TextBox(
text="", padding=8, foreground=colors[3], fontsize=25
),
widget.Backlight(
foreground=colors[3],
change_command="light -S {0}",
backlight_name="intel_backlight",
),
widget.Spacer(10),
widget.TextBox(
text="墳", foreground=colors[3], padding=6, fontsize=23
),
widget.Volume(foreground=colors[3]),
widget.Spacer(10),
widget.TextBox(
text="", fontsize=21, padding=6, foreground=colors[3]
),
widget.Clock(foreground=colors[3], format="%d-%m-%Y | %a %I:%M %p"),
widget.Spacer(10),
widget.TextBox(
text="", fontsize=23, padding=8, foreground=colors[3]
),
widget.CPU(format="{load_percent}%", foreground=colors[3]),
widget.Spacer(10),
widget.TextBox(text="", fontsize=14, foreground=colors[3]),
widget.Battery(
foreground=colors[3],
low_foreground="d08770",
format="{percent:2.0%}",
),
widget.Spacer(10),
],
24,
background=colors[0],
),
),
]
return screens
def get_one_screens(colors):
groupbox_defaults = dict(
margin_y=3,
margin_x=0,
padding_y=5,
padding_x=7,
borderwidth=3,
active=colors[2],
inactive=colors[7],
rounded=False,
highlight_color=colors[1],
highlight_method="line",
this_current_screen_border=colors[6],
this_screen_border=colors[4],
other_current_screen_border=colors[6],
other_screen_border=colors[4],
foreground=colors[2],
background=colors[0],
)
screens = [
Screen(
top=bar.Bar(
[
widget.CurrentLayoutIcon(
custom_icon_paths=[os.path.expanduser("~/.config/qtile/icons")],
foreground=colors[0],
background=colors[1],
padding=0,
scale=0.7,
),
widget.Sep(
linewidth=0,
padding=6,
foreground=colors[2],
background=colors[0],
),
widget.GroupBox(
visible_groups=["SYS"],
font="FiraCode Nerd Font",
fontsize=15,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DEV"],
font="FiraCode Nerd Font",
fontsize=17,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["WWW"],
font="Font Awesome 5 Free",
fontsize=25,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DIS"],
font="FiraCode Nerd Font",
fontsize=16,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["TEAMS"],
font="FiraCode Nerd Font",
fontsize=17,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["DOC"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["MUS"],
font="Font Awesome 5 Free",
fontsize=27,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["VID"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.GroupBox(
visible_groups=["VBOX"],
font="Font Awesome 5 Free",
fontsize=20,
**groupbox_defaults
),
widget.Spacer(500),
widget.WindowName(
format="{name}",
font="Ubuntu Mono Bold",
fontsize=10,
max_chars=80,
background=colors[0],
foreground=colors[6],
),
widget.Chord(
chords_colors={
"launch": ("#0000ff", "#ffffff"),
},
name_transform=lambda name: name.upper(),
),
widget.Systray(icon_size=16, background=colors[0], padding=5),
widget.Spacer(10),
widget.TextBox(
text="", padding=8, foreground=colors[3], fontsize=25
),
widget.Backlight(
foreground=colors[3],
change_command="light -S {0}",
backlight_name="intel_backlight",
),
widget.Spacer(10),
widget.CheckUpdates(
update_interval=1800,
distro="Arch",
display_format="{updates} Updates",
mouse_callbacks={
"Button1": lambda: qtile.cmd_spawn(
"alacritty -e sudo pacman -Syu"
)
},
foreground=colors[3],
),
widget.Spacer(10),
widget.TextBox(
text="墳", foreground=colors[3], padding=6, fontsize=23
),
widget.Volume(foreground=colors[3]),
widget.Spacer(10),
widget.TextBox(
text="", fontsize=21, padding=6, foreground=colors[3]
),
widget.Clock(foreground=colors[3], format="%d-%m-%Y | %a %I:%M %p"),
widget.Spacer(10),
widget.TextBox(
text="", fontsize=23, padding=8, foreground=colors[3]
),
widget.CPU(format="{load_percent}%", foreground=colors[3]),
widget.Spacer(10),
widget.TextBox(text="", fontsize=14, foreground=colors[3]),
widget.Battery(
foreground=colors[3],
low_foreground="d08770",
format="{percent:2.0%}",
),
widget.Spacer(10),
],
24,
background=colors[0],
),
),
]
return screens
| true | true |
1c3671de991b760c1e3e6830247a65e28cd63810 | 1,022 | py | Python | jogo_dados.py | rodrigo1708/scriptsComPython | 1655451d7fbb8fa2899687de62df70e36ac97863 | [
"MIT"
] | null | null | null | jogo_dados.py | rodrigo1708/scriptsComPython | 1655451d7fbb8fa2899687de62df70e36ac97863 | [
"MIT"
] | null | null | null | jogo_dados.py | rodrigo1708/scriptsComPython | 1655451d7fbb8fa2899687de62df70e36ac97863 | [
"MIT"
] | null | null | null | # JOGO DE DADOS - 4 jogadores jogam um dado e, no final, será mostrada a classificação dos jogadores
# DICE GAME: four players each roll one die; the final ranking is printed.
from operator import itemgetter
from random import randint
from time import sleep

# One simulated d6 roll per player.
jogo = {'jogador 1': randint(1, 6), 'jogador 2': randint(1, 6),
        'jogador 3': randint(1, 6), 'jogador 4': randint(1, 6)}
print('='*15, 'VALORES SORTEADOS', '='*15)
for k, v in jogo.items():
    print(f'{k} tirou o valor {v} no dado.')
    sleep(1)
# Rank the players by their roll, highest score first.
# (The previous dead `ranking = {}` initialization was removed; it was
# immediately overwritten by this assignment.)
ranking = sorted(jogo.items(), key=itemgetter(1), reverse=True)
print('='*15, 'RANKING', '='*15)
sleep(1)
for pos, (nome, pontos) in enumerate(ranking):
    print(f'{pos + 1}º colocado: {nome} ==> {pontos} pontos.')
    sleep(1)
print('='*40)
print('<< PROGRAMA FINALIZADO >>')
| 42.583333 | 118 | 0.576321 |
from operator import itemgetter
from random import randint
from time import sleep
jogo = {'jogador 1': randint(1, 6), 'jogador 2': randint(1, 6),
'jogador 3': randint(1, 6), 'jogador 4': randint(1, 6)}
ranking = {}
print('='*15, 'VALORES SORTEADOS', '='*15)
for k, v in jogo.items():
print(f'{k} tirou o valor {v} no dado.')
sleep(1)
ranking = sorted(jogo.items(), key=itemgetter(1), reverse=True)
print('='*15, 'RANKING', '='*15)
sleep(1)
for k, v in enumerate(ranking):
print(f'{k + 1}º colocado: {v[0]} ==> {v[1]} pontos.')
sleep(1)
print('='*40)
print('<< PROGRAMA FINALIZADO >>')
| true | true |
1c36728c1f5d05857b6a3f289d7ff101268f235a | 4,355 | py | Python | experiments/densenet169_medical_transfer_medico/_sources/util_47b99ae08126d90d5ce16b9f952f88cc.py | Stevenah/keras-training-system | ef15519d84335621f3e8f73db68cd54134e723fe | [
"MIT"
] | 2 | 2018-09-19T14:53:15.000Z | 2021-09-30T21:46:26.000Z | experiments/densenet169_medical_transfer_medico/_sources/util_47b99ae08126d90d5ce16b9f952f88cc.py | Stevenah/keras-training-system | ef15519d84335621f3e8f73db68cd54134e723fe | [
"MIT"
] | null | null | null | experiments/densenet169_medical_transfer_medico/_sources/util_47b99ae08126d90d5ce16b9f952f88cc.py | Stevenah/keras-training-system | ef15519d84335621f3e8f73db68cd54134e723fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from keras.callbacks import Callback
from keras.models import model_from_json
from scipy.misc import imsave, imread, imresize
import numpy as np
import tensorflow as tf
import keras.backend as K
import os
import cv2
import shutil
def merge_dict_of_lists(dict1, dict2):
    """Merge two dicts of lists; for shared keys, dict1's list precedes dict2's."""
    merged = {}
    for key in set(dict1).union(dict2):
        merged[key] = dict1.get(key, []) + dict2.get(key, [])
    return merged
def prepare_dataset(split_dirs, split_index, split_total):
    """Assemble fresh training/validation directories for one CV fold.

    The split at `split_index` becomes the validation set; every other
    split is merged into the training set. Any previous contents of the
    two destination directories are wiped first.

    Args:
        split_dirs: per-split directory paths (as produced by split_data).
        split_index: index of the split held out for validation.
        split_total: total number of splits.

    Returns:
        (training_dir, validation_dir) paths under ../tmp.
    """
    training_dir = os.path.join('../tmp', 'training_data')
    validation_dir = os.path.join('../tmp', 'validation_data')
    # Start from a clean slate so files from a previous fold cannot leak in.
    if os.path.exists(validation_dir):
        shutil.rmtree(validation_dir)
    if os.path.exists(training_dir):
        shutil.rmtree(training_dir)
    os.makedirs(validation_dir)
    os.makedirs(training_dir)
    for index in range(split_total):
        if index == split_index:
            copytree(split_dirs[index], validation_dir)
        else:
            copytree(split_dirs[index], training_dir)
    return training_dir, validation_dir
def get_sub_dirs(path):
    """Return the names of the immediate subdirectories of `path`.

    Only the first tuple yielded by os.walk (the top level) is needed.
    The previous `root, *_ = os.walk(path)` consumed the entire recursive
    walk just to read that first item; `next()` stops after one step.
    """
    _, dir_names, _ = next(os.walk(path))
    return dir_names
def pad_string(string, size, fill=" ", edge="|"):
    """Pad `string` with `fill` up to `size` chars, wrap in `edge`, append a newline."""
    pad_count = size - len(string)
    # A non-positive count multiplies out to the empty string.
    return edge + string + fill * pad_count + edge + "\n"
def copytree(sourceRoot, destRoot):
    """Recursively copy `sourceRoot` into `destRoot` without overwriting.

    Existing destination files are left untouched. Destination
    subdirectories are created as needed.

    Args:
        sourceRoot: directory tree to copy from.
        destRoot: existing directory to copy into.

    Returns:
        False if `destRoot` does not exist or any destination file already
        existed (and was therefore skipped); True otherwise.
        Bug fix: the `ok` flag was previously computed but never returned,
        so the function always fell off the end returning None.
    """
    if not os.path.exists(destRoot):
        return False
    ok = True
    for path, _dirs, files in os.walk(sourceRoot):
        rel_path = os.path.relpath(path, sourceRoot)
        dest_path = os.path.join(destRoot, rel_path)
        if not os.path.exists(dest_path):
            os.makedirs(dest_path)
        for file_name in files:
            dest_file = os.path.join(dest_path, file_name)
            if os.path.isfile(dest_file):
                # Never overwrite an existing file; remember that we skipped.
                ok = False
                continue
            src_file = os.path.join(path, file_name)
            shutil.copy2(src_file, dest_file)
    return ok
def split_data_on_suffix(test_suffix, data_dir):
    """Copy files whose stem ends with `test_suffix` into a validation tree.

    Mirrors the class-subdirectory layout of <data_dir>/training under
    <data_dir>/validation, copying only files whose basename (without
    extension) ends with `test_suffix`. The originals remain in training.

    Args:
        test_suffix: filename-stem suffix marking validation files.
            NOTE(review): an empty suffix makes the slice `[-0:]` return the
            whole stem, so nothing ordinary matches -- confirm intended.
        data_dir: dataset root containing a `training` subdirectory.

    Returns:
        (training_dir, validation_dir) paths.
    """
    training_dir = os.path.join(data_dir, 'training')
    validation_dir = os.path.join(data_dir, 'validation')
    class_names = get_sub_dirs(training_dir)
    for class_name in class_names:
        train_class_dir = os.path.join(training_dir, class_name)
        valid_class_dir = os.path.join(validation_dir, class_name)
        if not os.path.exists(valid_class_dir):
            os.makedirs(valid_class_dir)
        for filename in os.listdir(train_class_dir):
            # Compare the last len(test_suffix) characters of the stem.
            if os.path.splitext(os.path.basename(filename))[0][-(len(test_suffix)):] == test_suffix:
                file_source = os.path.join(train_class_dir, filename)
                file_dest = os.path.join(valid_class_dir, filename)
                shutil.copy(file_source, file_dest)
    return training_dir, validation_dir
def split_data(folds, data_dir):
    """Partition a class-per-directory dataset into `folds` symlinked splits.

    Creates ../tmp/splits/split_<i> for each fold, each mirroring the class
    subdirectories of `data_dir`, and symlinks an equal-sized contiguous
    slice of every class's files into each split. Any existing ../tmp tree
    is removed first. Leftover files (len % folds) are assigned to no split.

    Args:
        folds: number of splits to create.
        data_dir: dataset root with one subdirectory per class.

    Returns:
        List of the split directory paths, in fold order.
    """
    class_names = get_sub_dirs(data_dir)
    if os.path.exists('../tmp'):
        shutil.rmtree('../tmp')
    split_dirs = []
    for index in range(folds):
        split_dirs.append(os.path.join('../tmp', 'splits', f'split_{index}'))
    for class_name in class_names:
        class_dir = os.path.join(data_dir, class_name)
        split_size = len(os.listdir(class_dir)) // folds
        split_start = 0
        split_end = split_size
        for split_index in range(folds):
            split_dir = os.path.join(split_dirs[split_index], class_name)
            os.makedirs(split_dir)
            # Symlink (not copy) this class's slice into the split to save space.
            for filename in os.listdir(class_dir)[split_start:split_end]:
                file_source = os.path.join(class_dir, filename)
                file_dest = os.path.join(split_dir, filename)
                os.symlink(file_source, file_dest)
            split_start += split_size
            split_end += split_size
    return split_dirs
def copysuffix(split_dir, dest_dir, suffix):
    """Copy files whose stem ends with `_<suffix>` from one split tree to another.

    Mirrors the class subdirectories of `split_dir` under `dest_dir`,
    copying only files whose basename (without extension) ends with an
    underscore followed by `suffix`.
    """
    class_names = get_sub_dirs(split_dir)
    for class_name in class_names:
        split_class_dir = os.path.join(split_dir, class_name)
        dest_class_dir = os.path.join(dest_dir, class_name)
        if not os.path.exists(dest_class_dir):
            os.makedirs(dest_class_dir)
        for filename in os.listdir(split_class_dir):
            # The last len(suffix)+1 chars of the stem must equal "_<suffix>".
            if os.path.splitext(os.path.basename(filename))[0][-(len(suffix) + 1):] == f'_{suffix}':
                file_source = os.path.join(split_class_dir, filename)
                file_dest = os.path.join(dest_class_dir, filename)
                shutil.copy(file_source, file_dest)
from keras.callbacks import Callback
from keras.models import model_from_json
from scipy.misc import imsave, imread, imresize
import numpy as np
import tensorflow as tf
import keras.backend as K
import os
import cv2
import shutil
def merge_dict_of_lists(dict1, dict2):
keys = set(dict1).union(dict2)
no = []
return dict((k, dict1.get(k, no) + dict2.get(k, no)) for k in keys)
def prepare_dataset(split_dirs, split_index, split_total):
training_dir = os.path.join('../tmp', 'training_data')
validation_dir = os.path.join('../tmp', 'validation_data')
if os.path.exists(validation_dir):
shutil.rmtree(validation_dir)
if os.path.exists(training_dir):
shutil.rmtree(training_dir)
os.makedirs(validation_dir)
os.makedirs(training_dir)
for index in range(split_total):
if index == split_index:
copytree(split_dirs[index], validation_dir)
else:
copytree(split_dirs[index], training_dir)
return training_dir, validation_dir
def get_sub_dirs(path):
root, *_ = os.walk(path)
return root[1]
def pad_string(string, size, fill=" ", edge="|"):
return f'{edge}{string}{((size - len(string)) * fill)}{edge}\n'
def copytree(sourceRoot, destRoot):
if not os.path.exists(destRoot):
return False
ok = True
for path, dirs, files in os.walk(sourceRoot):
relPath = os.path.relpath(path, sourceRoot)
destPath = os.path.join(destRoot, relPath)
if not os.path.exists(destPath):
os.makedirs(destPath)
for file in files:
destFile = os.path.join(destPath, file)
if os.path.isfile(destFile):
ok = False
continue
srcFile = os.path.join(path, file)
shutil.copy2(srcFile, destFile)
def split_data_on_suffix(test_suffix, data_dir):
training_dir = os.path.join(data_dir, 'training')
validation_dir = os.path.join(data_dir, 'validation')
class_names = get_sub_dirs(training_dir)
for class_name in class_names:
train_class_dir = os.path.join(training_dir, class_name)
valid_class_dir = os.path.join(validation_dir, class_name)
if not os.path.exists(valid_class_dir):
os.makedirs(valid_class_dir)
for filename in os.listdir(train_class_dir):
if os.path.splitext(os.path.basename(filename))[0][-(len(test_suffix)):] == test_suffix:
file_source = os.path.join(train_class_dir, filename)
file_dest = os.path.join(valid_class_dir, filename)
shutil.copy(file_source, file_dest)
return training_dir, validation_dir
def split_data(folds, data_dir):
class_names = get_sub_dirs(data_dir)
if os.path.exists('../tmp'):
shutil.rmtree('../tmp')
split_dirs = []
for index in range(folds):
split_dirs.append(os.path.join('../tmp', 'splits', f'split_{index}'))
for class_name in class_names:
class_dir = os.path.join(data_dir, class_name)
split_size = len(os.listdir(class_dir)) // folds
split_start = 0
split_end = split_size
for split_index in range(folds):
split_dir = os.path.join(split_dirs[split_index], class_name)
os.makedirs(split_dir)
for filename in os.listdir(class_dir)[split_start:split_end]:
file_source = os.path.join(class_dir, filename)
file_dest = os.path.join(split_dir, filename)
os.symlink(file_source, file_dest)
split_start += split_size
split_end += split_size
return split_dirs
def copysuffix(split_dir, dest_dir, suffix):
class_names = get_sub_dirs(split_dir)
for class_name in class_names:
split_class_dir = os.path.join(split_dir, class_name)
dest_class_dir = os.path.join(dest_dir, class_name)
if not os.path.exists(dest_class_dir):
os.makedirs(dest_class_dir)
for filename in os.listdir(split_class_dir):
if os.path.splitext(os.path.basename(filename))[0][-(len(suffix) + 1):] == f'_{suffix}':
file_source = os.path.join(split_class_dir, filename)
file_dest = os.path.join(dest_class_dir, filename)
shutil.copy(file_source, file_dest) | true | true |
1c3672dc5123c3d6dc052a1ac58ebe48fbfdee05 | 4,503 | py | Python | tests/models/validators/v3_0_0/jsd_d83302be1f7c528e8211524aeaacd66d.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 36 | 2021-05-18T16:24:19.000Z | 2022-03-05T13:44:41.000Z | tests/models/validators/v3_0_0/jsd_d83302be1f7c528e8211524aeaacd66d.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 15 | 2021-06-08T19:03:37.000Z | 2022-02-25T14:47:33.000Z | tests/models/validators/v3_0_0/jsd_d83302be1f7c528e8211524aeaacd66d.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 6 | 2021-06-10T09:32:01.000Z | 2022-01-12T08:34:39.000Z | # -*- coding: utf-8 -*-
"""Identity Services Engine getNetworkAccessDictionaryAttributesByDictionaryName data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD83302Be1F7C528E8211524Aeaacd66D(object):
    """getNetworkAccessDictionaryAttributesByDictionaryName request
    schema definition."""
    def __init__(self):
        """Compile the embedded JSON schema once at construction time."""
        super(JSONSchemaValidatorD83302Be1F7C528E8211524Aeaacd66D, self).__init__()
        # Machine-generated schema string; the trailing .replace() strips the
        # 16-space source indentation from every line before json.loads().
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                "response": {
                "items": {
                "properties": {
                "allowedValues": {
                "items": {
                "properties": {
                "isDefault": {
                "default": false,
                "type": "boolean"
                },
                "key": {
                "type": "string"
                },
                "value": {
                "type": "string"
                }
                },
                "required": [
                "key",
                "value"
                ],
                "type": "object"
                },
                "type": "array"
                },
                "dataType": {
                "enum": [
                "BOOLEAN",
                "DATE",
                "FLOAT",
                "INT",
                "IP",
                "IPV4",
                "IPV6",
                "IPV6INTERFACE",
                "IPV6PREFIX",
                "LONG",
                "OCTET_STRING",
                "STRING",
                "UNIT32",
                "UINT64"
                ],
                "type": "string"
                },
                "description":
                 {
                "type": "string"
                },
                "dictionaryName": {
                "type": "string"
                },
                "directionType": {
                "enum": [
                "IN",
                "OUT",
                "NONE",
                "BOTH"
                ],
                "type": "string"
                },
                "id": {
                "type": "string"
                },
                "internalName": {
                "type": "string"
                },
                "name": {
                "type": "string"
                }
                },
                "required": [
                "dataType",
                "internalName",
                "name"
                ],
                "type": "object"
                },
                "type": "array"
                },
                "version": {
                "type": "string"
                }
                },
                "required": [
                "response",
                "version"
                ],
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        """Validate `request` against the compiled schema.

        Raises:
            MalformedRequest: if the request does not conform to the schema.
        """
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| 30.632653 | 92 | 0.463469 |
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD83302Be1F7C528E8211524Aeaacd66D(object):
def __init__(self):
super(JSONSchemaValidatorD83302Be1F7C528E8211524Aeaacd66D, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"items": {
"properties": {
"allowedValues": {
"items": {
"properties": {
"isDefault": {
"default": false,
"type": "boolean"
},
"key": {
"type": "string"
},
"value": {
"type": "string"
}
},
"required": [
"key",
"value"
],
"type": "object"
},
"type": "array"
},
"dataType": {
"enum": [
"BOOLEAN",
"DATE",
"FLOAT",
"INT",
"IP",
"IPV4",
"IPV6",
"IPV6INTERFACE",
"IPV6PREFIX",
"LONG",
"OCTET_STRING",
"STRING",
"UNIT32",
"UINT64"
],
"type": "string"
},
"description":
{
"type": "string"
},
"dictionaryName": {
"type": "string"
},
"directionType": {
"enum": [
"IN",
"OUT",
"NONE",
"BOTH"
],
"type": "string"
},
"id": {
"type": "string"
},
"internalName": {
"type": "string"
},
"name": {
"type": "string"
}
},
"required": [
"dataType",
"internalName",
"name"
],
"type": "object"
},
"type": "array"
},
"version": {
"type": "string"
}
},
"required": [
"response",
"version"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| true | true |
1c3673553621c93211bfc7ebc739c0d8c7f40aff | 251 | py | Python | optimus/ml/contants.py | atwoodjw/Optimus | 938463cec41a6683d2077c9afc7d6ba05c3b993f | [
"Apache-2.0"
] | null | null | null | optimus/ml/contants.py | atwoodjw/Optimus | 938463cec41a6683d2077c9afc7d6ba05c3b993f | [
"Apache-2.0"
] | null | null | null | optimus/ml/contants.py | atwoodjw/Optimus | 938463cec41a6683d2077c9afc7d6ba05c3b993f | [
"Apache-2.0"
] | null | null | null | CLUSTER_COL = "cluster"
# Shared column-name constants used across the ml/clustering code.
# NOTE(review): meanings inferred from the names only -- confirm against callers.
COUNT_COL = "count"
RECOMMENDED_COL = "recommended"
CLUSTER_SIZE_COL = "cluster_size"
NGRAM_COL = "ngram"
FINGERPRINT_COL = "fingerprint"
NGRAM_FINGERPRINT_COL = "ngram_fingerprint"
LEVENSHTEIN_DISTANCE = "LEVENSHTEIN_DISTANCE" | 31.375 | 45 | 0.812749 | CLUSTER_COL = "cluster"
COUNT_COL = "count"
RECOMMENDED_COL = "recommended"
CLUSTER_SIZE_COL = "cluster_size"
NGRAM_COL = "ngram"
FINGERPRINT_COL = "fingerprint"
NGRAM_FINGERPRINT_COL = "ngram_fingerprint"
LEVENSHTEIN_DISTANCE = "LEVENSHTEIN_DISTANCE" | true | true |
1c36755e5436d63d8309c37538f048289c08c336 | 229 | py | Python | condominios/models.py | mpeyrotc/govector | 5429d538d0bcee4d95d9069dd397b3b5b35b504c | [
"MIT"
] | null | null | null | condominios/models.py | mpeyrotc/govector | 5429d538d0bcee4d95d9069dd397b3b5b35b504c | [
"MIT"
] | null | null | null | condominios/models.py | mpeyrotc/govector | 5429d538d0bcee4d95d9069dd397b3b5b35b504c | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
class Avisos (models.Model):
    """Django model for a condominium notice/announcement (aviso)."""
    # NOTE(review): `Id` is a plain IntegerField, not a primary key, so Django
    # will still add its own automatic `id` column -- confirm this is intended.
    Id = models.IntegerField()
    # Notice title (max 50 chars) and body/description (max 500 chars).
    Nombre = models.CharField(max_length=50)
    Descripcion = models.CharField(max_length=500)
| 20.818182 | 50 | 0.759825 | from __future__ import unicode_literals
from django.db import models
class Avisos (models.Model):
Id = models.IntegerField()
Nombre = models.CharField(max_length=50)
Descripcion = models.CharField(max_length=500)
| true | true |
1c367584e1fa968de99aab27875653fd9d2ac6f9 | 332 | py | Python | examples/basic/sys_example.py | SummerLife/micropython | de8072bc9eae02538f789b93a1711fed95dc77f1 | [
"MIT"
] | null | null | null | examples/basic/sys_example.py | SummerLife/micropython | de8072bc9eae02538f789b93a1711fed95dc77f1 | [
"MIT"
] | null | null | null | examples/basic/sys_example.py | SummerLife/micropython | de8072bc9eae02538f789b93a1711fed95dc77f1 | [
"MIT"
] | 1 | 2019-01-17T13:47:08.000Z | 2019-01-17T13:47:08.000Z | #
# Copyright (c) 2006-2019, RT-Thread Development Team
#
# SPDX-License-Identifier: MIT License
#
# Change Logs:
# Date Author Notes
# 2019-06-13 SummerGift first version
#
import sys

# Print basic interpreter and platform facts, one per line -- same output
# as calling print() on each attribute individually.
for info in (
    sys.version,
    sys.version_info,
    sys.path,
    sys.__name__,
    sys.platform,
    sys.byteorder,
):
    print(info)
| 17.473684 | 53 | 0.698795 |
import sys
print(sys.version)
print(sys.version_info)
print(sys.path)
print(sys.__name__)
print(sys.platform)
print(sys.byteorder)
| true | true |
1c36783d030b7b9f75ee3e4e616549c3c6ba72bd | 583 | py | Python | part2/servers/simple/simple_client.py | Coffee-fan/ThriftBook | 5d0bf01811b898d24b0c4ff2cdb2e53fcb41a8ba | [
"Apache-2.0"
] | 103 | 2015-03-25T05:41:05.000Z | 2021-12-08T07:11:37.000Z | part2/servers/simple/simple_client.py | Coffee-fan/ThriftBook | 5d0bf01811b898d24b0c4ff2cdb2e53fcb41a8ba | [
"Apache-2.0"
] | 15 | 2015-02-06T10:21:57.000Z | 2020-10-02T19:16:15.000Z | part2/servers/simple/simple_client.py | Coffee-fan/ThriftBook | 5d0bf01811b898d24b0c4ff2cdb2e53fcb41a8ba | [
"Apache-2.0"
] | 43 | 2015-01-27T21:13:52.000Z | 2021-12-02T03:59:22.000Z | #!/usr/bin/env python
import sys
sys.path.append("gen-py")
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from simple import Message
trans = TSocket.TSocket("localhost", 9090)
trans = TTransport.TBufferedTransport(trans)
proto = TBinaryProtocol.TBinaryProtocol(trans)
client = Message.Client(proto)
trans.open()
while True:
print("[Client] received: %s" % client.motd())
line = raw_input("Enter 'q' to exit, anything else to continue: ")
if line == 'q':
break
trans.close()
| 24.291667 | 70 | 0.723842 |
import sys
sys.path.append("gen-py")
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from simple import Message
trans = TSocket.TSocket("localhost", 9090)
trans = TTransport.TBufferedTransport(trans)
proto = TBinaryProtocol.TBinaryProtocol(trans)
client = Message.Client(proto)
trans.open()
while True:
print("[Client] received: %s" % client.motd())
line = raw_input("Enter 'q' to exit, anything else to continue: ")
if line == 'q':
break
trans.close()
| true | true |
1c36784a322b882ed6cb4f0a486753cc02de5b3f | 32,580 | py | Python | lingvo/core/hyperparams.py | xsppp/gpipe_with_Mnist | 4486e675c7b52c7519a6d39f97e9b22ed5461944 | [
"Apache-2.0"
] | null | null | null | lingvo/core/hyperparams.py | xsppp/gpipe_with_Mnist | 4486e675c7b52c7519a6d39f97e9b22ed5461944 | [
"Apache-2.0"
] | null | null | null | lingvo/core/hyperparams.py | xsppp/gpipe_with_Mnist | 4486e675c7b52c7519a6d39f97e9b22ed5461944 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines Params base class, used for defining class/function parameters."""
import ast
import copy
import enum
import importlib
import inspect
import re
import sys
from typing import Any, TypeVar, Generic, Sequence
import dataclasses
import lingvo.compat as tf
from lingvo.core import hyperparams_pb2
from lingvo.core import symbolic
from google.protobuf import message
from google.protobuf import text_format
def _QuoteString(s):
"""Quotes a string with appropriate quotes and escaping.
This performs lite escaping by choosing enclosing quotation marks that would
escape the least (either single or double quotes) and escaping those quotes
and the backslash. Note that this does not escape newlines. If the string
contains embedded newlines, they will be output verbatim.
Args:
s: String to quote.
Returns:
Quotes string (possibly multiline).
"""
single_quote_count = s.count('\'')
double_quote_count = s.count('"')
quote_delim = '\'' if single_quote_count <= double_quote_count else '"'
# Apply escaping to the chosen quote character and the backslash.
encoded = re.sub(r'([%s\\])' % quote_delim, r'\\\1', s)
return quote_delim + encoded + quote_delim
def _UnquoteString(quoted):
if quoted and quoted[0] in ['"', '\'']:
# Note that only the limited set of escaping produced by _QuoteString is
# supported.
contents = quoted.strip(quoted[0])
return re.sub(r"""\\([\\'"])""", r'\1', contents)
else:
# Just return literal text.
return quoted
def _EndsWithTerminalQuote(s, quote_char):
"""Returns whether a string ends with a valid terminal quote."""
endm = re.search(r'(\\*)%s$' % quote_char, s)
if not endm:
return False
backslashes = endm.group(1)
if len(backslashes) % 2 == 0:
# Even number of backslashes preceding the quote means the quote is
# not escaped.
return True
else:
# Terminal quote is escaped.
return False
def _IsNamedTuple(x):
"""Returns whether an object is an instance of a collections.namedtuple.
Examples::
_IsNamedTuple((42, 'hi')) ==> False
Foo = collections.namedtuple('Foo', ['a', 'b'])
_IsNamedTuple(Foo(a=42, b='hi')) ==> True
Args:
x: The object to check.
"""
return isinstance(x, tuple) and hasattr(x, '_fields')
class _SortedDict(dict):
"""A dict with a __repr__ that is always sorted by key."""
def __repr__(self):
return '{' + ', '.join(
'%r: %r' % item for item in sorted(self.items())) + '}'
class _Param:
  """Stores data for a single parameter (name, current value, description)."""

  def __init__(self, name, default_value, description):
    self._name = name
    self._value = default_value
    self._description = description

  def __eq__(self, other):
    # Equality compares only name and value; the description is ignored.
    # pylint: disable=protected-access
    return self._name == other._name and self._value == other._value

  # Deep copy the value only if it is supported.
  def __deepcopy__(self, memo):
    if isinstance(self._value, (tf.Tensor, symbolic.Symbol)):
      # In case self._value is a tensor/symbol, let's just make a reference.
      value = self._value
    else:
      value = copy.deepcopy(self._value, memo)
    p = _Param(self._name, value, self._description)
    # Q(yonghui): Is this the right use of memo.
    memo[id(self)] = p
    return p

  def ToString(self, nested_depth):
    """Prints the parameter as a string."""

    def GetRepr(val):
      """Get the representation of `val`."""
      if isinstance(val, Params):
        return _SortedDict({k: GetRepr(v) for k, v in val.IterParams()})
      if isinstance(val, dict):
        return _SortedDict({k: GetRepr(v) for k, v in val.items()})
      if isinstance(val, (list, tuple)) and not _IsNamedTuple(val):
        # NB: this constructor signature works for tuples, but not namedtuples.
        return type(val)([GetRepr(v) for v in val])
      # NOTE(markmurphy): I introduced Repr() because it's impossible (afaik) to
      # overwrite the __str__ or __repr__ method of a types.FunctionType object.
      if hasattr(val, 'Repr'):
        return val.Repr()
      return val

    nested_indent = ' ' * nested_depth
    if isinstance(self._value, Params):
      # Nested Params render their own multi-line block at this depth.
      # pylint: disable=protected-access
      value_str = self._value._ToString(nested_depth)
    elif isinstance(self._value, str):
      # Strings are always rendered double-quoted, without escaping.
      return '%s%s: "%s"' % (nested_indent, self._name, self._value)
    else:
      value_str = str(GetRepr(self._value))
    return '%s%s: %s' % (nested_indent, self._name, value_str)

  def Set(self, value):
    """Stores `value` by reference (no copy is made)."""
    # Note that we don't make a copy of Params objects.
    # TODO(sadovsky): Maybe add safeguard to ensure that Params object is not
    # owned by other Params objects.
    self._value = value

  def Get(self):
    """Returns the current value."""
    return self._value
def CopyFieldsTo(from_p, to_p, skip=None):
  """Copy fields from one Params to another, with optional skipped params.

  Preserves `type(to_p.Instantiate())`. Use `from_p.Copy()` instead if requiring
  a deep copy of `from_p`, without updating `to_p`.

  Args:
    from_p: Source params to copy from.
    to_p: Destination params to copy to.
    skip: If not None, a list of strings of param names to skip. Automatically
      skips InstantiableParams' 'cls' parameter.

  Returns:
    The updated to_p.
  """
  # Work on a local copy: the original code appended 'cls' to the caller's
  # list in place, so the argument grew on every call.
  skip = list(skip) if skip else []
  skip.append('cls')
  for n, p in from_p.IterParams():
    if n in skip:
      continue
    if isinstance(p, Params):
      # Nested Params are deep-copied so the two trees stay independent.
      to_p.Set(**{n: p.Copy()})
    else:
      to_p.Set(**{n: p})
  return to_p
class Params:
  """Stores data for a set of parameters.
  Provides attribute-based API, e.g. "params.foo = 5".
  Uses internal {'name': _Param} dict for storing parameter data.
  """
  def __init__(self):
    # Set via __dict__ directly to bypass our own __setattr__.
    self.__dict__['_immutable'] = False
    self._params = {} # name => _Param
  def __setattr__(self, name, value):
    if self._immutable:
      raise TypeError('This Params instance is immutable.')
    if name == '_params' or name == '_immutable':
      self.__dict__[name] = value
    else:
      try:
        self._params[name].Set(value)
      except KeyError:
        # NOTE(review): setting an unknown parameter name is silently
        # ignored here; upstream Lingvo raises the AttributeError that is
        # commented out below — confirm this relaxation is intentional.
        pass
        #raise AttributeError(self._KeyErrorString(name))
  def __getattr__(self, name):
    if name == '_params' or name == '_immutable':
      return self.__dict__[name]
    try:
      return self._params[name].Get()
    except KeyError:
      # cPickle expects __getattr__ to raise AttributeError, not KeyError.
      raise AttributeError(self._KeyErrorString(name))
  def __dir__(self):
    return sorted(self._params.keys())
  def __contains__(self, name):
    return name in self._params
  def __len__(self):
    return len(self._params)
  # Note: This gets called by _Param.__eq__() on nested Params objects.
  def __eq__(self, other):
    return isinstance(other, Params) and self._params == other._params # pylint: disable=protected-access
  def __ne__(self, other):
    return not self == other
  def __str__(self):
    return self._ToString(0)
  def _ToString(self, nested_depth):
    # Note: We use iteritems() below so as to sort by name.
    sorted_param_strs = [
        v.ToString(nested_depth + 1) for (_, v) in sorted(self._params.items())
    ]
    nested_indent = ' ' * nested_depth
    return '{\n%s\n%s}' % ('\n'.join(sorted_param_strs), nested_indent)
  # Override __deepcopy__ so that copy.deepcopy(self._params) properly
  # deep-copies nested Params objects.
  # TODO(sadovsky): Is it okay not to touch memo?
  def __deepcopy__(self, unused_memo):
    return self.Copy()
  def _SimilarKeys(self, name):
    """Return a list of params keys that are similar to name."""
    def _Overlaps(name, key):
      """The fraction of 3-char substrings in <name> that appear in key."""
      matches = 0
      trials = 0
      for i in range(len(name) - 3):
        trials += 1
        if name[i:i + 3] in key:
          matches += 1
      if trials:
        return float(matches) / trials
      return 0
    if '_params' in self.__dict__:
      # Keys sharing more than half their trigrams with `name` are "similar".
      return [key for key in self._params if _Overlaps(name, key) > 0.5]
    return []
  def _KeyErrorString(self, name):
    similar = self._SimilarKeys(name)
    if similar:
      return name + ' (did you mean: [%s])' % (','.join(sorted(similar)))
    if '_params' in self.__dict__:
      return name + ' (keys are %s)' % self._params.keys()
    return name
  def Copy(self):
    """Creates a deep copy of self."""
    return self._CopyTo(type(self)())
  def _CopyTo(self, res):
    # pylint: disable=protected-access
    res._params = copy.deepcopy(self._params)
    res._immutable = self._immutable
    # pylint: enable=protected-access
    return res
  # TODO(sadovsky):
  # - Maybe let users specify whether this parameter is allowed to have
  # value=None, and if not, assert on Get(), like required proto field.
  # - Maybe enforce that value is one of
  # {number, string, bool, list, dict, Params}.
  def Define(self, name, default_value, description):
    """Defines a parameter.
    Args:
      name: The parameter name. Must only contain lowercase letters, numbers,
        and underscores. Must start with lowercase letter.
      default_value: Default value for this parameter. May be None.
      description: String description of this parameter.
    Raises:
      AttributeError: If parameter 'name' is already defined.
    """
    if self._immutable:
      raise TypeError('This Params instance is immutable.')
    assert name is not None and isinstance(
        name, str) and (re.match('^[a-z][a-z0-9_]*$', name) is not None)
    if name in self._params:
      raise AttributeError('Parameter %s is already defined' % name)
    self._params[name] = _Param(name, default_value, description)
  def Freeze(self):
    """Marks this Params as immutable."""
    self._immutable = True
  def IsImmutable(self):
    """Return whether this Params is immutable."""
    return self._immutable
  def _GetNested(self, name):
    """Returns nested param by its name."""
    parts = name.split('.')
    curr = self
    for i, part in enumerate(parts[:-1]):
      # Get the value (nested Params object) associated with name 'part'.
      try:
        # 'foo[3]' navigates into element 3 of the list param 'foo'.
        is_list = re.match(r'^(.+)\[(.+)\]$', part)
        if is_list:
          part = is_list.group(1)
          list_index = int(is_list.group(2))
        # pylint: disable=protected-access
        curr = curr._params[part].Get()
        if is_list:
          curr = curr[list_index]
      except KeyError:
        raise AttributeError('.'.join(parts[:i + 1]))
      assert isinstance(curr, Params), (
          'Cannot introspect %s for %s' % (type(curr), '.'.join(parts[:i + 1])))
    return curr, parts[-1]
  def Set(self, **kwargs):
    """Sets multiple parameters.
    Dots in names indicate navigation into nested Params objects. We do not
    allow navigation into lists or dicts, and may ban these types altogether in
    favor of string representations.
    Args:
      **kwargs: Name-value pairs to set.
    Returns:
      self
    """
    if self._immutable:
      raise TypeError('This Params instance is immutable: %s' % self)
    for name, value in kwargs.items():
      # Get nested param.
      param, key = self._GetNested(name)
      # Update the value associated with key.
      try:
        # pylint: disable=protected-access
        param._params[key].Set(value)
      except KeyError:
        raise AttributeError(self._KeyErrorString(name))
    return self
  def Get(self, name):
    """Get parameter.
    Dots in names indicate navigation into nested Params objects. We do not
    allow navigation into lists or dicts, and may ban these types altogether in
    favor of string representations.
    Args:
      name: (str) Name.
    Returns:
      value.
    Raises:
      AttributeError: if parameter is not found
    """
    param, key = self._GetNested(name)
    # Get the value associated with key.
    try:
      # pylint: disable=protected-access
      return param._params[key].Get()
    except KeyError:
      raise AttributeError(self._KeyErrorString(name))
  def Delete(self, *args):
    """Deletes multiple parameters.
    Dots in names indicate navigation into nested Params objects. We do not
    allow navigation into lists or dicts, and may ban these types altogether in
    favor of string representations.
    Args:
      *args: List of names.
    Returns:
      self
    """
    if self._immutable:
      raise TypeError('This Params instance is immutable.')
    for name in args:
      # Get nested param.
      param, key = self._GetNested(name)
      # Delete the key.
      try:
        # pylint: disable=protected-access
        del param._params[key]
      except KeyError:
        raise AttributeError(self._KeyErrorString(name))
    return self
  def IterParams(self):
    """Pythonic dict-like iteration."""
    for name, param in self._params.items():
      yield (name, param.Get())
  def ToProto(self):
    """Writes to a Hyperparams proto.
    Serializes the Hyperparams into a proto that can be then written to disk or
    sent over the network. Note that serialization is not guaranteed to be
    unique or stable (this is a feature of protos themselves, not this code), so
    using it for fingerprinting for example may not be appropriate. Refer to the
    ToText() method for a serialization approach that Lingvo controls.
    Returns:
      The serialized params as a Hyperparams proto.
    """
    def _ToParamValue(val):
      """Serializes to HyperparamValue proto."""
      param_pb = hyperparams_pb2.HyperparamValue()
      if isinstance(val, Params):
        param_pb.param_val.CopyFrom(_ToParam(val))
      elif isinstance(val, list) or isinstance(val, range):
        # The range function is serialized by explicitly materializing its
        # elements into a list.
        param_pb.list_val.items.extend([_ToParamValue(v) for v in val])
      elif dataclasses.is_dataclass(val) or _IsNamedTuple(val):
        val_cls = type(val)
        vals = val.__dict__.values() if dataclasses.is_dataclass(
            val) else val._asdict().values()
        # Type is recorded as "module/ClassName" so FromProto can re-import it.
        param_pb.named_tuple_val.type = inspect.getmodule(
            val_cls).__name__ + '/' + val_cls.__name__
        param_pb.named_tuple_val.items.extend([_ToParamValue(v) for v in vals])
      elif isinstance(val, tuple):
        param_pb.tuple_val.items.extend([_ToParamValue(v) for v in val])
      elif isinstance(val, dict):
        param_pb.dict_val.SetInParent()
        for k, v in val.items():
          param_pb.dict_val.items[k].CopyFrom(_ToParamValue(v))
      elif isinstance(val, type):
        param_pb.type_val = inspect.getmodule(val).__name__ + '/' + val.__name__
      elif isinstance(val, tf.DType):
        param_pb.dtype_val = val.name
      elif isinstance(val, str):
        param_pb.string_val = val
      elif isinstance(val, bool):
        # Note: bool must be tested before int (bool is a subclass of int).
        param_pb.bool_val = val
      elif isinstance(val, int):
        param_pb.int_val = val
      elif isinstance(val, float):
        param_pb.float_val = val
      elif isinstance(val, enum.Enum):
        enum_cls = type(val)
        param_pb.enum_val.type = inspect.getmodule(
            enum_cls).__name__ + '/' + enum_cls.__name__
        param_pb.enum_val.name = val.name
      elif isinstance(val, message.Message):
        proto_cls = type(val)
        param_pb.proto_val.type = inspect.getmodule(
            proto_cls).__name__ + '/' + proto_cls.__name__
        param_pb.proto_val.val = val.SerializeToString()
      elif val is None:
        # We represent a NoneType by the absence of any of the oneof.
        pass
      else:
        raise AttributeError('Unsupported type: %s for value %s' %
                             (type(val), val))
      return param_pb
    def _ToParam(val):
      """Serializes to Hyperparam proto."""
      param_pb = hyperparams_pb2.Hyperparam()
      for k, v in val.IterParams():
        param_pb.items[k].CopyFrom(_ToParamValue(v))
      return param_pb
    return _ToParam(self)
  # TODO(tonybruguier): Move to module-level function (cls is never used).
  @classmethod
  def FromProto(cls, param_pb):
    """Reads from a Hyperparams proto."""
    def _LoadClass(module_and_class_name):
      # "module/ClassName" -> the class object, importing the module.
      tokens = module_and_class_name.split('/')
      assert len(tokens) == 2, module_and_class_name
      return getattr(importlib.import_module(tokens[0]), tokens[1])
    def _FromParamValue(param_pb):
      """Deserializes HyperparamValue proto."""
      which_oneof = param_pb.WhichOneof('kind')
      if which_oneof == 'param_val':
        return _FromParam(param_pb.param_val)
      elif which_oneof == 'list_val':
        return [_FromParamValue(val) for val in param_pb.list_val.items]
      elif which_oneof == 'named_tuple_val':
        named_tuple_cls = _LoadClass(param_pb.named_tuple_val.type)
        if not dataclasses.is_dataclass(named_tuple_cls) and not issubclass(
            named_tuple_cls, tuple):
          return None
        return named_tuple_cls(
            *[_FromParamValue(val) for val in param_pb.named_tuple_val.items])
      elif which_oneof == 'tuple_val':
        return tuple([_FromParamValue(val) for val in param_pb.tuple_val.items])
      elif which_oneof == 'dict_val':
        dict_val = dict()
        for k in param_pb.dict_val.items:
          dict_val[k] = _FromParamValue(param_pb.dict_val.items[k])
        return dict_val
      elif which_oneof == 'type_val':
        tokens = param_pb.type_val.split('/')
        assert len(tokens) == 2
        return getattr(importlib.import_module(tokens[0]), tokens[1])
      elif which_oneof == 'dtype_val':
        return tf.as_dtype(param_pb.dtype_val)
      elif which_oneof == 'string_val':
        return param_pb.string_val
      elif which_oneof == 'int_val':
        return param_pb.int_val
      elif which_oneof == 'float_val':
        return param_pb.float_val
      elif which_oneof == 'bool_val':
        return param_pb.bool_val
      elif which_oneof == 'enum_val':
        enum_cls = _LoadClass(param_pb.enum_val.type)
        if not issubclass(enum_cls, enum.Enum):
          return None
        return enum_cls[param_pb.enum_val.name]
      elif which_oneof == 'proto_val':
        proto_cls = _LoadClass(param_pb.proto_val.type)
        if not issubclass(proto_cls, message.Message):
          return None
        proto_msg = proto_cls()
        proto_msg.ParseFromString(param_pb.proto_val.val)
        return proto_msg
      else:
        # If nothing is set, it's the None type.
        return None
    def _FromParam(param_pb):
      """Deserializes Hyperparam proto."""
      # The presence of a 'cls' item marks an InstantiableParams.
      params = InstantiableParams() if 'cls' in param_pb.items else Params()
      for k in param_pb.items:
        val = _FromParamValue(param_pb.items[k])
        if k == 'cls':
          params.Set(**{k: val})
        else:
          params.Define(k, val, '')
      return params
    return _FromParam(param_pb)
  def ToText(self, include_types=False):
    """Encodes params into a simple text format.
    Each param is represented as a single line in the output. The param
    name and value is separated by a ":". The nest param name is
    separated by ".". For values of non-trivial types (types other than
    int, float, bool, str, and a few, etc.), we just print out the name
    of its type.
    Note that strings are enclosed in appropriate single or double quotes
    (whichever would involve the least escaping) and will have some characters
    backslash escaped. String properties can span multiple lines.
    Args:
      include_types: Should we return types of the values. If True, the types
        dict will be returned as a second val in a return tuple
    Returns:
      The encoded text or (encoded text, types dict) if include_types is True.
    """
    kv = {}
    types = {}
    def GetRepr(val):
      """Get the representation of `val`."""
      if isinstance(val, Params):
        return _SortedDict({k: GetRepr(v) for k, v in val.IterParams()})
      if isinstance(val, dict):
        return _SortedDict({k: GetRepr(v) for k, v in val.items()})
      if dataclasses.is_dataclass(val):
        return _SortedDict({k: GetRepr(v) for k, v in val.__dict__.items()})
      if _IsNamedTuple(val):
        return _SortedDict({k: GetRepr(v) for k, v in val._asdict().items()})
      if isinstance(val, (list, tuple)):
        return type(val)([GetRepr(v) for v in val])
      if isinstance(val, (int, float, bool, str, enum.Enum)):
        return val
      if isinstance(val, tf.DType):
        return val.name
      if isinstance(val, message.Message):
        proto_str = text_format.MessageToString(val, as_one_line=True)
        return 'proto/%s/%s/%s' % (inspect.getmodule(val).__name__,
                                   type(val).__name__, proto_str)
      if isinstance(val, type):
        return 'type/' + inspect.getmodule(val).__name__ + '/' + val.__name__
      # Fallback: non-trivial values are represented by their type name only.
      return type(val).__name__
    def Traverse(p, prefix, kv):
      """Traverses 'p' and inserts key-value pairs to 'kv'."""
      if isinstance(p, Params):
        for key, val in p.IterParams():
          Traverse(val, prefix + '.' + key, kv)
      elif (isinstance(p, (list, tuple)) and
            all(isinstance(x, Params) for x in p)):
        for i, val in enumerate(p):
          Traverse(val, '%s[%d]' % (prefix, i), kv)
      elif isinstance(p, str):
        kv[prefix] = _QuoteString(p)
        types[prefix[1:]] = 'str'
      else:
        kv[prefix] = str(GetRepr(p))
        types[prefix[1:]] = type(p).__name__
    Traverse(self, '', kv)
    ret = ''
    # k[1:] drops the leading '.' produced by the root Traverse call.
    for (k, v) in sorted(kv.items()):
      ret += k[1:] + ' : ' + v + '\n'
    return (ret, types) if include_types else ret
  def FromText(self, text, type_overrides=None):
    """Merges params specified in 'text' into 'params'.
    'text' follows the simple text format as produced by ToText.
    For a param specified in both 'params' and 'text', overwrites the value in
    'params' according to 'text'. Params specified in 'text' but not in 'params'
    are ignored.
    Args:
      text: A text representation of params.
      type_overrides: Overrides for the types of the params.
    Raises:
      AttributeError: text contains invalid parameter key
      ValueError: text contains invalid parameter value, or the format is
        wrong.
    """
    if self._immutable:
      raise TypeError('This Params instance is immutable.')
    kv = {}
    type_overrides = type_overrides or {}
    string_continue = None # None or (key, quote, value)
    for line in text.split('\n'):
      # Continuing a multi-line string.
      if string_continue:
        value_stripped = line.rstrip()
        if not _EndsWithTerminalQuote(value_stripped, string_continue[1]):
          # String continues
          string_continue = (string_continue[0], string_continue[1],
                             string_continue[2] + '\n' + line)
          continue
        # String terminates.
        kv[string_continue[0]] = string_continue[2] + '\n' + value_stripped
        string_continue = None
        continue
      # Regular line.
      line = line.strip()
      if not line or line[0] == '#':
        # empty line or comment
        continue
      pair = line.split(':', 1)
      if len(pair) == 2:
        key = pair[0].strip()
        value = pair[1].lstrip()
        value_stripped = value.rstrip()
        # Detect single vs multi-line string start.
        if value and value[0] in ['"', '\'']:
          quote_char = value[0]
          if not _EndsWithTerminalQuote(value[1:], quote_char):
            # Multi-line string.
            string_continue = (key, quote_char, value)
            continue
        kv[key] = value_stripped
      else:
        raise ValueError('Line {} is not in <key>:<value> format'.format(line))
    def _ValueFromText(key, old_val, val):
      """Returns the new param value from its text representation."""
      # The target type is inferred from the existing value, unless an
      # explicit override was supplied (e.g. by FromTextWithTypes).
      val_type = type(old_val).__name__
      if isinstance(old_val, str):
        val_type = 'str'
      if key in type_overrides:
        val_type = type_overrides[key]
      # Converts val (a string) to a best-guessed typed value.
      if val_type == 'bool':
        # Any non-empty string other than 'False'/'false' parses as True.
        return val and (val != 'False') and (val != 'false')
      elif val_type == 'int':
        return int(val)
      elif val_type == 'float':
        return float(val)
      elif val_type == 'DType':
        return tf.as_dtype(val)
      elif dataclasses.is_dataclass(old_val) or _IsNamedTuple(old_val):
        # Maps field name to new value (or its string repr, if non-POD).
        name_to_new_value = ast.literal_eval(val)
        contents = {}
        items = old_val.__dict__.items() if dataclasses.is_dataclass(
            old_val) else old_val._asdict().items()
        for k, old_field_value in items:
          new_field_value = name_to_new_value[k]
          # Recurse to parse any non-POD contents not converted by
          # literal_eval().
          if isinstance(new_field_value, str):
            contents[k] = _ValueFromText(k, old_field_value, new_field_value)
          else:
            contents[k] = new_field_value
        return type(old_val)(**contents)
      elif val_type in ['list', 'tuple']:
        return ast.literal_eval(val)
      elif val_type == 'dict':
        return ast.literal_eval(val) if val != 'dict' else {}
      elif val_type == 'str':
        val = _UnquoteString(val)
        if val.startswith('[') and val.endswith(']'):
          # We may have stored a list as a string, try converting to a list.
          # In case of ValueError - use the string as is.
          try:
            return ast.literal_eval(val)
          except ValueError:
            pass
        return val
      elif isinstance(old_val, enum.Enum):
        cls, _, name = val.rpartition('.')
        if val_type != cls:
          raise ValueError('Expected enum of class %s but got %s' %
                           (val_type, cls))
        return type(old_val)[name]
      elif (isinstance(old_val, type) or isinstance(old_val, message.Message) or
            old_val is None):
        if val == 'NoneType':
          return None
        elif old_val is None and val in ('False', 'false'):
          return False
        elif old_val is None and val in ('True', 'true'):
          return True
        else:
          try:
            # Expected formats: 'type/<module>/<Class>' or
            # 'proto/<module>/<Class>/<text proto>'.
            val_type, pkg, cls = val.split('/', 2)
            if val_type == 'type':
              return getattr(sys.modules[pkg], cls)
            elif val_type == 'proto':
              cls, proto_str = cls.split('/', 1)
              proto_cls = getattr(sys.modules[pkg], cls)
              if not issubclass(proto_cls, message.Message):
                raise ValueError('%s is not a proto class.' % proto_cls)
              return text_format.Parse(proto_str, proto_cls())
          except ValueError as e:
            raise ValueError('Error processing %r : %r with %r' % (key, val, e))
      else:
        raise ValueError('Failed to read a parameter: %r : %r' % (key, val))
    for key, val in kv.items():
      old_val = self.Get(key)
      new_val = _ValueFromText(key, old_val, val)
      self.Set(**{key: new_val})
  def ToTextWithTypes(self):
    """Same as ToText but encodes both params and their types."""
    text, types = self.ToText(include_types=True)
    text += '\n\n'
    for (k, v) in sorted(types.items()):
      text += k + ' : ' + v + '\n'
    return text
  def FromTextWithTypes(self, text):
    """Same as FromText but expects to have types encoded in the text."""
    # The params section and the types section are separated by a blank line.
    text, types_str = text.split('\n\n')
    types = {}
    for row in types_str.split('\n'):
      if not row:
        continue
      k, v = row.split(':')
      types[k.strip()] = v.strip()
    self.FromText(text, type_overrides=types)
  def TextDiff(self, other):
    """Return the differences between this object and another as a string.
    Args:
      other: The other Params object.
    Returns:
      A string of differences.
    """
    def IsStringy(x) -> bool:
      return isinstance(x, (str, bytes))
    def TextDiffHelper(a, b, key: str, spaces: str) -> str:
      """Return the differences between a and b as a string."""
      if a == b:
        return ''
      if isinstance(a, Params) and isinstance(b, Params):
        diff = ''
        diff += '?' + spaces + key + ':\n'
        diff += TextDiffParamsHelper(a, b, spaces + ' ')
        return diff
      # Duck-type for sequences via len(); strings are excluded below so
      # they diff as scalars rather than element-by-element.
      sequences = False
      try:
        len(a)
        len(b)
        sequences = True
      except TypeError:
        pass
      if sequences and not IsStringy(a) and not IsStringy(b):
        return TextDiffSequenceHelper(a, b, key, spaces)
      diff = ''
      diff += '>' + spaces + key + ': ' + str(a) + '\n'
      diff += '<' + spaces + key + ': ' + str(b) + '\n'
      return diff
    def TextDiffSequenceHelper(a: Sequence[Any], b: Sequence[Any], key: str,
                               spaces: str):
      """Return the differences between a and b as a string."""
      diff = ''
      for i in range(max([len(a), len(b)])):
        key_i = f'{key}[{i}]'
        if i < len(a) and i < len(b):
          diff += TextDiffHelper(a[i], b[i], key_i, spaces)
        elif i < len(a):
          diff += '>' + spaces + key_i + ': ' + str(a[i]) + '\n'
        else:
          diff += '<' + spaces + key_i + ': ' + str(b[i]) + '\n'
      return diff
    def TextDiffParamsHelper(a: Params, b: Params, spaces: str) -> str:
      """Return the differences between a and b as a string."""
      a_keys = set([key for key, _ in a.IterParams()])
      b_keys = set([key for key, _ in b.IterParams()])
      all_keys = a_keys.union(b_keys)
      diff = ''
      for key in sorted(all_keys):
        if key in a_keys and key not in b_keys:
          diff += '>' + spaces + key + ': ' + str(a.Get(key)) + '\n'
        elif key in b_keys and key not in a_keys:
          diff += '<' + spaces + key + ': ' + str(b.Get(key)) + '\n'
        elif a.Get(key) != b.Get(key):
          diff += TextDiffHelper(a.Get(key), b.Get(key), key, spaces)
      return diff
    return TextDiffParamsHelper(self, other, spaces=' ')
T = TypeVar('T')
class InstantiableParams(Params, Generic[T]):
  """Params that know which class they configure and can build an instance.

  Callers supply a class whose constructor accepts a Params object; the
  params then act as a factory for that class via Instantiate().  By
  convention anything that parameterizes the class's behavior lives in the
  Params; shared non-parameter state (e.g. a lock) is passed to Instantiate
  as extra keyword arguments.
  """

  def __init__(self, cls: T = None):
    super().__init__()
    self.Define('cls', cls, 'Cls that this param object is associated with.')

  def Instantiate(self, **args) -> T:
    """Constructs an instance of `self.cls` configured by this Params.

    Typical usage::

      params = MyObject.Params()
      params.weight = 0.9
      obj = params.Instantiate()

    Extra keyword arguments are forwarded to the constructor alongside this
    Params object, e.g. ``params.Instantiate(lock=shared_lock)``.

    Args:
      **args: Additional keyword arguments passed through to the constructor.

    Returns:
      A new object with type(object) == self.cls.
    """
    target_cls = self.cls
    assert target_cls is not None
    # By convention the target class accepts the Params as first argument.
    return target_cls(self, **args)

  def Copy(self) -> 'InstantiableParams[T]':
    """Returns a deep copy, preserving the associated cls."""
    return self._CopyTo(type(self)(self.cls))
| 34.696486 | 106 | 0.631369 |
import ast
import copy
import enum
import importlib
import inspect
import re
import sys
from typing import Any, TypeVar, Generic, Sequence
import dataclasses
import lingvo.compat as tf
from lingvo.core import hyperparams_pb2
from lingvo.core import symbolic
from google.protobuf import message
from google.protobuf import text_format
def _QuoteString(s):
  """Quotes `s` using whichever quote char (single/double) needs least escaping.

  Only the chosen quote character and backslashes are escaped; embedded
  newlines are emitted verbatim, so the result may span multiple lines.
  """
  single_quote_count = s.count('\'')
  double_quote_count = s.count('"')
  # Ties go to single quotes.
  quote_delim = '\'' if single_quote_count <= double_quote_count else '"'
  # Backslash-escape the chosen delimiter and the backslash itself.
  encoded = re.sub(r'([%s\\])' % quote_delim, r'\\\1', s)
  return quote_delim + encoded + quote_delim
def _UnquoteString(quoted):
  """Reverses _QuoteString: strips enclosing quotes and undoes escaping.

  Strings that do not start with a quote character are returned unchanged.
  """
  if quoted and quoted[0] in ['"', '\'']:
    # Note that only the limited set of escaping produced by _QuoteString is
    # supported.
    contents = quoted.strip(quoted[0])
    return re.sub(r"""\\([\\'"])""", r'\1', contents)
  else:
    # Just return literal, unquoted text as-is.
    return quoted
def _EndsWithTerminalQuote(s, quote_char):
  """Returns whether `s` ends with an unescaped (terminal) `quote_char`."""
  endm = re.search(r'(\\*)%s$' % quote_char, s)
  if not endm:
    return False
  backslashes = endm.group(1)
  if len(backslashes) % 2 == 0:
    # Even number of backslashes preceding the quote: the quote is real.
    return True
  else:
    # Odd number: the quote itself is escaped.
    return False
def _IsNamedTuple(x):
  """Returns whether `x` is an instance of a collections.namedtuple.

  Plain tuples return False; namedtuple instances (which define `_fields`)
  return True.
  """
  return isinstance(x, tuple) and hasattr(x, '_fields')
class _SortedDict(dict):
  """A dict whose __repr__ always lists items sorted by key."""
  def __repr__(self):
    return '{' + ', '.join(
        '%r: %r' % item for item in sorted(self.items())) + '}'
class _Param:
  """Stores data for a single parameter (name, current value, description)."""
  def __init__(self, name, default_value, description):
    self._name = name
    self._value = default_value
    self._description = description
  def __eq__(self, other):
    # Equality compares only name and value; the description is ignored.
    return self._name == other._name and self._value == other._value
  def __deepcopy__(self, memo):
    """Deep-copies the value only when that is supported."""
    if isinstance(self._value, (tf.Tensor, symbolic.Symbol)):
      # Tensors/symbols are kept by reference rather than copied.
      value = self._value
    else:
      value = copy.deepcopy(self._value, memo)
    p = _Param(self._name, value, self._description)
    # Q(yonghui): Is this the right use of memo.
    memo[id(self)] = p
    return p
  def ToString(self, nested_depth):
    """Renders the parameter as an indented 'name: value' string."""
    def GetRepr(val):
      """Returns a printable representation of `val`."""
      if isinstance(val, Params):
        return _SortedDict({k: GetRepr(v) for k, v in val.IterParams()})
      if isinstance(val, dict):
        return _SortedDict({k: GetRepr(v) for k, v in val.items()})
      if isinstance(val, (list, tuple)) and not _IsNamedTuple(val):
        # NB: this constructor signature works for tuples, but not namedtuples.
        return type(val)([GetRepr(v) for v in val])
      # NOTE(markmurphy): Repr() exists because the __str__/__repr__ of a
      # types.FunctionType object cannot be overwritten.
      if hasattr(val, 'Repr'):
        return val.Repr()
      return val
    nested_indent = ' ' * nested_depth
    if isinstance(self._value, Params):
      # Nested Params render their own multi-line block at this depth.
      value_str = self._value._ToString(nested_depth)
    elif isinstance(self._value, str):
      # Strings are always rendered double-quoted, without escaping.
      return '%s%s: "%s"' % (nested_indent, self._name, self._value)
    else:
      value_str = str(GetRepr(self._value))
    return '%s%s: %s' % (nested_indent, self._name, value_str)
  def Set(self, value):
    """Stores `value` by reference (no copy is made)."""
    # TODO(sadovsky): Maybe add safeguard to ensure that Params object is not
    # owned by other Params objects.
    self._value = value
  def Get(self):
    """Returns the current value."""
    return self._value
def CopyFieldsTo(from_p, to_p, skip=None):
  """Copies fields from `from_p` into `to_p`, skipping names in `skip`.

  The 'cls' parameter of InstantiableParams is always skipped, preserving
  `type(to_p.Instantiate())`.

  Args:
    from_p: Source Params.
    to_p: Destination Params (modified in place).
    skip: Optional list of parameter names to skip.

  Returns:
    The updated to_p.
  """
  # Work on a local copy: the original code appended 'cls' to the caller's
  # list in place, so the argument grew on every call.
  skip = list(skip) if skip else []
  skip.append('cls')
  for n, p in from_p.IterParams():
    if n in skip:
      continue
    if isinstance(p, Params):
      # Nested Params are deep-copied so the two trees stay independent.
      to_p.Set(**{n: p.Copy()})
    else:
      to_p.Set(**{n: p})
  return to_p
class Params:
  """A key-value store of named hyperparameters.

  Parameters are declared up front with `Define` and are afterwards read and
  written with plain attribute syntax (``p.foo = 1``) or with `Get`/`Set`,
  which additionally accept dotted paths (``'a.b.c'`` and ``'a[0].b'``) into
  nested `Params` objects.  An instance can be frozen (made read-only) with
  `Freeze`, deep-copied with `Copy`, and serialized to/from protos
  (`ToProto`/`FromProto`) and text (`ToText`/`FromText`).

  NOTE(review): this class relies on module-level helpers defined elsewhere in
  the file (`_Param`, `_SortedDict`, `_QuoteString`, `_UnquoteString`,
  `_EndsWithTerminalQuote`, `_IsNamedTuple`) and on the imports `copy`, `re`,
  `ast`, `sys`, `enum`, `inspect`, `importlib`, `dataclasses`, `tf`,
  `message`, `text_format`, and `hyperparams_pb2`.
  """

  def __init__(self):
    # Write directly into __dict__ to bypass our own __setattr__ for the
    # two bookkeeping attributes.
    self.__dict__['_immutable'] = False
    self._params = {}  # name => _Param

  def __setattr__(self, name, value):
    """Assigns to an already-defined parameter (or a bookkeeping field)."""
    if self._immutable:
      raise TypeError('This Params instance is immutable.')
    if name == '_params' or name == '_immutable':
      self.__dict__[name] = value
    else:
      try:
        self._params[name].Set(value)
      except KeyError:
        # NOTE(review): assignment to an *undefined* parameter is silently
        # ignored here — the original raise is commented out below, so typos
        # in parameter names go unnoticed.  Confirm this is intentional.
        pass
        #raise AttributeError(self._KeyErrorString(name))

  def __getattr__(self, name):
    """Returns the value of a defined parameter."""
    if name == '_params' or name == '_immutable':
      return self.__dict__[name]
    try:
      return self._params[name].Get()
    except KeyError:
      # cPickle expects __getattr__ to raise AttributeError, not KeyError.
      raise AttributeError(self._KeyErrorString(name))

  def __dir__(self):
    return sorted(self._params.keys())

  def __contains__(self, name):
    return name in self._params

  def __len__(self):
    return len(self._params)

  # Note: This gets called by _Param.__eq__() on nested Params objects.
  def __eq__(self, other):
    return isinstance(other, Params) and self._params == other._params  # pylint: disable=protected-access

  def __ne__(self, other):
    return not self == other

  def __str__(self):
    return self._ToString(0)

  def _ToString(self, nested_depth):
    """Renders the params as a brace-delimited block indented by depth."""
    # Note: We use iteritems() below so as to sort by name.
    sorted_param_strs = [
        v.ToString(nested_depth + 1) for (_, v) in sorted(self._params.items())
    ]
    nested_indent = ' ' * nested_depth
    return '{\n%s\n%s}' % ('\n'.join(sorted_param_strs), nested_indent)

  # Override __deepcopy__ so that copy.deepcopy(self._params) properly
  # deep-copies nested Params objects.
  # TODO(sadovsky): Is it okay not to touch memo?
  def __deepcopy__(self, unused_memo):
    return self.Copy()

  def _SimilarKeys(self, name):
    """Returns defined param names that look like a misspelling of `name`."""

    def _Overlaps(name, key):
      # Fraction of 3-grams of `name` that occur somewhere in `key`.
      matches = 0
      trials = 0
      for i in range(len(name) - 3):
        trials += 1
        if name[i:i + 3] in key:
          matches += 1
      if trials:
        return float(matches) / trials
      return 0

    if '_params' in self.__dict__:
      return [key for key in self._params if _Overlaps(name, key) > 0.5]
    return []

  def _KeyErrorString(self, name):
    """Builds a helpful error message for an unknown parameter name."""
    similar = self._SimilarKeys(name)
    if similar:
      return name + ' (did you mean: [%s])' % (','.join(sorted(similar)))
    if '_params' in self.__dict__:
      return name + ' (keys are %s)' % self._params.keys()
    return name

  def Copy(self):
    """Returns a deep copy of self (preserving the concrete subclass)."""
    return self._CopyTo(type(self)())

  def _CopyTo(self, res):
    # pylint: disable=protected-access
    res._params = copy.deepcopy(self._params)
    res._immutable = self._immutable
    # pylint: enable=protected-access
    return res

  # TODO(sadovsky):
  # - Maybe let users specify whether this parameter is allowed to have
  #   value=None, and if not, assert on Get(), like required proto field.
  # - Maybe enforce that value is one of
  #   {number, string, bool, list, dict, Params}.
  def Define(self, name, default_value, description):
    """Declares a new parameter.

    Args:
      name: lowercase snake_case identifier (enforced by the regex below).
      default_value: initial value of the parameter.
      description: human-readable description string.

    Raises:
      TypeError: if this instance has been frozen.
      AttributeError: if `name` is already defined.
    """
    if self._immutable:
      raise TypeError('This Params instance is immutable.')
    assert name is not None and isinstance(
        name, str) and (re.match('^[a-z][a-z0-9_]*$', name) is not None)
    if name in self._params:
      raise AttributeError('Parameter %s is already defined' % name)
    self._params[name] = _Param(name, default_value, description)

  def Freeze(self):
    """Marks this instance (but not nested Params values) as read-only."""
    self._immutable = True

  def IsImmutable(self):
    return self._immutable

  def _GetNested(self, name):
    """Resolves a dotted path, returning (owning Params, final key).

    Supports list indexing segments of the form ``part[index]``.
    """
    parts = name.split('.')
    curr = self
    for i, part in enumerate(parts[:-1]):
      # Get the value (nested Params object) associated with name 'part'.
      try:
        is_list = re.match(r'^(.+)\[(.+)\]$', part)
        if is_list:
          part = is_list.group(1)
          list_index = int(is_list.group(2))
        # pylint: disable=protected-access
        curr = curr._params[part].Get()
        if is_list:
          curr = curr[list_index]
      except KeyError:
        raise AttributeError('.'.join(parts[:i + 1]))
      assert isinstance(curr, Params), (
          'Cannot introspect %s for %s' % (type(curr), '.'.join(parts[:i + 1])))
    return curr, parts[-1]

  def Set(self, **kwargs):
    """Sets one or more (possibly dotted-path) parameters; returns self."""
    if self._immutable:
      raise TypeError('This Params instance is immutable: %s' % self)
    for name, value in kwargs.items():
      # Get nested param.
      param, key = self._GetNested(name)
      # Update the value associated with key.
      try:
        # pylint: disable=protected-access
        param._params[key].Set(value)
      except KeyError:
        raise AttributeError(self._KeyErrorString(name))
    return self

  def Get(self, name):
    """Returns the value of a (possibly dotted-path) parameter."""
    param, key = self._GetNested(name)
    # Get the value associated with key.
    try:
      # pylint: disable=protected-access
      return param._params[key].Get()
    except KeyError:
      raise AttributeError(self._KeyErrorString(name))

  def Delete(self, *args):
    """Removes the named parameters; returns self."""
    if self._immutable:
      raise TypeError('This Params instance is immutable.')
    for name in args:
      # Get nested param.
      param, key = self._GetNested(name)
      # Delete the key.
      try:
        # pylint: disable=protected-access
        del param._params[key]
      except KeyError:
        raise AttributeError(self._KeyErrorString(name))
    return self

  def IterParams(self):
    """Yields (name, value) pairs for all defined parameters."""
    for name, param in self._params.items():
      yield (name, param.Get())

  def ToProto(self):
    """Serializes this Params tree into a Hyperparam proto."""

    def _ToParamValue(val):
      """Converts one Python value into a HyperparamValue proto."""
      param_pb = hyperparams_pb2.HyperparamValue()
      if isinstance(val, Params):
        param_pb.param_val.CopyFrom(_ToParam(val))
      elif isinstance(val, list) or isinstance(val, range):
        # The range function is serialized by explicitly materializing it.
        param_pb.list_val.items.extend([_ToParamValue(v) for v in val])
      elif dataclasses.is_dataclass(val) or _IsNamedTuple(val):
        val_cls = type(val)
        vals = val.__dict__.values() if dataclasses.is_dataclass(
            val) else val._asdict().values()
        # Record the type as "module/ClassName" so it can be re-imported.
        param_pb.named_tuple_val.type = inspect.getmodule(
            val_cls).__name__ + '/' + val_cls.__name__
        param_pb.named_tuple_val.items.extend([_ToParamValue(v) for v in vals])
      elif isinstance(val, tuple):
        param_pb.tuple_val.items.extend([_ToParamValue(v) for v in val])
      elif isinstance(val, dict):
        param_pb.dict_val.SetInParent()
        for k, v in val.items():
          param_pb.dict_val.items[k].CopyFrom(_ToParamValue(v))
      elif isinstance(val, type):
        param_pb.type_val = inspect.getmodule(val).__name__ + '/' + val.__name__
      elif isinstance(val, tf.DType):
        param_pb.dtype_val = val.name
      elif isinstance(val, str):
        param_pb.string_val = val
      elif isinstance(val, bool):
        # bool must be tested before int: bool is a subclass of int.
        param_pb.bool_val = val
      elif isinstance(val, int):
        param_pb.int_val = val
      elif isinstance(val, float):
        param_pb.float_val = val
      elif isinstance(val, enum.Enum):
        enum_cls = type(val)
        param_pb.enum_val.type = inspect.getmodule(
            enum_cls).__name__ + '/' + enum_cls.__name__
        param_pb.enum_val.name = val.name
      elif isinstance(val, message.Message):
        proto_cls = type(val)
        param_pb.proto_val.type = inspect.getmodule(
            proto_cls).__name__ + '/' + proto_cls.__name__
        param_pb.proto_val.val = val.SerializeToString()
      elif val is None:
        # We represent a NoneType by the absence of any of the oneof.
        pass
      else:
        raise AttributeError('Unsupported type: %s for value %s' %
                             (type(val), val))
      return param_pb

    def _ToParam(val):
      param_pb = hyperparams_pb2.Hyperparam()
      for k, v in val.IterParams():
        param_pb.items[k].CopyFrom(_ToParamValue(v))
      return param_pb

    return _ToParam(self)

  # TODO(tonybruguier): Move to module-level function (cls is never used).
  @classmethod
  def FromProto(cls, param_pb):
    """Reconstructs a Params tree from a Hyperparam proto (inverse of ToProto)."""

    def _LoadClass(module_and_class_name):
      # Inverse of the "module/ClassName" encoding used during serialization.
      tokens = module_and_class_name.split('/')
      assert len(tokens) == 2, module_and_class_name
      return getattr(importlib.import_module(tokens[0]), tokens[1])

    def _FromParamValue(param_pb):
      """Converts one HyperparamValue proto back into a Python value."""
      which_oneof = param_pb.WhichOneof('kind')
      if which_oneof == 'param_val':
        return _FromParam(param_pb.param_val)
      elif which_oneof == 'list_val':
        return [_FromParamValue(val) for val in param_pb.list_val.items]
      elif which_oneof == 'named_tuple_val':
        named_tuple_cls = _LoadClass(param_pb.named_tuple_val.type)
        if not dataclasses.is_dataclass(named_tuple_cls) and not issubclass(
            named_tuple_cls, tuple):
          return None
        return named_tuple_cls(
            *[_FromParamValue(val) for val in param_pb.named_tuple_val.items])
      elif which_oneof == 'tuple_val':
        return tuple([_FromParamValue(val) for val in param_pb.tuple_val.items])
      elif which_oneof == 'dict_val':
        dict_val = dict()
        for k in param_pb.dict_val.items:
          dict_val[k] = _FromParamValue(param_pb.dict_val.items[k])
        return dict_val
      elif which_oneof == 'type_val':
        tokens = param_pb.type_val.split('/')
        assert len(tokens) == 2
        return getattr(importlib.import_module(tokens[0]), tokens[1])
      elif which_oneof == 'dtype_val':
        return tf.as_dtype(param_pb.dtype_val)
      elif which_oneof == 'string_val':
        return param_pb.string_val
      elif which_oneof == 'int_val':
        return param_pb.int_val
      elif which_oneof == 'float_val':
        return param_pb.float_val
      elif which_oneof == 'bool_val':
        return param_pb.bool_val
      elif which_oneof == 'enum_val':
        enum_cls = _LoadClass(param_pb.enum_val.type)
        if not issubclass(enum_cls, enum.Enum):
          return None
        return enum_cls[param_pb.enum_val.name]
      elif which_oneof == 'proto_val':
        proto_cls = _LoadClass(param_pb.proto_val.type)
        if not issubclass(proto_cls, message.Message):
          return None
        proto_msg = proto_cls()
        proto_msg.ParseFromString(param_pb.proto_val.val)
        return proto_msg
      else:
        # If nothing is set, it's the None type.
        return None

    def _FromParam(param_pb):
      # A 'cls' item signals an InstantiableParams, which pre-defines 'cls'.
      params = InstantiableParams() if 'cls' in param_pb.items else Params()
      for k in param_pb.items:
        val = _FromParamValue(param_pb.items[k])
        if k == 'cls':
          params.Set(**{k: val})
        else:
          params.Define(k, val, '')
      return params

    return _FromParam(param_pb)

  def ToText(self, include_types=False):
    """Renders the params as 'dotted.key : value' lines, sorted by key.

    Args:
      include_types: if True, also return a {key: type-name} dict.

    Returns:
      The text, or a (text, types) tuple when include_types is True.
    """
    kv = {}
    types = {}

    def GetRepr(val):
      """Returns a stable, printable representation for one value."""
      if isinstance(val, Params):
        return _SortedDict({k: GetRepr(v) for k, v in val.IterParams()})
      if isinstance(val, dict):
        return _SortedDict({k: GetRepr(v) for k, v in val.items()})
      if dataclasses.is_dataclass(val):
        return _SortedDict({k: GetRepr(v) for k, v in val.__dict__.items()})
      if _IsNamedTuple(val):
        return _SortedDict({k: GetRepr(v) for k, v in val._asdict().items()})
      if isinstance(val, (list, tuple)):
        return type(val)([GetRepr(v) for v in val])
      if isinstance(val, (int, float, bool, str, enum.Enum)):
        return val
      if isinstance(val, tf.DType):
        return val.name
      if isinstance(val, message.Message):
        proto_str = text_format.MessageToString(val, as_one_line=True)
        return 'proto/%s/%s/%s' % (inspect.getmodule(val).__name__,
                                   type(val).__name__, proto_str)
      if isinstance(val, type):
        return 'type/' + inspect.getmodule(val).__name__ + '/' + val.__name__
      return type(val).__name__

    def Traverse(p, prefix, kv):
      """Walks the Params tree, filling kv with dotted-key -> repr strings."""
      if isinstance(p, Params):
        for key, val in p.IterParams():
          Traverse(val, prefix + '.' + key, kv)
      elif (isinstance(p, (list, tuple)) and
            all(isinstance(x, Params) for x in p)):
        for i, val in enumerate(p):
          Traverse(val, '%s[%d]' % (prefix, i), kv)
      elif isinstance(p, str):
        kv[prefix] = _QuoteString(p)
        types[prefix[1:]] = 'str'
      else:
        kv[prefix] = str(GetRepr(p))
        types[prefix[1:]] = type(p).__name__

    Traverse(self, '', kv)
    ret = ''
    for (k, v) in sorted(kv.items()):
      # prefix starts with '.', so strip the leading dot from each key.
      ret += k[1:] + ' : ' + v + '\n'
    return (ret, types) if include_types else ret

  def FromText(self, text, type_overrides=None):
    """Merges values parsed from ToText-style 'key : value' lines into self.

    Each key must already be defined; the existing value's type guides how
    the text is parsed back (optionally overridden via type_overrides).
    """
    if self._immutable:
      raise TypeError('This Params instance is immutable.')
    kv = {}
    type_overrides = type_overrides or {}
    # When inside a multi-line quoted string: (key, quote_char, text-so-far).
    string_continue = None
    for line in text.split('\n'):
      if string_continue:
        # Accumulate lines until the terminating quote is found.
        value_stripped = line.rstrip()
        if not _EndsWithTerminalQuote(value_stripped, string_continue[1]):
          string_continue = (string_continue[0], string_continue[1],
                             string_continue[2] + '\n' + line)
          continue
        kv[string_continue[0]] = string_continue[2] + '\n' + value_stripped
        string_continue = None
        continue
      line = line.strip()
      if not line or line[0] == '#':
        continue
      pair = line.split(':', 1)
      if len(pair) == 2:
        key = pair[0].strip()
        value = pair[1].lstrip()
        value_stripped = value.rstrip()
        if value and value[0] in ['"', '\'']:
          quote_char = value[0]
          if not _EndsWithTerminalQuote(value[1:], quote_char):
            # Multi-line string.
            string_continue = (key, quote_char, value)
            continue
        kv[key] = value_stripped
      else:
        raise ValueError('Line {} is not in <key>:<value> format'.format(line))

    def _ValueFromText(key, old_val, val):
      """Parses the string `val` using the type of the current value."""
      val_type = type(old_val).__name__
      if isinstance(old_val, str):
        val_type = 'str'
      if key in type_overrides:
        val_type = type_overrides[key]
      # Converts val (a string) to a best-guessed typed value.
      if val_type == 'bool':
        return val and (val != 'False') and (val != 'false')
      elif val_type == 'int':
        return int(val)
      elif val_type == 'float':
        return float(val)
      elif val_type == 'DType':
        return tf.as_dtype(val)
      elif dataclasses.is_dataclass(old_val) or _IsNamedTuple(old_val):
        # Maps field name to new value (or its string repr, if non-POD).
        name_to_new_value = ast.literal_eval(val)
        contents = {}
        items = old_val.__dict__.items() if dataclasses.is_dataclass(
            old_val) else old_val._asdict().items()
        for k, old_field_value in items:
          new_field_value = name_to_new_value[k]
          # Recurse to parse any non-POD contents not converted by
          # literal_eval().
          if isinstance(new_field_value, str):
            contents[k] = _ValueFromText(k, old_field_value, new_field_value)
          else:
            contents[k] = new_field_value
        return type(old_val)(**contents)
      elif val_type in ['list', 'tuple']:
        return ast.literal_eval(val)
      elif val_type == 'dict':
        return ast.literal_eval(val) if val != 'dict' else {}
      elif val_type == 'str':
        val = _UnquoteString(val)
        if val.startswith('[') and val.endswith(']'):
          # We may have stored a list as a string, try converting to a list.
          # In case of ValueError - use the string as is.
          try:
            return ast.literal_eval(val)
          except ValueError:
            pass
        return val
      elif isinstance(old_val, enum.Enum):
        cls, _, name = val.rpartition('.')
        if val_type != cls:
          raise ValueError('Expected enum of class %s but got %s' %
                           (val_type, cls))
        return type(old_val)[name]
      elif (isinstance(old_val, type) or isinstance(old_val, message.Message) or
            old_val is None):
        if val == 'NoneType':
          return None
        elif old_val is None and val in ('False', 'false'):
          return False
        elif old_val is None and val in ('True', 'true'):
          return True
        else:
          # Expect the "type/module/Class" or "proto/module/Class/text"
          # encodings produced by ToText's GetRepr.
          try:
            val_type, pkg, cls = val.split('/', 2)
            if val_type == 'type':
              return getattr(sys.modules[pkg], cls)
            elif val_type == 'proto':
              cls, proto_str = cls.split('/', 1)
              proto_cls = getattr(sys.modules[pkg], cls)
              if not issubclass(proto_cls, message.Message):
                raise ValueError('%s is not a proto class.' % proto_cls)
              return text_format.Parse(proto_str, proto_cls())
          except ValueError as e:
            raise ValueError('Error processing %r : %r with %r' % (key, val, e))
      else:
        raise ValueError('Failed to read a parameter: %r : %r' % (key, val))

    for key, val in kv.items():
      old_val = self.Get(key)
      new_val = _ValueFromText(key, old_val, val)
      self.Set(**{key: new_val})

  def ToTextWithTypes(self):
    """Like ToText, but appends a blank-line-separated 'key : type' section."""
    text, types = self.ToText(include_types=True)
    text += '\n\n'
    for (k, v) in sorted(types.items()):
      text += k + ' : ' + v + '\n'
    return text

  def FromTextWithTypes(self, text):
    """Parses text produced by ToTextWithTypes (values section + types section)."""
    text, types_str = text.split('\n\n')
    types = {}
    for row in types_str.split('\n'):
      if not row:
        continue
      k, v = row.split(':')
      types[k.strip()] = v.strip()
    self.FromText(text, type_overrides=types)

  def TextDiff(self, other):
    """Returns a human-readable diff of self vs other.

    Lines are prefixed '>' (only/differs in self), '<' (only/differs in
    other), or '?' (nested Params with differences inside).
    """

    def IsStringy(x) -> bool:
      return isinstance(x, (str, bytes))

    def TextDiffHelper(a, b, key: str, spaces: str) -> str:
      """Diffs two values that share the same key."""
      if a == b:
        return ''
      if isinstance(a, Params) and isinstance(b, Params):
        diff = ''
        diff += '?' + spaces + key + ':\n'
        diff += TextDiffParamsHelper(a, b, spaces + ' ')
        return diff
      # Duck-typed check for sized sequences (strings are excluded below).
      sequences = False
      try:
        len(a)
        len(b)
        sequences = True
      except TypeError:
        pass
      if sequences and not IsStringy(a) and not IsStringy(b):
        return TextDiffSequenceHelper(a, b, key, spaces)
      diff = ''
      diff += '>' + spaces + key + ': ' + str(a) + '\n'
      diff += '<' + spaces + key + ': ' + str(b) + '\n'
      return diff

    def TextDiffSequenceHelper(a: Sequence[Any], b: Sequence[Any], key: str,
                               spaces: str):
      """Diffs two sequences element-wise, tolerating unequal lengths."""
      diff = ''
      for i in range(max([len(a), len(b)])):
        key_i = f'{key}[{i}]'
        if i < len(a) and i < len(b):
          diff += TextDiffHelper(a[i], b[i], key_i, spaces)
        elif i < len(a):
          diff += '>' + spaces + key_i + ': ' + str(a[i]) + '\n'
        else:
          diff += '<' + spaces + key_i + ': ' + str(b[i]) + '\n'
      return diff

    def TextDiffParamsHelper(a: Params, b: Params, spaces: str) -> str:
      """Diffs two Params objects key by key."""
      a_keys = set([key for key, _ in a.IterParams()])
      b_keys = set([key for key, _ in b.IterParams()])
      all_keys = a_keys.union(b_keys)
      diff = ''
      for key in sorted(all_keys):
        if key in a_keys and key not in b_keys:
          diff += '>' + spaces + key + ': ' + str(a.Get(key)) + '\n'
        elif key in b_keys and key not in a_keys:
          diff += '<' + spaces + key + ': ' + str(b.Get(key)) + '\n'
        elif a.Get(key) != b.Get(key):
          diff += TextDiffHelper(a.Get(key), b.Get(key), key, spaces)
      return diff

    return TextDiffParamsHelper(self, other, spaces=' ')
T = TypeVar('T')


class InstantiableParams(Params, Generic[T]):
  """Params bound to a target class which can be built via `Instantiate`."""

  def __init__(self, cls: T = None):
    """Creates the params object and registers the ``cls`` parameter."""
    super().__init__()
    self.Define('cls', cls, 'Cls that this param object is associated with.')

  def Instantiate(self, **args) -> T:
    """Constructs an instance of ``self.cls``, passing this object first."""
    target = self.cls
    assert target is not None
    # By convention the target's initializer accepts this Params object as
    # its first positional argument, followed by any extra keyword args.
    return target(self, **args)

  def Copy(self) -> 'InstantiableParams[T]':
    """Deep-copies self, preserving both the concrete subclass and cls."""
    duplicate = type(self)(self.cls)
    return self._CopyTo(duplicate)
| true | true |
1c367960001e5f99736b49d68f44c26095c3aa65 | 384 | py | Python | app037.py | ChloeRuan/HelloWorld | e1297ee871c9a84a6e7c50e0d3aa1c332daef27f | [
"MIT"
] | null | null | null | app037.py | ChloeRuan/HelloWorld | e1297ee871c9a84a6e7c50e0d3aa1c332daef27f | [
"MIT"
] | null | null | null | app037.py | ChloeRuan/HelloWorld | e1297ee871c9a84a6e7c50e0d3aa1c332daef27f | [
"MIT"
] | null | null | null | # machine learning, artificial intelligence
# step 1: import data, step 2: clean data, step 3: split the data into training/test data, step 4: create a model, step 5: train the model, step 6: make predictions
# python learning library: step 7: evaluate and improve
# load a dataset from a CSV file in Jupyter; Jupyter makes the code easier to read and lets you visualize each step separately
| 64 | 167 | 0.770833 | true | true | |
1c367997a96db0e841924b7d4cff96586c69656b | 499 | py | Python | bamboo/config/celeryconfig_test.py | pld/bamboo | a0fc77aebd6ff6b1087ba46896b0ce705fbb25a3 | [
"BSD-3-Clause"
] | 27 | 2015-01-14T15:57:54.000Z | 2020-12-27T19:34:41.000Z | bamboo/config/celeryconfig_test.py | biswapanda/bamboo | 72fc260822a27ce52cbe65de178f8fa1b60311f3 | [
"BSD-3-Clause"
] | 2 | 2015-08-06T15:23:28.000Z | 2016-01-28T00:05:25.000Z | bamboo/config/celeryconfig_test.py | biswapanda/bamboo | 72fc260822a27ce52cbe65de178f8fa1b60311f3 | [
"BSD-3-Clause"
] | 10 | 2015-08-07T01:50:39.000Z | 2019-05-15T21:41:18.000Z | from bamboo.config import settings
# Use MongoDB as both the Celery message broker and the result backend,
# pointed at the test database so the test suite never touches live data.
BROKER_BACKEND = 'mongodb'
BROKER_URL = 'mongodb://localhost:27017/%s' % settings.TEST_DATABASE_NAME
CELERY_RESULT_BACKEND = 'mongodb'
# Connection settings for the MongoDB result backend; task results are
# stored in the 'celery_tasks' collection of the test database.
CELERY_MONGODB_BACKEND_SETTINGS = {
    'host': 'localhost',
    'port': 27017,
    'database': settings.TEST_DATABASE_NAME,
    'taskmeta_collection': 'celery_tasks',
}
# Modules Celery imports at worker startup so their tasks get registered.
CELERY_IMPORTS = (
    'bamboo.core.merge',
    'bamboo.lib.readers',
    'bamboo.models.calculation',
    'bamboo.models.dataset',
)
# Single worker process — presumably to keep test runs ordered and
# deterministic; confirm before changing.
CELERYD_CONCURRENCY = 1
| 26.263158 | 73 | 0.725451 | from bamboo.config import settings
BROKER_BACKEND = 'mongodb'
BROKER_URL = 'mongodb://localhost:27017/%s' % settings.TEST_DATABASE_NAME
CELERY_RESULT_BACKEND = 'mongodb'
CELERY_MONGODB_BACKEND_SETTINGS = {
'host': 'localhost',
'port': 27017,
'database': settings.TEST_DATABASE_NAME,
'taskmeta_collection': 'celery_tasks',
}
CELERY_IMPORTS = (
'bamboo.core.merge',
'bamboo.lib.readers',
'bamboo.models.calculation',
'bamboo.models.dataset',
)
CELERYD_CONCURRENCY = 1
| true | true |
1c367a40aa021df8731766670141f0a87f722daa | 476 | py | Python | algos/example.py | acse-ra2617/algo_trading | 7dc198c22cbaf71294e7f8c88730619b842394dd | [
"MIT"
] | null | null | null | algos/example.py | acse-ra2617/algo_trading | 7dc198c22cbaf71294e7f8c88730619b842394dd | [
"MIT"
] | null | null | null | algos/example.py | acse-ra2617/algo_trading | 7dc198c22cbaf71294e7f8c88730619b842394dd | [
"MIT"
] | 1 | 2022-01-09T22:37:36.000Z | 2022-01-09T22:37:36.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 23:00:27 2021
@author: acse-ra2617
"""
"Testing about"
__all__ = ['summation']
def summation(a, b):
    """Return the sum of the two given values.

    Parameters
    ----------
    a : float or int
        First addend.
    b : float or int
        Second addend.

    Returns
    -------
    float or int
        The result of ``a + b``.

    Examples
    --------
    >>> summation(2.0, 3.0)
    5.0
    """
    total = a + b
    return total
| 15.354839 | 70 | 0.497899 |
__all__ = ['summation']
def summation(a, b):
return a+b
| true | true |
1c367bac5f7aa642b61f4dd3377c4bc271528497 | 407 | py | Python | backend/app/schemas/job.py | yangyuchi/ml-job-scheduler | bdafbf7fd266751a974d716e8d3ba64d3187fdcd | [
"MIT"
] | null | null | null | backend/app/schemas/job.py | yangyuchi/ml-job-scheduler | bdafbf7fd266751a974d716e8d3ba64d3187fdcd | [
"MIT"
] | null | null | null | backend/app/schemas/job.py | yangyuchi/ml-job-scheduler | bdafbf7fd266751a974d716e8d3ba64d3187fdcd | [
"MIT"
] | null | null | null | from datetime import datetime
from datetime import datetime
from enum import Enum
from pydantic import BaseModel
class TypeEnum(str, Enum):
    """Kind of job schedule: values serialize as plain strings."""
    # Recurring job driven by a crontab expression.
    cron = "cron"
    # One-shot job executed a single time.
    single = "single"
class ActionEnum(str, Enum):
    """Action that can be applied to a scheduled job."""
    # Resume a previously paused job.
    resume = "resume"
    # Temporarily stop a job from running.
    pause = "pause"
class JobCreate(BaseModel):
    """Request payload for creating a scheduled job.

    NOTE(review): ``created_time: datetime = None`` relies on pydantic v1
    treating a None default as implicitly Optional — confirm, and consider
    ``Optional[datetime]`` for explicitness.
    """
    name: str  # human-readable job name
    job_class: str  # dotted path / identifier of the class that runs the job
    args: dict  # keyword arguments forwarded to the job
    job_type: TypeEnum  # "cron" (recurring) or "single" (one-shot)
    crontab: str  # crontab expression; presumably only used for cron jobs — verify
    created_time: datetime = None  # defaults to None when not supplied
| 16.958333 | 33 | 0.690418 | from datetime import datetime
from datetime import datetime
from enum import Enum
from pydantic import BaseModel
class TypeEnum(str, Enum):
cron = "cron"
single = "single"
class ActionEnum(str, Enum):
resume = "resume"
pause = "pause"
class JobCreate(BaseModel):
name: str
job_class: str
args: dict
job_type: TypeEnum
crontab: str
created_time: datetime = None
| true | true |
1c367bc3046f110da7694671b37f86b0c1fa7c86 | 3,508 | py | Python | examples/pipelines/download_models.py | tcl326/forte | d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8 | [
"Apache-2.0"
] | null | null | null | examples/pipelines/download_models.py | tcl326/forte | d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8 | [
"Apache-2.0"
] | 13 | 2019-12-01T04:51:38.000Z | 2020-02-11T23:55:11.000Z | examples/pipelines/download_models.py | tcl326/forte | d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import OrderedDict
from pathlib import Path
from forte.data.data_utils import maybe_download
# Command-line interface: which model to fetch and where to store it.
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", default="srl",
                    help="Model name to download")
parser.add_argument("--path", default="resources/",
                    help="Path to where the models will be saved")
args = parser.parse_args()
if __name__ == "__main__":
    model_path = Path(args.path)
    # create the model path if it doesn't exist
    model_path.mkdir(parents=True, exist_ok=True)
    # download srl model: embeddings + pretrained weights, each into its
    # own subdirectory under <path>/srl/.
    if args.model_name.lower() == "srl":
        embedding_path = Path("srl/embeddings")
        (model_path / embedding_path).mkdir(parents=True, exist_ok=True)
        pretrained_path = Path("srl/pretrained")
        (model_path / pretrained_path).mkdir(parents=True, exist_ok=True)
        # Google Drive share links mapped to their local target filenames.
        # Ordered so urls and filenames stay aligned when passed below.
        urls_to_file_names = OrderedDict({
            "https://drive.google.com/file/d/102YRcdXqDFLOjToR7L-3XYcU-yqcKAO8/"
            "view?usp=sharing": embedding_path / "char_vocab.english.txt",
            "https://drive.google.com/file/d/1hgwmUBk8Mb3iZYiHi1UpCpPFOCfOQLLB/"
            "view?usp=sharing": embedding_path / "glove.840B.300d.05.filtered",
            "https://drive.google.com/file/d/1H4PZhJhGoFBqrSMRPufjJ-9zwROw8hAK/"
            "view?usp=sharing": embedding_path / "glove_50_300_2.filtered",
            "https://drive.google.com/file/d/1uoA5EnZMWl5m5DMevGcI7UjiXxQRlD9W/"
            "view?usp=sharing": embedding_path / "word_vocab.english.txt",
            "https://drive.google.com/file/d/1UZc8x-mhdXg7Rtt6FSBDlEoJb_nHxDAQ/"
            "view?usp=sharing": pretrained_path / "model.pt"
        })
        maybe_download(urls=list(urls_to_file_names.keys()), path=model_path,
                       filenames=list(urls_to_file_names.values()))
    # download ner model: five pickled artifacts into <path>/ner/.
    elif args.model_name.lower() == "ner":
        # urls[i] corresponds to filenames[i]; keep both lists in sync.
        urls = ["https://drive.google.com/file/d/1j3i5U1YffYqKTdSbnlsrMAm9j86nL"
                "jxC/view?usp=sharing",
                "https://drive.google.com/file/d/1aRPS_b4AFaZTsk9uZ41tkWIBvWbO"
                "_s_V/view?usp=sharing",
                "https://drive.google.com/file/d/1SYpcWEDeTlbOsXlRevS8YS_dyP_k2"
                "9g0/view?usp=sharing",
                "https://drive.google.com/file/d/1S2UMDBX7Ci-Mrm30434t0LOBL__Db"
                "92Y/view?usp=sharing",
                "https://drive.google.com/file/d/1O4iFhBPuogwEgz7bpJjEqDqAlYf5"
                "caP4/view?usp=sharing"]
        filenames = ["model.pkl", "word_embedding_table.pkl",
                     "word_alphabet.pkl", "ner_alphabet.pkl",
                     "char_alphabet.pkl"]
        maybe_download(urls=urls, path=model_path / "ner", filenames=filenames)
    else:
        print(f"Incorrect 'model_name' {args.model_name}. Available values are "
              f"'srl' and 'ner'.")
| 45.558442 | 80 | 0.662486 |
import argparse
from collections import OrderedDict
from pathlib import Path
from forte.data.data_utils import maybe_download
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", default="srl",
help="Model name to download")
parser.add_argument("--path", default="resources/",
help="Path to where the models will be saved")
args = parser.parse_args()
if __name__ == "__main__":
model_path = Path(args.path)
model_path.mkdir(parents=True, exist_ok=True)
# download srl model
if args.model_name.lower() == "srl":
embedding_path = Path("srl/embeddings")
(model_path / embedding_path).mkdir(parents=True, exist_ok=True)
pretrained_path = Path("srl/pretrained")
(model_path / pretrained_path).mkdir(parents=True, exist_ok=True)
urls_to_file_names = OrderedDict({
"https://drive.google.com/file/d/102YRcdXqDFLOjToR7L-3XYcU-yqcKAO8/"
"view?usp=sharing": embedding_path / "char_vocab.english.txt",
"https://drive.google.com/file/d/1hgwmUBk8Mb3iZYiHi1UpCpPFOCfOQLLB/"
"view?usp=sharing": embedding_path / "glove.840B.300d.05.filtered",
"https://drive.google.com/file/d/1H4PZhJhGoFBqrSMRPufjJ-9zwROw8hAK/"
"view?usp=sharing": embedding_path / "glove_50_300_2.filtered",
"https://drive.google.com/file/d/1uoA5EnZMWl5m5DMevGcI7UjiXxQRlD9W/"
"view?usp=sharing": embedding_path / "word_vocab.english.txt",
"https://drive.google.com/file/d/1UZc8x-mhdXg7Rtt6FSBDlEoJb_nHxDAQ/"
"view?usp=sharing": pretrained_path / "model.pt"
})
maybe_download(urls=list(urls_to_file_names.keys()), path=model_path,
filenames=list(urls_to_file_names.values()))
# download ner model
elif args.model_name.lower() == "ner":
urls = ["https://drive.google.com/file/d/1j3i5U1YffYqKTdSbnlsrMAm9j86nL"
"jxC/view?usp=sharing",
"https://drive.google.com/file/d/1aRPS_b4AFaZTsk9uZ41tkWIBvWbO"
"_s_V/view?usp=sharing",
"https://drive.google.com/file/d/1SYpcWEDeTlbOsXlRevS8YS_dyP_k2"
"9g0/view?usp=sharing",
"https://drive.google.com/file/d/1S2UMDBX7Ci-Mrm30434t0LOBL__Db"
"92Y/view?usp=sharing",
"https://drive.google.com/file/d/1O4iFhBPuogwEgz7bpJjEqDqAlYf5"
"caP4/view?usp=sharing"]
filenames = ["model.pkl", "word_embedding_table.pkl",
"word_alphabet.pkl", "ner_alphabet.pkl",
"char_alphabet.pkl"]
maybe_download(urls=urls, path=model_path / "ner", filenames=filenames)
else:
print(f"Incorrect 'model_name' {args.model_name}. Available values are "
f"'srl' and 'ner'.")
| true | true |
1c367be144ed8242cd2bbbf542def3eacabe9053 | 5,586 | py | Python | src/ggrc_basic_permissions/roles/Editor.py | lyykfi/ggrc-core | 8c40308370897c1dff3ca631805ce17da4f87d8e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc_basic_permissions/roles/Editor.py | lyykfi/ggrc-core | 8c40308370897c1dff3ca631805ce17da4f87d8e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc_basic_permissions/roles/Editor.py | lyykfi/ggrc-core | 8c40308370897c1dff3ca631805ce17da4f87d8e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Editor permissions. """
# pylint: disable=invalid-name
scope = "System"
description = """
This role grants a user basic object creation and editing permission.
"""
permissions = {
"read": [
"AccessControlRole",
"BackgroundTask",
"Workflow",
"TaskGroup",
"TaskGroupObject",
"TaskGroupTask",
"Cycle",
"CycleTaskGroup",
"CycleTaskGroupObjectTask",
"CycleTaskEntry",
"AccessControlList",
"Audit",
"Snapshot",
"Categorization",
"Category",
"Comment",
"ControlCategory",
"ControlAssertion",
"Control",
"Assessment",
"AssessmentTemplate",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Evidence",
"Facility",
"Market",
"Objective",
"ObjectPerson",
"Option",
"OrgGroup",
"Risk",
"RiskAssessment",
"Threat",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Requirement",
"SystemOrProcess",
"System",
"Process",
"Metric",
"Person",
"Program",
"Proposal",
"TechnologyEnvironment",
"Revision",
"ProductGroup",
"Role",
"Context",
"UserRole",
],
"create": [
"Audit",
"BackgroundTask",
"Snapshot",
"Workflow",
"TaskGroup",
"TaskGroupObject",
"TaskGroupTask",
"Cycle",
"CycleTaskGroupObjectTask",
"CycleTaskEntry",
"Categorization",
"Category",
"Comment",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Evidence",
"Facility",
"Market",
"Objective",
"ObjectPerson",
"Option",
"OrgGroup",
"Risk",
"RiskAssessment",
"Threat",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Requirement",
"SystemOrProcess",
"System",
"Process",
"Metric",
"Program",
"Proposal",
"TechnologyEnvironment",
"Role",
"ProductGroup",
"UserRole",
"Context",
"Review"
],
"update": [
{
"type": "Audit",
"terms": {
"property_name": "archived",
"prevent_if": True
},
"condition": "has_not_changed"
},
"Snapshot",
"Workflow",
"TaskGroup",
"TaskGroupTask",
"CycleTaskGroupObjectTask",
"CycleTaskEntry",
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Evidence",
"Facility",
"Market",
"Objective",
"ObjectPerson",
"Person",
"Option",
"OrgGroup",
"RiskAssessment",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Proposal",
"TechnologyEnvironment",
"Relationship",
"Requirement",
"SystemOrProcess",
"System",
"Process",
"Metric",
"ProductGroup",
"Program",
"Role",
"UserRole",
"Context",
"Review",
],
"delete": [
{
"type": "Audit",
"terms": {
"property_name": "archived",
"prevent_if": False
},
"condition": "has_changed"
},
"Workflow",
"TaskGroup",
"TaskGroupObject",
"TaskGroupTask",
"CycleTaskEntry",
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Facility",
"Market",
"Objective",
"ObjectPerson",
"Option",
"OrgGroup",
"RiskAssessment",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Requirement",
"SystemOrProcess",
"System",
"Process",
"Metric",
"Program",
"TechnologyEnvironment",
"Role",
"UserRole",
"ProductGroup",
"Context",
]
}
| 22.166667 | 78 | 0.463122 |
scope = "System"
description = """
This role grants a user basic object creation and editing permission.
"""
permissions = {
"read": [
"AccessControlRole",
"BackgroundTask",
"Workflow",
"TaskGroup",
"TaskGroupObject",
"TaskGroupTask",
"Cycle",
"CycleTaskGroup",
"CycleTaskGroupObjectTask",
"CycleTaskEntry",
"AccessControlList",
"Audit",
"Snapshot",
"Categorization",
"Category",
"Comment",
"ControlCategory",
"ControlAssertion",
"Control",
"Assessment",
"AssessmentTemplate",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Evidence",
"Facility",
"Market",
"Objective",
"ObjectPerson",
"Option",
"OrgGroup",
"Risk",
"RiskAssessment",
"Threat",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Requirement",
"SystemOrProcess",
"System",
"Process",
"Metric",
"Person",
"Program",
"Proposal",
"TechnologyEnvironment",
"Revision",
"ProductGroup",
"Role",
"Context",
"UserRole",
],
"create": [
"Audit",
"BackgroundTask",
"Snapshot",
"Workflow",
"TaskGroup",
"TaskGroupObject",
"TaskGroupTask",
"Cycle",
"CycleTaskGroupObjectTask",
"CycleTaskEntry",
"Categorization",
"Category",
"Comment",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Evidence",
"Facility",
"Market",
"Objective",
"ObjectPerson",
"Option",
"OrgGroup",
"Risk",
"RiskAssessment",
"Threat",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Requirement",
"SystemOrProcess",
"System",
"Process",
"Metric",
"Program",
"Proposal",
"TechnologyEnvironment",
"Role",
"ProductGroup",
"UserRole",
"Context",
"Review"
],
"update": [
{
"type": "Audit",
"terms": {
"property_name": "archived",
"prevent_if": True
},
"condition": "has_not_changed"
},
"Snapshot",
"Workflow",
"TaskGroup",
"TaskGroupTask",
"CycleTaskGroupObjectTask",
"CycleTaskEntry",
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Evidence",
"Facility",
"Market",
"Objective",
"ObjectPerson",
"Person",
"Option",
"OrgGroup",
"RiskAssessment",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Proposal",
"TechnologyEnvironment",
"Relationship",
"Requirement",
"SystemOrProcess",
"System",
"Process",
"Metric",
"ProductGroup",
"Program",
"Role",
"UserRole",
"Context",
"Review",
],
"delete": [
{
"type": "Audit",
"terms": {
"property_name": "archived",
"prevent_if": False
},
"condition": "has_changed"
},
"Workflow",
"TaskGroup",
"TaskGroupObject",
"TaskGroupTask",
"CycleTaskEntry",
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Assessment",
"AssessmentTemplate",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Facility",
"Market",
"Objective",
"ObjectPerson",
"Option",
"OrgGroup",
"RiskAssessment",
"Vendor",
"PopulationSample",
"Product",
"Project",
"Relationship",
"Requirement",
"SystemOrProcess",
"System",
"Process",
"Metric",
"Program",
"TechnologyEnvironment",
"Role",
"UserRole",
"ProductGroup",
"Context",
]
}
| true | true |
1c367c5624945a852fd90cb88491344d43a6d51d | 818 | py | Python | tests/test_rottenTomatoesMoviesBrowser.py | seanbreckenridge/rotten_tomatoes_cli | e250ad0d8bc70bf385540ab9264c440feb90d02c | [
"MIT"
] | null | null | null | tests/test_rottenTomatoesMoviesBrowser.py | seanbreckenridge/rotten_tomatoes_cli | e250ad0d8bc70bf385540ab9264c440feb90d02c | [
"MIT"
] | null | null | null | tests/test_rottenTomatoesMoviesBrowser.py | seanbreckenridge/rotten_tomatoes_cli | e250ad0d8bc70bf385540ab9264c440feb90d02c | [
"MIT"
] | null | null | null | from unittest import TestCase
from mock import Mock, patch
from data.services import RottenTomatoesMoviesBrowser
class TestRottenTomatoesMoviesBrowser(TestCase):
    """Unit tests for ``RottenTomatoesMoviesBrowser.browse``."""

    # Shared instance under test; its parser attribute is replaced per test.
    browser = RottenTomatoesMoviesBrowser()

    @patch("rotten_tomatoes_client.RottenTomatoesClient.browse_movies")
    def test_browse(self, mocked_client_browse_movies):
        """browse() forwards the query to the client and parses the raw results.

        Verifies the pipeline: client.browse_movies(query=...) -> raw dict ->
        parser.parse(movie_results=raw["results"]) -> returned value.
        """
        results = {"results": "jaebaebae"}
        mocked_client_browse_movies.return_value = results
        # Stub the parser so we can assert it receives the client's "results"
        # payload and that its return value is passed through unchanged.
        self.browser.parser.parse = Mock("mock_parser")
        self.browser.parser.parse.return_value = "jaebaebae"
        movies = self.browser.browse(query="query")
        self.browser.parser.parse.assert_called_once_with(movie_results="jaebaebae")
        mocked_client_browse_movies.assert_called_once_with(query="query")
        self.assertEqual("jaebaebae", movies)
| 35.565217 | 84 | 0.757946 | from unittest import TestCase
from mock import Mock, patch
from data.services import RottenTomatoesMoviesBrowser
class TestRottenTomatoesMoviesBrowser(TestCase):
browser = RottenTomatoesMoviesBrowser()
@patch("rotten_tomatoes_client.RottenTomatoesClient.browse_movies")
def test_browse(self, mocked_client_browse_movies):
results = {"results": "jaebaebae"}
mocked_client_browse_movies.return_value = results
self.browser.parser.parse = Mock("mock_parser")
self.browser.parser.parse.return_value = "jaebaebae"
movies = self.browser.browse(query="query")
self.browser.parser.parse.assert_called_once_with(movie_results="jaebaebae")
mocked_client_browse_movies.assert_called_once_with(query="query")
self.assertEqual("jaebaebae", movies)
| true | true |
1c367dde187ad06a25c92656822c65b0275512c4 | 279 | py | Python | ncdjango/middleware.py | consbio/ncdjango | d35a896bc8fe0de1168171ed0e450f654c231319 | [
"BSD-3-Clause"
] | 6 | 2017-08-13T11:51:12.000Z | 2021-07-26T13:07:02.000Z | ncdjango/middleware.py | consbio/ncdjango | d35a896bc8fe0de1168171ed0e450f654c231319 | [
"BSD-3-Clause"
] | 7 | 2015-10-13T00:55:06.000Z | 2021-03-25T23:05:17.000Z | ncdjango/middleware.py | consbio/ncdjango | d35a896bc8fe0de1168171ed0e450f654c231319 | [
"BSD-3-Clause"
] | null | null | null | from tastypie.authentication import ApiKeyAuthentication
class TastypieApiKeyMiddleware(object):
    """Middleware to authenticate users using API keys for regular Django views."""

    # NOTE(review): this is the old-style (pre-Django-1.10) middleware hook;
    # under the newer MIDDLEWARE setting the class would need
    # __init__(get_response)/__call__ — confirm the project's Django version.
    def process_request(self, request):
        # Called for its side effect only: on success ApiKeyAuthentication
        # attaches the user to the request. The return value is deliberately
        # ignored so unauthenticated requests still reach the view, which can
        # enforce access itself.
        ApiKeyAuthentication().is_authenticated(request)
class TastypieApiKeyMiddleware(object):
def process_request(self, request):
ApiKeyAuthentication().is_authenticated(request) | true | true |
1c367eda490abd607eea5d7d76879046faaf94c7 | 8,207 | py | Python | sdk/python/pulumi_azure_native/network/v20191101/firewall_policy_rule_group.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20191101/firewall_policy_rule_group.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20191101/firewall_policy_rule_group.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['FirewallPolicyRuleGroup']
class FirewallPolicyRuleGroup(pulumi.CustomResource):
    """Rule Group resource.

    Auto-generated Pulumi wrapper for the Azure
    ``Microsoft.Network/firewallPolicies/ruleGroups`` API (version 2019-11-01).
    """

    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 firewall_policy_name: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 rule_group_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.InputType['FirewallPolicyFilterRuleArgs'], pulumi.InputType['FirewallPolicyNatRuleArgs']]]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Rule Group resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] firewall_policy_name: The name of the Firewall Policy.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param pulumi.Input[int] priority: Priority of the Firewall Policy Rule Group resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] rule_group_name: The name of the FirewallPolicyRuleGroup.
        :param pulumi.Input[Sequence[pulumi.Input[Union[pulumi.InputType['FirewallPolicyFilterRuleArgs'], pulumi.InputType['FirewallPolicyNatRuleArgs']]]]] rules: Group of Firewall Policy rules.
        """
        # Legacy positional spellings kept for backward compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build the
            # property bag sent to the Pulumi engine.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if firewall_policy_name is None and not opts.urn:
                raise TypeError("Missing required property 'firewall_policy_name'")
            __props__['firewall_policy_name'] = firewall_policy_name
            __props__['id'] = id
            __props__['name'] = name
            __props__['priority'] = priority
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['rule_group_name'] = rule_group_name
            __props__['rules'] = rules
            # Output-only properties are initialised to None and populated by
            # the provider after creation.
            __props__['etag'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        # Aliases let state recorded under other API versions / package names
        # resolve to this resource type without replacement.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20191101:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/latest:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/latest:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20190601:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190601:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20190701:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190701:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20190801:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190801:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20190901:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190901:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20191201:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20191201:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20200301:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200301:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20200401:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200401:FirewallPolicyRuleGroup")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(FirewallPolicyRuleGroup, __self__).__init__(
            'azure-native:network/v20191101:FirewallPolicyRuleGroup',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallPolicyRuleGroup':
        """
        Get an existing FirewallPolicyRuleGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from live state.
        __props__ = dict()
        __props__["etag"] = None
        __props__["name"] = None
        __props__["priority"] = None
        __props__["provisioning_state"] = None
        __props__["rules"] = None
        __props__["type"] = None
        return FirewallPolicyRuleGroup(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def priority(self) -> pulumi.Output[Optional[int]]:
        """
        Priority of the Firewall Policy Rule Group resource.
        """
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the firewall policy rule group resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Output[Optional[Sequence[Any]]]:
        """
        Group of Firewall Policy rules.
        """
        return pulumi.get(self, "rules")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Rule Group type.
        """
        return pulumi.get(self, "type")

    # camelCase <-> snake_case mapping used by the Pulumi engine when
    # marshalling properties across the RPC boundary.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 50.975155 | 1,519 | 0.679907 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['FirewallPolicyRuleGroup']
class FirewallPolicyRuleGroup(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
firewall_policy_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_group_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.InputType['FirewallPolicyFilterRuleArgs'], pulumi.InputType['FirewallPolicyNatRuleArgs']]]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if firewall_policy_name is None and not opts.urn:
raise TypeError("Missing required property 'firewall_policy_name'")
__props__['firewall_policy_name'] = firewall_policy_name
__props__['id'] = id
__props__['name'] = name
__props__['priority'] = priority
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['rule_group_name'] = rule_group_name
__props__['rules'] = rules
__props__['etag'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20191101:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/latest:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/latest:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20190601:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190601:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20190701:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190701:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20190801:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190801:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20190901:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190901:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20191201:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20191201:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20200301:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200301:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-native:network/v20200401:FirewallPolicyRuleGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200401:FirewallPolicyRuleGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(FirewallPolicyRuleGroup, __self__).__init__(
'azure-native:network/v20191101:FirewallPolicyRuleGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallPolicyRuleGroup':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["etag"] = None
__props__["name"] = None
__props__["priority"] = None
__props__["provisioning_state"] = None
__props__["rules"] = None
__props__["type"] = None
return FirewallPolicyRuleGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "priority")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def rules(self) -> pulumi.Output[Optional[Sequence[Any]]]:
return pulumi.get(self, "rules")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
1c367edc638ba223845ce4f661ce8bfd0b5db630 | 76 | wsgi | Python | 1/index.wsgi | catsky/rebang | 7053de769a3a7093f84cb6117a361ed394472ef0 | [
"MIT"
] | 29 | 2015-01-04T09:34:43.000Z | 2019-02-20T20:16:03.000Z | 1/index.wsgi | catsky/rebang | 7053de769a3a7093f84cb6117a361ed394472ef0 | [
"MIT"
] | 1 | 2015-01-19T07:05:54.000Z | 2015-06-02T05:01:38.000Z | 1/index.wsgi | catsky/rebang | 7053de769a3a7093f84cb6117a361ed394472ef0 | [
"MIT"
] | 17 | 2015-01-11T06:31:52.000Z | 2018-09-03T05:55:23.000Z | import sae
from chartnet import app
application = sae.create_wsgi_app(app) | 15.2 | 38 | 0.815789 | import sae
from chartnet import app
application = sae.create_wsgi_app(app) | true | true |
1c367ffc289bd6885a9c7f9b102884a07f96fd15 | 8,870 | py | Python | mutagen/_file.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | 2 | 2020-09-16T07:00:41.000Z | 2020-12-20T19:56:03.000Z | mutagen/_file.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | null | null | null | mutagen/_file.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | 2 | 2020-09-17T08:27:12.000Z | 2021-08-23T11:13:52.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import warnings
from mutagen._util import DictMixin, loadfile
class FileType(DictMixin):
    """FileType(filething, **kwargs)

    Args:
        filething (filething): A filename or a file-like object

    Subclasses might take further options via keyword arguments.

    An abstract object wrapping tags and audio stream information.
    Each file format has different potential tags and stream
    information.

    FileTypes implement an interface very similar to Metadata; the
    dict interface, save, load, and delete calls on a FileType call
    the appropriate methods on its tag data.

    Attributes:
        info (`StreamInfo`): contains length, bitrate, sample rate
        tags (`Tags`): metadata tags, if any, otherwise `None`
    """

    __module__ = "mutagen"

    # Stream information; populated by load() in subclasses.
    info = None
    # Format-specific tag object, or None if the file carries no tags.
    tags = None
    filename = None
    _mimes = ["application/octet-stream"]

    def __init__(self, *args, **kwargs):
        # Constructing with no arguments is deprecated but still tolerated so
        # legacy code that calls load() manually keeps working.
        if not args and not kwargs:
            warnings.warn("FileType constructor requires a filename",
                          DeprecationWarning)
        else:
            self.load(*args, **kwargs)

    @loadfile()
    def load(self, filething, *args, **kwargs):
        # Each concrete format subclass parses the file here.
        raise NotImplementedError

    def __getitem__(self, key):
        """Look up a metadata tag key.

        If the file has no tags at all, a KeyError is raised.
        """

        if self.tags is None:
            raise KeyError(key)
        else:
            return self.tags[key]

    def __setitem__(self, key, value):
        """Set a metadata tag.

        If the file has no tags, an appropriate format is added (but
        not written until save is called).
        """

        if self.tags is None:
            self.add_tags()
        self.tags[key] = value

    def __delitem__(self, key):
        """Delete a metadata tag key.

        If the file has no tags at all, a KeyError is raised.
        """

        if self.tags is None:
            raise KeyError(key)
        else:
            del(self.tags[key])

    def keys(self):
        """Return a list of keys in the metadata tag.

        If the file has no tags at all, an empty list is returned.
        """

        if self.tags is None:
            return []
        else:
            return self.tags.keys()

    @loadfile(writable=True)
    def delete(self, filething=None):
        """delete(filething=None)

        Remove tags from a file.

        In cases where the tagging format is independent of the file type
        (for example `mutagen.id3.ID3`) all traces of the tagging format will
        be removed.
        In cases where the tag is part of the file type, all tags and
        padding will be removed.

        The tags attribute will be cleared as well if there is one.

        Does nothing if the file has no tags.

        Raises:
            mutagen.MutagenError: if deleting wasn't possible
        """

        if self.tags is not None:
            return self.tags.delete(filething)

    @loadfile(writable=True)
    def save(self, filething=None, **kwargs):
        """save(filething=None, **kwargs)

        Save metadata tags.

        Raises:
            MutagenError: if saving wasn't possible
        """

        if self.tags is not None:
            return self.tags.save(filething, **kwargs)

    def pprint(self):
        """
        Returns:
            text: stream information and comment key=value pairs.
        """

        stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
        try:
            tags = self.tags.pprint()
        except AttributeError:
            # No tags object present: show stream info only.
            return stream
        else:
            return stream + ((tags and "\n" + tags) or "")

    def add_tags(self):
        """Adds new tags to the file.

        Raises:
            mutagen.MutagenError:
                if tags already exist or adding is not possible.
        """

        raise NotImplementedError

    @property
    def mime(self):
        """A list of mime types (:class:`mutagen.text`)"""

        # Walk the MRO so subclasses inherit mime types from their bases,
        # most-derived first, without duplicates.
        mimes = []
        for Kind in type(self).__mro__:
            for mime in getattr(Kind, '_mimes', []):
                if mime not in mimes:
                    mimes.append(mime)
        return mimes

    @staticmethod
    def score(filename, fileobj, header):
        """Returns a score for how likely the file can be parsed by this type.

        Args:
            filename (fspath): a file path
            fileobj (fileobj): a file object open in rb mode. Position is
                undefined
            header (bytes): data of undefined length, starts with the start of
                the file.

        Returns:
            int: negative if definitely not a matching type, otherwise a score,
                the bigger the more certain that the file can be loaded.
        """

        raise NotImplementedError
class StreamInfo(object):
    """Abstract stream information object.

    Provides attributes for length, bitrate, sample rate etc.

    See the implementations for details.
    """

    __module__ = "mutagen"

    def pprint(self):
        """
        Returns:
            text: Print stream information

        Subclasses must override this with a human-readable summary.
        """

        raise NotImplementedError
@loadfile(method=False)
def File(filething, options=None, easy=False):
    """File(filething, options=None, easy=False)

    Guess the type of the file and try to open it.

    The file type is decided by several things, such as the first 128
    bytes (which usually contains a file type identifier), the
    filename extension, and the presence of existing tags.

    If no appropriate type could be found, None is returned.

    Args:
        filething (filething)
        options: Sequence of :class:`FileType` implementations,
            defaults to all included ones.
        easy (bool):  If the easy wrappers should be returned if available.
            For example :class:`EasyMP3 <mp3.EasyMP3>` instead
            of :class:`MP3 <mp3.MP3>`.

    Returns:
        FileType: A FileType instance for the detected type or `None` in case
            the type couldn't be determined.

    Raises:
        MutagenError: in case the detected type fails to load the file.
    """

    if options is None:
        # Format modules are imported lazily, only when the default option
        # set is actually needed.
        from mutagen.asf import ASF
        from mutagen.apev2 import APEv2File
        from mutagen.flac import FLAC
        if easy:
            from mutagen.easyid3 import EasyID3FileType as ID3FileType
        else:
            from mutagen.id3 import ID3FileType
        if easy:
            from mutagen.mp3 import EasyMP3 as MP3
        else:
            from mutagen.mp3 import MP3
        from mutagen.oggflac import OggFLAC
        from mutagen.oggspeex import OggSpeex
        from mutagen.oggtheora import OggTheora
        from mutagen.oggvorbis import OggVorbis
        from mutagen.oggopus import OggOpus
        if easy:
            from mutagen.trueaudio import EasyTrueAudio as TrueAudio
        else:
            from mutagen.trueaudio import TrueAudio
        from mutagen.wavpack import WavPack
        if easy:
            from mutagen.easymp4 import EasyMP4 as MP4
        else:
            from mutagen.mp4 import MP4
        from mutagen.musepack import Musepack
        from mutagen.monkeysaudio import MonkeysAudio
        from mutagen.optimfrog import OptimFROG
        from mutagen.aiff import AIFF
        from mutagen.aac import AAC
        from mutagen.ac3 import AC3
        from mutagen.smf import SMF
        from mutagen.tak import TAK
        from mutagen.dsf import DSF
        from mutagen.dsdiff import DSDIFF
        from mutagen.wave import WAVE
        options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
                   FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack,
                   Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC, AC3,
                   SMF, TAK, DSF, DSDIFF, WAVE]

    if not options:
        return None

    fileobj = filething.fileobj

    try:
        # The first 128 bytes are enough for most format magic/identifiers.
        header = fileobj.read(128)
    except IOError:
        header = b""

    # Sort by name after score. Otherwise import order affects
    # Kind sort order, which affects treatment of things with
    # equals scores.
    results = [(Kind.score(filething.name, fileobj, header), Kind.__name__)
               for Kind in options]

    results = list(zip(results, options))
    results.sort()
    # Highest (score, name) tuple wins; a positive score means a match.
    (score, name), Kind = results[-1]
    if score > 0:
        try:
            # Rewind so the winning type parses from the start of the file;
            # unseekable streams are left as-is and the type must cope.
            fileobj.seek(0, 0)
        except IOError:
            pass
        return Kind(fileobj, filename=filething.filename)
    else:
        return None
| 29.177632 | 79 | 0.607779 |
import warnings
from mutagen._util import DictMixin, loadfile
class FileType(DictMixin):
__module__ = "mutagen"
info = None
tags = None
filename = None
_mimes = ["application/octet-stream"]
def __init__(self, *args, **kwargs):
if not args and not kwargs:
warnings.warn("FileType constructor requires a filename",
DeprecationWarning)
else:
self.load(*args, **kwargs)
@loadfile()
def load(self, filething, *args, **kwargs):
raise NotImplementedError
def __getitem__(self, key):
if self.tags is None:
raise KeyError(key)
else:
return self.tags[key]
def __setitem__(self, key, value):
if self.tags is None:
self.add_tags()
self.tags[key] = value
def __delitem__(self, key):
if self.tags is None:
raise KeyError(key)
else:
del(self.tags[key])
def keys(self):
if self.tags is None:
return []
else:
return self.tags.keys()
@loadfile(writable=True)
def delete(self, filething=None):
if self.tags is not None:
return self.tags.delete(filething)
@loadfile(writable=True)
def save(self, filething=None, **kwargs):
if self.tags is not None:
return self.tags.save(filething, **kwargs)
def pprint(self):
stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
try:
tags = self.tags.pprint()
except AttributeError:
return stream
else:
return stream + ((tags and "\n" + tags) or "")
def add_tags(self):
raise NotImplementedError
@property
def mime(self):
mimes = []
for Kind in type(self).__mro__:
for mime in getattr(Kind, '_mimes', []):
if mime not in mimes:
mimes.append(mime)
return mimes
@staticmethod
def score(filename, fileobj, header):
raise NotImplementedError
class StreamInfo(object):
__module__ = "mutagen"
def pprint(self):
raise NotImplementedError
@loadfile(method=False)
def File(filething, options=None, easy=False):
if options is None:
from mutagen.asf import ASF
from mutagen.apev2 import APEv2File
from mutagen.flac import FLAC
if easy:
from mutagen.easyid3 import EasyID3FileType as ID3FileType
else:
from mutagen.id3 import ID3FileType
if easy:
from mutagen.mp3 import EasyMP3 as MP3
else:
from mutagen.mp3 import MP3
from mutagen.oggflac import OggFLAC
from mutagen.oggspeex import OggSpeex
from mutagen.oggtheora import OggTheora
from mutagen.oggvorbis import OggVorbis
from mutagen.oggopus import OggOpus
if easy:
from mutagen.trueaudio import EasyTrueAudio as TrueAudio
else:
from mutagen.trueaudio import TrueAudio
from mutagen.wavpack import WavPack
if easy:
from mutagen.easymp4 import EasyMP4 as MP4
else:
from mutagen.mp4 import MP4
from mutagen.musepack import Musepack
from mutagen.monkeysaudio import MonkeysAudio
from mutagen.optimfrog import OptimFROG
from mutagen.aiff import AIFF
from mutagen.aac import AAC
from mutagen.ac3 import AC3
from mutagen.smf import SMF
from mutagen.tak import TAK
from mutagen.dsf import DSF
from mutagen.dsdiff import DSDIFF
from mutagen.wave import WAVE
options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack,
Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC, AC3,
SMF, TAK, DSF, DSDIFF, WAVE]
if not options:
return None
fileobj = filething.fileobj
try:
header = fileobj.read(128)
except IOError:
header = b""
results = [(Kind.score(filething.name, fileobj, header), Kind.__name__)
for Kind in options]
results = list(zip(results, options))
results.sort()
(score, name), Kind = results[-1]
if score > 0:
try:
fileobj.seek(0, 0)
except IOError:
pass
return Kind(fileobj, filename=filething.filename)
else:
return None
| true | true |
1c3680e45aee221a37add29b429f5b29378d9092 | 1,714 | py | Python | examples/mnist_example.py | cactusWhiskey/tensor-evolution | 7e1f71667bd4c586236e48ec0ec55c68a3e99081 | [
"Apache-2.0"
] | null | null | null | examples/mnist_example.py | cactusWhiskey/tensor-evolution | 7e1f71667bd4c586236e48ec0ec55c68a3e99081 | [
"Apache-2.0"
] | null | null | null | examples/mnist_example.py | cactusWhiskey/tensor-evolution | 7e1f71667bd4c586236e48ec0ec55c68a3e99081 | [
"Apache-2.0"
] | null | null | null | """MNIST example using tensorflow dataset.
Derived from Tensorflow quickstart beginner example. The original work contained the following copyright/notice info:
Copyright 2019 The TensorFlow Authors.
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
This derivative work is also licensed under Apache 2.0.
"""
import tensorflow as tf
from tensorEvolution import tensor_evolution
def main():
    """Evolve a network architecture on MNIST, then train and evaluate the winner."""
    # Load MNIST and normalise pixel values into [0, 1].
    (train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
    train_x = train_x / 255.0
    test_x = test_x / 255.0

    # Run the evolutionary architecture search on the prepared data.
    worker = tensor_evolution.EvolutionWorker()
    worker.evolve(data=(train_x, train_y, test_x, test_y))

    # Rebuild the best individual found and visualise its graph.
    tensor_net = worker.get_best_individual()[1]
    tensor_net.draw_graphviz_svg()
    model = tensor_net.build_model()
    model.summary()

    # Train and evaluate with the same loss/optimizer/metrics the search used.
    cfg = worker.master_config
    model.compile(loss=cfg.loss, optimizer=cfg.opt,
                  metrics=cfg.config['metrics'])
    model.fit(train_x, train_y, epochs=10)
    model.evaluate(test_x, test_y)
if __name__ == "__main__":
main()
| 30.607143 | 117 | 0.732205 | import tensorflow as tf
from tensorEvolution import tensor_evolution
def main():
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
data = x_train, y_train, x_test, y_test
worker = tensor_evolution.EvolutionWorker()
worker.evolve(data=data)
best = worker.get_best_individual()
tensor_net = best[1]
tensor_net.draw_graphviz_svg()
model = tensor_net.build_model()
model.summary()
model.compile(loss=worker.master_config.loss, optimizer=worker.master_config.opt,
metrics=worker.master_config.config['metrics'])
model.fit(x_train, y_train, epochs=10)
model.evaluate(x_test, y_test)
if __name__ == "__main__":
main()
| true | true |
1c36825fa3a796d0548f17227cfe023ceee9c27f | 651 | py | Python | tpubar/host.py | trisongz/tpubar | adf44909a9a1afb30aeb38664200f372a1d7f34d | [
"MIT"
] | 3 | 2020-12-13T00:58:33.000Z | 2021-02-17T22:30:44.000Z | tpubar/host.py | trisongz/tpubar | adf44909a9a1afb30aeb38664200f372a1d7f34d | [
"MIT"
] | 1 | 2020-12-09T00:01:59.000Z | 2020-12-09T00:01:59.000Z | tpubar/host.py | trisongz/tpubar | adf44909a9a1afb30aeb38664200f372a1d7f34d | [
"MIT"
] | 1 | 2021-02-17T22:30:47.000Z | 2021-02-17T22:30:47.000Z | import psutil
import platform
from tpubar.utils import run_command
def queryhw():
    """Return basic host CPU information.

    Returns:
        dict: ``{'name': <cpu model string>, 'cores': <physical core count>,
        'threads': <logical core count>}``.

    On Linux and macOS the model name is read by shelling out; if that
    produces no output (command missing, stripped-down container, etc.)
    we fall back to ``platform.processor()`` instead of returning an
    empty name or crashing on ``None``.
    """
    host_os = platform.system()
    cpu_name = ''
    if host_os == 'Linux':
        # lscpu prints e.g. "Model name:  Intel(R) Xeon(R) ..."; keep the
        # part after the colon. `or ''` guards against a None/failed command.
        out = run_command("lscpu |grep 'Model name'") or ''
        cpu_name = out.split(':')[-1].strip()
    elif host_os == 'Darwin':
        # dunno why a TPU would run on macos but i kept it anyways
        out = run_command("sysctl -n machdep.cpu.brand_string | sed -e 's/ *$//'") or ''
        cpu_name = out.strip()
    if not cpu_name:
        # Other OSes, or the shelled-out command yielded nothing.
        cpu_name = platform.processor()
    cores = psutil.cpu_count(logical=False)
    threads = psutil.cpu_count(logical=True)
    return {'name': cpu_name, 'cores': cores, 'threads': threads}
import platform
from tpubar.utils import run_command
def queryhw():
host_os = platform.system()
if host_os == 'Linux':
cpu_name = run_command("lscpu |grep 'Model name'")
cpu_name = cpu_name.split(':')[-1].strip()
elif host_os == 'Darwin':
cpu_name = run_command("sysctl -n machdep.cpu.brand_string | sed -e 's/ *$//'").strip()
else:
cpu_name = platform.processor()
cores = psutil.cpu_count(logical=False)
threads = psutil.cpu_count(logical=True)
return {'name': cpu_name, 'cores': cores, 'threads': threads} | true | true |
1c3682f0aa02ce5a326ed581c79171e4ca8c0219 | 742 | py | Python | esengine/utils/payload/suggesters.py | 0mars/esengine | 7da722e2950a51df46b3c6f8130477c00cff1d46 | [
"MIT"
] | 53 | 2017-04-29T16:39:13.000Z | 2022-01-25T23:46:31.000Z | esengine/utils/payload/suggesters.py | 0mars/esengine | 7da722e2950a51df46b3c6f8130477c00cff1d46 | [
"MIT"
] | 9 | 2017-11-22T18:22:17.000Z | 2020-06-05T19:26:07.000Z | esengine/utils/payload/suggesters.py | 0mars/esengine | 7da722e2950a51df46b3c6f8130477c00cff1d46 | [
"MIT"
] | 13 | 2017-10-27T13:44:04.000Z | 2021-02-28T12:33:04.000Z | from esengine.utils.payload.meta import BaseSuggester, MetaSuggester
from esengine.utils.payload.exception import NoSuggester
# Definition table consumed by MetaSuggester: for each suggester name, the
# tuple of required positional parameters ('args') and the optional keyword
# parameters ('kwargs') that the corresponding Elasticsearch suggester accepts.
SUGGESTERS = {
    'term': {
        'args': ('field', ),
        'kwargs': ('analyzer', 'size', 'sort', 'suggest_mode')
    },
    'phrase': {
        'args': ('field', ),
        'kwargs': (
            'gram_size', 'real_word_error_likelihood', 'confidence',
            'max_errors', 'separator', 'size', 'analyzer', 'shard_size',
            'collate'
        )
    },
    'completion': {
        'args': ('field', ),
        'kwargs': ('size', )
    }
}
# Suggester payload builder; unknown suggester names raise NoSuggester.
class Suggester(BaseSuggester):
    # NOTE(review): `__metaclass__` is the Python 2 metaclass hook; under
    # Python 3 this attribute is ignored (Python 3 would need
    # `class Suggester(BaseSuggester, metaclass=MetaSuggester)`).
    __metaclass__ = MetaSuggester
    _ee_type = 'suggester'      # payload type tag read by the base class
    _definitions = SUGGESTERS   # per-suggester parameter specification
    _exception = NoSuggester    # raised for unknown suggester names
| 24.733333 | 72 | 0.570081 | from esengine.utils.payload.meta import BaseSuggester, MetaSuggester
from esengine.utils.payload.exception import NoSuggester
SUGGESTERS = {
'term': {
'args': ('field', ),
'kwargs': ('analyzer', 'size', 'sort', 'suggest_mode')
},
'phrase': {
'args': ('field', ),
'kwargs': (
'gram_size', 'real_word_error_likelihood', 'confidence',
'max_errors', 'separator', 'size', 'analyzer', 'shard_size',
'collate'
)
},
'completion': {
'args': ('field', ),
'kwargs': ('size', )
}
}
class Suggester(BaseSuggester):
__metaclass__ = MetaSuggester
_ee_type = 'suggester'
_definitions = SUGGESTERS
_exception = NoSuggester
| true | true |
1c36862b813408d8d94891db534a30a2f6c31afe | 1,156 | py | Python | pynab/users.py | bigblue/pynab | 3495bdfef2a3566c0545d09ec3ded6675fe80091 | [
"BSD-3-Clause"
] | 161 | 2015-01-09T12:45:26.000Z | 2019-11-18T18:21:56.000Z | pynab/users.py | bigblue/pynab | 3495bdfef2a3566c0545d09ec3ded6675fe80091 | [
"BSD-3-Clause"
] | 187 | 2015-01-01T02:18:25.000Z | 2018-08-07T00:53:27.000Z | pynab/users.py | bigblue/pynab | 3495bdfef2a3566c0545d09ec3ded6675fe80091 | [
"BSD-3-Clause"
] | 53 | 2015-01-04T09:42:53.000Z | 2019-07-03T11:04:18.000Z | import hashlib
import uuid
from pynab.db import db_session, User
def list():
    """Return [email, api_key, grabs] for every user, ordered by email."""
    with db_session() as db:
        ordered = db.query(User).order_by(User.email)
        return [[u.email, u.api_key, u.grabs] for u in ordered]
def info(email):
    """Return [email, api_key, grabs] for the given email, or None if absent."""
    with db_session() as db:
        match = db.query(User).filter(User.email == email).first()
        if match is None:
            return None
        return [match.email, match.api_key, match.grabs]
def create(email):
    """Create (or upsert) a user with a fresh random API key; return the key."""
    # 32-hex-char key derived from a random UUID.
    api_key = hashlib.md5(uuid.uuid4().bytes).hexdigest()
    with db_session() as db:
        new_user = User()
        new_user.email = email
        new_user.api_key = api_key
        new_user.grabs = 0
        db.merge(new_user)
        return api_key
def delete(email):
    """Delete the user with the given email; return True if a row was removed."""
    with db_session() as db:
        removed = db.query(User).filter(User.email == email).delete()
        if not removed:
            return False
        db.commit()
        return True
| 23.591837 | 69 | 0.584775 | import hashlib
import uuid
from pynab.db import db_session, User
def list():
with db_session() as db:
users = db.query(User).order_by(User.email)
user_list = []
for user in users:
user_list.append([user.email, user.api_key, user.grabs])
return user_list
def info(email):
with db_session() as db:
user = db.query(User).filter(User.email == email).first()
if user:
return [user.email, user.api_key, user.grabs]
else:
return None
def create(email):
api_key = hashlib.md5(uuid.uuid4().bytes).hexdigest()
with db_session() as db:
user = User()
user.email = email
user.api_key = api_key
user.grabs = 0
db.merge(user)
return api_key
def delete(email):
with db_session() as db:
deleted = db.query(User).filter(User.email == email).delete()
if deleted:
db.commit()
return True
return False
| true | true |
1c36862be396602451d98590ad0818253f4223ee | 9,041 | py | Python | hiveone_py/hive.py | hive-one/hive-py | 98655cf0da2fa457f3bb767a78fe43d2497a59d6 | [
"MIT"
] | null | null | null | hiveone_py/hive.py | hive-one/hive-py | 98655cf0da2fa457f3bb767a78fe43d2497a59d6 | [
"MIT"
] | null | null | null | hiveone_py/hive.py | hive-one/hive-py | 98655cf0da2fa457f3bb767a78fe43d2497a59d6 | [
"MIT"
] | null | null | null | import requests
import json
class HiveResponse:
    """Pairs an API response payload with the HTTP ETag it arrived with.

    Delegates repr() and item access to the wrapped payload so callers can
    treat the response like the underlying data.
    """

    def __init__(self, data, etag):
        self.data = data   # parsed response payload
        self.etag = etag   # ETag header value, for conditional re-requests

    def __repr__(self):
        return repr(self.data)

    def __getitem__(self, key):
        # Index straight into the wrapped payload.
        return self.data[key]
class Hive:
def __init__(self, api_key, default_format = 'screen_name', host = 'https://hive.one/'):
if len(api_key) == 0: raise Exception('You must provide an API Key')
self.api_key = api_key
self.default_format = default_format
self.host = host
def available_influencers(self, id_format = None, etag = ''):
if id_format is None:
id_format = self.default_format
else:
if id_format not in ["screen_name", "id"]:
raise Exception("{passed_id_format} is not one of: screen_name, id".format(passed_id_format=id_format))
response = requests.get(
"{host}api/v1/influencers/".format(host=self.host),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
}
)
if response.status_code == 200:
data = response.json()
def return_id(id_arr):
return id_arr[0 if id_format == 'id' else 1]
return HiveResponse(
data=list(map(return_id, data['data']['available'])),
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def top_influencers(self, cluster = 'Crypto', after = 0, sort_by = 'rank', order = 'asc', etag = ''):
sort_map = [
'followers',
'following',
'screen_name',
'change_week',
'score',
'rank',
]
if cluster not in ['Crypto', 'BTC', 'ETH', 'XRP']:
raise Exception("{passed_cluster} is not one of: Crypto, BTC, ETH, XRP".format(passed_cluster=cluster))
if type(after) != int:
raise Exception("after should be type int")
if sort_by not in sort_map:
raise Exception("Sort: {passed_sort} is not supported`".format(passed_sort=sort_by))
if order not in ['asc', 'desc']:
raise Exception("Order: {passed_order} is not supported".format(passed_order=order))
response = requests.get(
"{host}api/v1/influencers/top/".format(host=self.host),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
},
params=(
('cluster', cluster),
('after', after),
('sort_by', sort_by),
('order', order)
)
)
if response.status_code == 200:
data = response.json()
def return_node(item):
return item['node']
return HiveResponse(
data=list(map(return_node, data['data']['people']['edges'])),
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def influencer_details(self, influencer_id = None, id_format = None, rank_type = 'all', include_followers = 0, etag = ''):
if influencer_id is None:
raise Exception('You must provide an influencers ID')
if id_format is None:
id_format = self.default_format
else:
if id_format not in ["screen_name", "id"]:
raise Exception("{passed_id_format} is not one of: screen_name, id".format(passed_id_format=id_format))
if rank_type not in ['all', 'personal']:
raise Exception('Rank Type not one of all, personal')
if include_followers not in [0, 1]:
raise Exception('Include Followres not one of 0, 1')
response = requests.get(
"{host}api/v1/influencers/{id_format}/{influencer_id}".format(
host=self.host,
id_format=id_format,
influencer_id=influencer_id
),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
},
params=(
('rank_type', rank_type),
('include_followers', include_followers),
)
)
if response.status_code == 200:
data = response.json()
return HiveResponse(
data=data['data'],
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def influencer_history(self, influencer_id = None, id_format = None, rank_type = 'all', etag = ''):
if influencer_id is None:
raise Exception('You must provide an influencers ID')
if id_format is None:
id_format = self.default_format
else:
if id_format not in ["screen_name", "id"]:
raise Exception("{passed_id_format} is not one of: screen_name, id".format(passed_id_format=id_format))
if rank_type not in ['all', 'personal']:
raise Exception('Rank Type not one of all, personal')
response = requests.get(
"{host}api/v1/influencers/{id_format}/{influencer_id}/history/".format(
host=self.host,
id_format=id_format,
influencer_id=influencer_id
),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
},
params=(
('rank_type', rank_type),
)
)
if response.status_code == 200:
data = response.json()
return HiveResponse(
data=data,
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def influencer_podcasts(self, influencer_id = None, id_format = None, appearance_type = 'all', after = 0, etag = ''):
if influencer_id is None:
raise Exception('You must provide an influencers ID')
if id_format is None:
id_format = self.default_format
else:
if id_format not in ["screen_name", "id"]:
raise Exception("{passed_id_format} is not one of: screen_name, id".format(passed_id_format=id_format))
if appearance_type not in []:
raise Exception('Appearance Type not one of all, host, guest')
if type(after) != int:
raise Exception('after should be type int')
response = requests.get(
"{host}api/v1/influencers/{id_format}/{influencer_id}/podcasts/".format(
host=self.host,
id_format=id_format,
influencer_id=influencer_id
),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
},
params=(
('appearance_type', appearance_type),
('after', after),
)
)
if response.status_code == 200:
data = response.json()
return HiveResponse(
data=data,
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def influencer_batch(self, influencer_ids = [], rank_type = 'all', include_followers = 0):
if not influencer_ids:
raise Exception('influencerIDS not provided')
if rank_type not in ['all', 'personal']:
raise Exception('Rank Type not one of all, personal')
if include_followers not in [0, 1]:
raise Exception('Include Followres not one of 0, 1')
response = requests.get(
"{host}api/v1/influencers/batch/".format(
host=self.host,
),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key)
},
params=(
('twitter_ids', json.dumps(influencer_ids)),
('rank_type', rank_type),
('include_followers', include_followers),
)
)
if response.status_code == 200:
data = response.json()
return HiveResponse(
data=data['data']['success'],
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass | 35.316406 | 126 | 0.521402 | import requests
import json
class HiveResponse:
def __init__(self, data, etag):
self.data = data
self.etag = etag
def __repr__(self):
return repr(self.data)
def __getitem__(self, key):
return self.data.__getitem__(key)
class Hive:
def __init__(self, api_key, default_format = 'screen_name', host = 'https://hive.one/'):
if len(api_key) == 0: raise Exception('You must provide an API Key')
self.api_key = api_key
self.default_format = default_format
self.host = host
def available_influencers(self, id_format = None, etag = ''):
if id_format is None:
id_format = self.default_format
else:
if id_format not in ["screen_name", "id"]:
raise Exception("{passed_id_format} is not one of: screen_name, id".format(passed_id_format=id_format))
response = requests.get(
"{host}api/v1/influencers/".format(host=self.host),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
}
)
if response.status_code == 200:
data = response.json()
def return_id(id_arr):
return id_arr[0 if id_format == 'id' else 1]
return HiveResponse(
data=list(map(return_id, data['data']['available'])),
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def top_influencers(self, cluster = 'Crypto', after = 0, sort_by = 'rank', order = 'asc', etag = ''):
sort_map = [
'followers',
'following',
'screen_name',
'change_week',
'score',
'rank',
]
if cluster not in ['Crypto', 'BTC', 'ETH', 'XRP']:
raise Exception("{passed_cluster} is not one of: Crypto, BTC, ETH, XRP".format(passed_cluster=cluster))
if type(after) != int:
raise Exception("after should be type int")
if sort_by not in sort_map:
raise Exception("Sort: {passed_sort} is not supported`".format(passed_sort=sort_by))
if order not in ['asc', 'desc']:
raise Exception("Order: {passed_order} is not supported".format(passed_order=order))
response = requests.get(
"{host}api/v1/influencers/top/".format(host=self.host),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
},
params=(
('cluster', cluster),
('after', after),
('sort_by', sort_by),
('order', order)
)
)
if response.status_code == 200:
data = response.json()
def return_node(item):
return item['node']
return HiveResponse(
data=list(map(return_node, data['data']['people']['edges'])),
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def influencer_details(self, influencer_id = None, id_format = None, rank_type = 'all', include_followers = 0, etag = ''):
if influencer_id is None:
raise Exception('You must provide an influencers ID')
if id_format is None:
id_format = self.default_format
else:
if id_format not in ["screen_name", "id"]:
raise Exception("{passed_id_format} is not one of: screen_name, id".format(passed_id_format=id_format))
if rank_type not in ['all', 'personal']:
raise Exception('Rank Type not one of all, personal')
if include_followers not in [0, 1]:
raise Exception('Include Followres not one of 0, 1')
response = requests.get(
"{host}api/v1/influencers/{id_format}/{influencer_id}".format(
host=self.host,
id_format=id_format,
influencer_id=influencer_id
),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
},
params=(
('rank_type', rank_type),
('include_followers', include_followers),
)
)
if response.status_code == 200:
data = response.json()
return HiveResponse(
data=data['data'],
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def influencer_history(self, influencer_id = None, id_format = None, rank_type = 'all', etag = ''):
if influencer_id is None:
raise Exception('You must provide an influencers ID')
if id_format is None:
id_format = self.default_format
else:
if id_format not in ["screen_name", "id"]:
raise Exception("{passed_id_format} is not one of: screen_name, id".format(passed_id_format=id_format))
if rank_type not in ['all', 'personal']:
raise Exception('Rank Type not one of all, personal')
response = requests.get(
"{host}api/v1/influencers/{id_format}/{influencer_id}/history/".format(
host=self.host,
id_format=id_format,
influencer_id=influencer_id
),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
},
params=(
('rank_type', rank_type),
)
)
if response.status_code == 200:
data = response.json()
return HiveResponse(
data=data,
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def influencer_podcasts(self, influencer_id = None, id_format = None, appearance_type = 'all', after = 0, etag = ''):
if influencer_id is None:
raise Exception('You must provide an influencers ID')
if id_format is None:
id_format = self.default_format
else:
if id_format not in ["screen_name", "id"]:
raise Exception("{passed_id_format} is not one of: screen_name, id".format(passed_id_format=id_format))
if appearance_type not in []:
raise Exception('Appearance Type not one of all, host, guest')
if type(after) != int:
raise Exception('after should be type int')
response = requests.get(
"{host}api/v1/influencers/{id_format}/{influencer_id}/podcasts/".format(
host=self.host,
id_format=id_format,
influencer_id=influencer_id
),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key),
"If-None-Match": etag
},
params=(
('appearance_type', appearance_type),
('after', after),
)
)
if response.status_code == 200:
data = response.json()
return HiveResponse(
data=data,
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass
def influencer_batch(self, influencer_ids = [], rank_type = 'all', include_followers = 0):
if not influencer_ids:
raise Exception('influencerIDS not provided')
if rank_type not in ['all', 'personal']:
raise Exception('Rank Type not one of all, personal')
if include_followers not in [0, 1]:
raise Exception('Include Followres not one of 0, 1')
response = requests.get(
"{host}api/v1/influencers/batch/".format(
host=self.host,
),
headers={
"Authorization": "Token {api_key}".format(api_key=self.api_key)
},
params=(
('twitter_ids', json.dumps(influencer_ids)),
('rank_type', rank_type),
('include_followers', include_followers),
)
)
if response.status_code == 200:
data = response.json()
return HiveResponse(
data=data['data']['success'],
etag=response.headers['ETag']
)
elif response.status_code == 304:
return True
else:
pass | true | true |
1c3687dc86338cea79b91a8f77f1d39d9b83244d | 530 | py | Python | greens/routers/exceptions.py | grillazz/fastapi-mongodb | f3a38d93a1255d69218cee9a884bd862c50313be | [
"MIT"
] | 4 | 2021-12-09T20:11:34.000Z | 2022-03-01T20:31:23.000Z | greens/routers/exceptions.py | grillazz/fastapi-mongodb | f3a38d93a1255d69218cee9a884bd862c50313be | [
"MIT"
] | 10 | 2021-09-27T13:03:28.000Z | 2022-02-27T10:28:21.000Z | greens/routers/exceptions.py | grillazz/fastapi-mongodb | f3a38d93a1255d69218cee9a884bd862c50313be | [
"MIT"
] | null | null | null | from fastapi import HTTPException, status
class NotFoundHTTPException(HTTPException):
    """HTTP 404 error with an optional custom detail message."""

    def __init__(self, msg: str):
        # An empty message falls back to the generic default.
        detail = msg or "Requested resource is not found"
        super().__init__(status_code=status.HTTP_404_NOT_FOUND, detail=detail)
class AlreadyExistsHTTPException(HTTPException):
    """HTTP 409 error raised when a document with the same id exists."""

    def __init__(self, msg: str):
        # An empty message falls back to the generic default.
        detail = msg or "Document with specified id already exists"
        super().__init__(status_code=status.HTTP_409_CONFLICT, detail=detail)
| 29.444444 | 79 | 0.669811 | from fastapi import HTTPException, status
class NotFoundHTTPException(HTTPException):
def __init__(self, msg: str):
super().__init__(
status_code=status.HTTP_404_NOT_FOUND,
detail=msg if msg else "Requested resource is not found",
)
class AlreadyExistsHTTPException(HTTPException):
def __init__(self, msg: str):
super().__init__(
status_code=status.HTTP_409_CONFLICT,
detail=msg if msg else "Document with specified id already exists",
)
| true | true |
1c3688346f3b428a588d08006ca5015f5dcfb08f | 452 | py | Python | multiplication_table/multiplication_table.py | hanyugeon/SimplePythonProject | e50729c50d9b3a7fa47403f7a508741637a71a6b | [
"MIT"
] | null | null | null | multiplication_table/multiplication_table.py | hanyugeon/SimplePythonProject | e50729c50d9b3a7fa47403f7a508741637a71a6b | [
"MIT"
] | null | null | null | multiplication_table/multiplication_table.py | hanyugeon/SimplePythonProject | e50729c50d9b3a7fa47403f7a508741637a71a6b | [
"MIT"
] | null | null | null | # 구구단 결과를 불러오는 함수를 적는다.
# import를 활용하여 index.py 에 호출시킬것.
# 자주일어나는 일은 간단 명료하게.
def multiply(n):
    """Print the times table for n, re-prompting until n is between 1 and 9."""
    while not (1 <= n <= 9):
        print('오류 : 1과 9 사이의 숫자를 입력하세요')
        n = int(input('구구단을 외자~ (숫자입력) :'))
    for factor in range(1, 10):
        print('{} x {} = {}'.format(n, factor, n * factor))
# def main():
# print("Testing mt.one function")
#
# test_number = 1
# one(test_number)
#
# print("test is end!")
#
# if __name__='__name__':
# main() | 20.545455 | 45 | 0.526549 |
def multiply(n):
while n < 1 or n > 9:
print('오류 : 1과 9 사이의 숫자를 입력하세요')
n = int(input('구구단을 외자~ (숫자입력) :'))
for m in range(1, 10):
print('%d x %d = %d' % (n, m, n * m))
| true | true |
1c36889438334cbfa61ceaecb38c7913e32b677f | 1,326 | py | Python | audioFile.py | hsabillon7/multiplataforma | 6a3dd7f7fb8d8f885dd2aa11c6908b614c358d98 | [
"MIT"
] | 3 | 2020-02-28T00:55:06.000Z | 2020-03-13T20:38:27.000Z | audioFile.py | hsabillon7/multiplataforma | 6a3dd7f7fb8d8f885dd2aa11c6908b614c358d98 | [
"MIT"
] | null | null | null | audioFile.py | hsabillon7/multiplataforma | 6a3dd7f7fb8d8f885dd2aa11c6908b614c358d98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Programa: audioFile.py
# Objetivo: Accede a los archivos soportados y verifica que
# el archivo actual se puede reproducir.
# Autor: Héctor Sabillón
# Fecha: 25/febrero/2020
from supportedExtensions import Mp3File, WavFile, OggFile
class AudioFile:
    """Dispatch a file to the codec that can play it.

    The file is only played when one of the supported extension handlers
    (mp3, wav, ogg) matches the file name; otherwise an error message is
    printed.
    """

    def __init__(self, file_name):
        """Try each supported format in order and play the first match.

        file_name: path whose extension decides which codec is used.
        """
        # Flattened from three nested if/else levels: try the handlers in
        # the same order as before (mp3, then wav, then ogg) and stop at
        # the first one whose extension matches.
        for handler_cls in (Mp3File, WavFile, OggFile):
            handler = handler_cls(file_name)
            if file_name.endswith(handler.ext):
                handler.play()
                return
        # No handler matched: report the unsupported extension.
        print("No se puede reproducir el archivo.")
        print("Razón: ")
        print("¡Extensión no soportada por el reproductor!")
| 34 | 72 | 0.606335 |
from supportedExtensions import Mp3File, WavFile, OggFile
class AudioFile:
def __init__(self, file_name):
valid_format = Mp3File(file_name)
if not file_name.endswith(valid_format.ext):
valid_format = WavFile(file_name)
if not file_name.endswith(valid_format.ext):
valid_format = OggFile(file_name)
if not file_name.endswith(valid_format.ext):
print("No se puede reproducir el archivo.")
print("Razón: ")
print("¡Extensión no soportada por el reproductor!")
else:
valid_format.play()
else:
valid_format.play()
else:
valid_format.play()
| true | true |
1c3688ef5fb7b5378fe2a18ccdd8e96411bacdbf | 20,381 | py | Python | spark_auto_mapper_fhir/value_sets/practice_setting_code_value_set.py | imranq2/SparkAutoMapper.FHIR | dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2 | [
"Apache-2.0"
] | 1 | 2020-10-31T23:25:07.000Z | 2020-10-31T23:25:07.000Z | spark_auto_mapper_fhir/value_sets/practice_setting_code_value_set.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper_fhir/value_sets/practice_setting_code_value_set.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class PracticeSettingCodeValueSetCode(GenericTypeCode):
    """
    PracticeSettingCodeValueSet
    From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
    This is the code representing the clinical specialty of the clinician or
    provider who interacted with, treated, or provided a service to/for the
    patient. The value set used for clinical specialty has been limited by HITSP
    to the value set reproduced from HITSP C80 Table 2-149 Clinical Specialty
    Value Set Definition.
    """

    def __init__(self, value: AutoMapperTextInputType):
        # Delegate storage of the raw code value to GenericTypeCode.
        super().__init__(value=value)

    """
    http://snomed.info/sct
    """
    # URI of the code system (SNOMED CT) that every code in this value set
    # belongs to.  NOTE: this file is auto-generated; do not hand-edit.
    codeset: FhirUri = "http://snomed.info/sct"
class PracticeSettingCodeValueSetCodeValues:
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
AdultMentalIllness = PracticeSettingCodeValueSetCode("408467006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Anesthetics = PracticeSettingCodeValueSetCode("394577000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
AudiologicalMedicine = PracticeSettingCodeValueSetCode("394578005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
BloodBankingAndTransfusionMedicine = PracticeSettingCodeValueSetCode("421661004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
BurnsCare = PracticeSettingCodeValueSetCode("408462000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Cardiology = PracticeSettingCodeValueSetCode("394579002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalCytogeneticsAndMolecularGenetics = PracticeSettingCodeValueSetCode(
"394804000"
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalGenetics = PracticeSettingCodeValueSetCode("394580004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalHematology = PracticeSettingCodeValueSetCode("394803006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalImmunology = PracticeSettingCodeValueSetCode("408480009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalMicrobiology = PracticeSettingCodeValueSetCode("408454008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalNeuro_physiology = PracticeSettingCodeValueSetCode("394809005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalOncology = PracticeSettingCodeValueSetCode("394592004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalPharmacology = PracticeSettingCodeValueSetCode("394600006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ClinicalPhysiology = PracticeSettingCodeValueSetCode("394601005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
CommunityMedicine = PracticeSettingCodeValueSetCode("394581000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
CriticalCareMedicine = PracticeSettingCodeValueSetCode("408478003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
DentalMedicineSpecialties = PracticeSettingCodeValueSetCode("394812008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Dental_GeneralDentalPractice = PracticeSettingCodeValueSetCode("408444009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Dermatology = PracticeSettingCodeValueSetCode("394582007")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
DiabeticMedicine = PracticeSettingCodeValueSetCode("408475000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
DiveMedicine = PracticeSettingCodeValueSetCode("410005002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Endocrinology = PracticeSettingCodeValueSetCode("394583002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
FamilyPractice = PracticeSettingCodeValueSetCode("419772000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Gastroenterology = PracticeSettingCodeValueSetCode("394584008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
GeneralMedicalPractice = PracticeSettingCodeValueSetCode("408443003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
GeneralMedicine = PracticeSettingCodeValueSetCode("394802001")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
GeneralPathology = PracticeSettingCodeValueSetCode("394915009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
GeneralPractice = PracticeSettingCodeValueSetCode("394814009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Genito_urinaryMedicine = PracticeSettingCodeValueSetCode("394808002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
GeriatricMedicine = PracticeSettingCodeValueSetCode("394811001")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
GynecologicalOncology = PracticeSettingCodeValueSetCode("408446006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Gynecology = PracticeSettingCodeValueSetCode("394586005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Hematopathology = PracticeSettingCodeValueSetCode("394916005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Hepatology = PracticeSettingCodeValueSetCode("408472002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Histopathology = PracticeSettingCodeValueSetCode("394597005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Immunopathology = PracticeSettingCodeValueSetCode("394598000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
InfectiousDiseases = PracticeSettingCodeValueSetCode("394807007")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
InternalMedicine = PracticeSettingCodeValueSetCode("419192003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
LearningDisability = PracticeSettingCodeValueSetCode("408468001")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
MedicalOncology = PracticeSettingCodeValueSetCode("394593009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
MedicalOphthalmology = PracticeSettingCodeValueSetCode("394813003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
MilitaryMedicine = PracticeSettingCodeValueSetCode("410001006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Nephrology = PracticeSettingCodeValueSetCode("394589003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Neurology = PracticeSettingCodeValueSetCode("394591006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Neuropathology = PracticeSettingCodeValueSetCode("394599008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
NuclearMedicine = PracticeSettingCodeValueSetCode("394649004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Obstetrics = PracticeSettingCodeValueSetCode("408470005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ObstetricsAndGynecology = PracticeSettingCodeValueSetCode("394585009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
OccupationalMedicine = PracticeSettingCodeValueSetCode("394821009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
OphthalmicSurgery = PracticeSettingCodeValueSetCode("422191005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Ophthalmology = PracticeSettingCodeValueSetCode("394594003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
OsteopathicManipulativeMedicine = PracticeSettingCodeValueSetCode("416304004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Otolaryngology = PracticeSettingCodeValueSetCode("418960008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PainManagement = PracticeSettingCodeValueSetCode("394882004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PalliativeMedicine = PracticeSettingCodeValueSetCode("394806003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Pediatric_ChildAndAdolescent_Psychiatry = PracticeSettingCodeValueSetCode(
"394588006"
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricCardiology = PracticeSettingCodeValueSetCode("408459003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricDentistry = PracticeSettingCodeValueSetCode("394607009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricEndocrinology = PracticeSettingCodeValueSetCode("419610006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricGastroenterology = PracticeSettingCodeValueSetCode("418058008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricGenetics = PracticeSettingCodeValueSetCode("420208008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricHematology = PracticeSettingCodeValueSetCode("418652005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricImmunology = PracticeSettingCodeValueSetCode("418535003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricInfectiousDiseases = PracticeSettingCodeValueSetCode("418862001")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricNephrology = PracticeSettingCodeValueSetCode("419365004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricOncology = PracticeSettingCodeValueSetCode("418002000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricOphthalmology = PracticeSettingCodeValueSetCode("419983000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricPulmonology = PracticeSettingCodeValueSetCode("419170002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricRheumatology = PracticeSettingCodeValueSetCode("419472004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricSurgery = PracticeSettingCodeValueSetCode("394539006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PediatricSurgery_boneMarrowTransplantation = PracticeSettingCodeValueSetCode(
"420112009"
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PreventiveMedicine = PracticeSettingCodeValueSetCode("409968004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Psychiatry = PracticeSettingCodeValueSetCode("394587001")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Psychotherapy = PracticeSettingCodeValueSetCode("394913002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PublicHealthMedicine = PracticeSettingCodeValueSetCode("408440000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
PulmonaryMedicine = PracticeSettingCodeValueSetCode("418112009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
RadiationOncology = PracticeSettingCodeValueSetCode("419815003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Radiology = PracticeSettingCodeValueSetCode("394914008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Radiology_InterventionalRadiology = PracticeSettingCodeValueSetCode("408455009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Rehabilitation = PracticeSettingCodeValueSetCode("394602003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
RespiteCare = PracticeSettingCodeValueSetCode("408447002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Rheumatology = PracticeSettingCodeValueSetCode("394810000")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
SleepStudies = PracticeSettingCodeValueSetCode("408450004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_BoneAndMarrowTransplantation = PracticeSettingCodeValueSetCode("408476004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_BreastSurgery = PracticeSettingCodeValueSetCode("408469009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_CardiacSurgery = PracticeSettingCodeValueSetCode("408466002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_CardiothoracicTransplantation = PracticeSettingCodeValueSetCode("408471009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_ColorectalSurgery = PracticeSettingCodeValueSetCode("408464004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dental_Endodontics = PracticeSettingCodeValueSetCode("408441001")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dental_OralAndMaxillofacialSurgery = PracticeSettingCodeValueSetCode(
"408465003"
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dental_OralSurgery = PracticeSettingCodeValueSetCode("394605001")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dental_Orthodontics = PracticeSettingCodeValueSetCode("394608004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dental_PeriodontalSurgery = PracticeSettingCodeValueSetCode("408461007")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dental_ProstheticDentistry_Prosthodontics_ = (
PracticeSettingCodeValueSetCode("408460008")
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dental_surgical_Prosthodontics = PracticeSettingCodeValueSetCode(
"408460008"
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dentistry_RestorativeDentistry = PracticeSettingCodeValueSetCode(
"394606000"
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dentistry_surgical = PracticeSettingCodeValueSetCode("408449004")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Dentistry_surgical_Orthodontics = PracticeSettingCodeValueSetCode(
"394608004"
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_DermatologicSurgery = PracticeSettingCodeValueSetCode("418018006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Ear_NoseAndThroatSurgery = PracticeSettingCodeValueSetCode("394604002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_general = PracticeSettingCodeValueSetCode("394609007")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_HepatobiliaryAndPancreaticSurgery = PracticeSettingCodeValueSetCode(
"408474001"
)
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Neurosurgery = PracticeSettingCodeValueSetCode("394610002")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_PlasticSurgery = PracticeSettingCodeValueSetCode("394611003")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_TransplantationSurgery = PracticeSettingCodeValueSetCode("408477008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_TraumaAndOrthopedics = PracticeSettingCodeValueSetCode("394801008")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgery_Vascular = PracticeSettingCodeValueSetCode("408463005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
SurgicalOncology = PracticeSettingCodeValueSetCode("419321007")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Surgical_Accident_Emergency = PracticeSettingCodeValueSetCode("394576009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
ThoracicMedicine = PracticeSettingCodeValueSetCode("394590007")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Toxicology = PracticeSettingCodeValueSetCode("409967009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
TropicalMedicine = PracticeSettingCodeValueSetCode("408448007")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
UrologicalOncology = PracticeSettingCodeValueSetCode("419043006")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
Urology = PracticeSettingCodeValueSetCode("394612005")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
MedicalSpecialty_OTHER_NOTLISTED = PracticeSettingCodeValueSetCode("394733009")
"""
From: http://hl7.org/fhir/ValueSet/c80-practice-codes in valuesets.xml
"""
SurgicalSpecialty_OTHER_NOTLISTED = PracticeSettingCodeValueSetCode("394732004")
| 39.26975 | 88 | 0.711692 | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
class PracticeSettingCodeValueSetCode(GenericTypeCode):
    """One code from the FHIR practice-setting value set.

    Wraps a single SNOMED CT concept id; the value set itself is
    http://hl7.org/fhir/ValueSet/c80-practice-codes.
    """

    # All codes in this value set come from SNOMED CT.
    codeset: FhirUri = "http://snomed.info/sct"

    def __init__(self, value: AutoMapperTextInputType):
        """Store *value* (the SNOMED concept id) on the generic code base."""
        super().__init__(value=value)
class PracticeSettingCodeValueSetCodeValues:
    """Named constants for the FHIR practice-setting value set.

    Every attribute wraps one SNOMED CT concept id drawn from
    http://hl7.org/fhir/ValueSet/c80-practice-codes (valuesets.xml).
    """

    AdultMentalIllness = PracticeSettingCodeValueSetCode("408467006")
    Anesthetics = PracticeSettingCodeValueSetCode("394577000")
    AudiologicalMedicine = PracticeSettingCodeValueSetCode("394578005")
    BloodBankingAndTransfusionMedicine = PracticeSettingCodeValueSetCode("421661004")
    BurnsCare = PracticeSettingCodeValueSetCode("408462000")
    Cardiology = PracticeSettingCodeValueSetCode("394579002")
    ClinicalCytogeneticsAndMolecularGenetics = PracticeSettingCodeValueSetCode(
        "394804000"
    )
    ClinicalGenetics = PracticeSettingCodeValueSetCode("394580004")
    ClinicalHematology = PracticeSettingCodeValueSetCode("394803006")
    ClinicalImmunology = PracticeSettingCodeValueSetCode("408480009")
    ClinicalMicrobiology = PracticeSettingCodeValueSetCode("408454008")
    ClinicalNeuro_physiology = PracticeSettingCodeValueSetCode("394809005")
    ClinicalOncology = PracticeSettingCodeValueSetCode("394592004")
    ClinicalPharmacology = PracticeSettingCodeValueSetCode("394600006")
    ClinicalPhysiology = PracticeSettingCodeValueSetCode("394601005")
    CommunityMedicine = PracticeSettingCodeValueSetCode("394581000")
    CriticalCareMedicine = PracticeSettingCodeValueSetCode("408478003")
    DentalMedicineSpecialties = PracticeSettingCodeValueSetCode("394812008")
    Dental_GeneralDentalPractice = PracticeSettingCodeValueSetCode("408444009")
    Dermatology = PracticeSettingCodeValueSetCode("394582007")
    DiabeticMedicine = PracticeSettingCodeValueSetCode("408475000")
    DiveMedicine = PracticeSettingCodeValueSetCode("410005002")
    Endocrinology = PracticeSettingCodeValueSetCode("394583002")
    FamilyPractice = PracticeSettingCodeValueSetCode("419772000")
    Gastroenterology = PracticeSettingCodeValueSetCode("394584008")
    GeneralMedicalPractice = PracticeSettingCodeValueSetCode("408443003")
    GeneralMedicine = PracticeSettingCodeValueSetCode("394802001")
    GeneralPathology = PracticeSettingCodeValueSetCode("394915009")
    GeneralPractice = PracticeSettingCodeValueSetCode("394814009")
    Genito_urinaryMedicine = PracticeSettingCodeValueSetCode("394808002")
    GeriatricMedicine = PracticeSettingCodeValueSetCode("394811001")
    GynecologicalOncology = PracticeSettingCodeValueSetCode("408446006")
    Gynecology = PracticeSettingCodeValueSetCode("394586005")
    Hematopathology = PracticeSettingCodeValueSetCode("394916005")
    Hepatology = PracticeSettingCodeValueSetCode("408472002")
    Histopathology = PracticeSettingCodeValueSetCode("394597005")
    Immunopathology = PracticeSettingCodeValueSetCode("394598000")
    InfectiousDiseases = PracticeSettingCodeValueSetCode("394807007")
    InternalMedicine = PracticeSettingCodeValueSetCode("419192003")
    LearningDisability = PracticeSettingCodeValueSetCode("408468001")
    MedicalOncology = PracticeSettingCodeValueSetCode("394593009")
    MedicalOphthalmology = PracticeSettingCodeValueSetCode("394813003")
    MilitaryMedicine = PracticeSettingCodeValueSetCode("410001006")
    Nephrology = PracticeSettingCodeValueSetCode("394589003")
    Neurology = PracticeSettingCodeValueSetCode("394591006")
    Neuropathology = PracticeSettingCodeValueSetCode("394599008")
    NuclearMedicine = PracticeSettingCodeValueSetCode("394649004")
    Obstetrics = PracticeSettingCodeValueSetCode("408470005")
    ObstetricsAndGynecology = PracticeSettingCodeValueSetCode("394585009")
    OccupationalMedicine = PracticeSettingCodeValueSetCode("394821009")
    OphthalmicSurgery = PracticeSettingCodeValueSetCode("422191005")
    Ophthalmology = PracticeSettingCodeValueSetCode("394594003")
    OsteopathicManipulativeMedicine = PracticeSettingCodeValueSetCode("416304004")
    Otolaryngology = PracticeSettingCodeValueSetCode("418960008")
    PainManagement = PracticeSettingCodeValueSetCode("394882004")
    PalliativeMedicine = PracticeSettingCodeValueSetCode("394806003")
    Pediatric_ChildAndAdolescent_Psychiatry = PracticeSettingCodeValueSetCode(
        "394588006"
    )
    PediatricCardiology = PracticeSettingCodeValueSetCode("408459003")
    PediatricDentistry = PracticeSettingCodeValueSetCode("394607009")
    PediatricEndocrinology = PracticeSettingCodeValueSetCode("419610006")
    PediatricGastroenterology = PracticeSettingCodeValueSetCode("418058008")
    PediatricGenetics = PracticeSettingCodeValueSetCode("420208008")
    PediatricHematology = PracticeSettingCodeValueSetCode("418652005")
    PediatricImmunology = PracticeSettingCodeValueSetCode("418535003")
    PediatricInfectiousDiseases = PracticeSettingCodeValueSetCode("418862001")
    PediatricNephrology = PracticeSettingCodeValueSetCode("419365004")
    PediatricOncology = PracticeSettingCodeValueSetCode("418002000")
    PediatricOphthalmology = PracticeSettingCodeValueSetCode("419983000")
    PediatricPulmonology = PracticeSettingCodeValueSetCode("419170002")
    PediatricRheumatology = PracticeSettingCodeValueSetCode("419472004")
    PediatricSurgery = PracticeSettingCodeValueSetCode("394539006")
    PediatricSurgery_boneMarrowTransplantation = PracticeSettingCodeValueSetCode(
        "420112009"
    )
    PreventiveMedicine = PracticeSettingCodeValueSetCode("409968004")
    Psychiatry = PracticeSettingCodeValueSetCode("394587001")
    Psychotherapy = PracticeSettingCodeValueSetCode("394913002")
    PublicHealthMedicine = PracticeSettingCodeValueSetCode("408440000")
    PulmonaryMedicine = PracticeSettingCodeValueSetCode("418112009")
    RadiationOncology = PracticeSettingCodeValueSetCode("419815003")
    Radiology = PracticeSettingCodeValueSetCode("394914008")
    Radiology_InterventionalRadiology = PracticeSettingCodeValueSetCode("408455009")
    Rehabilitation = PracticeSettingCodeValueSetCode("394602003")
    RespiteCare = PracticeSettingCodeValueSetCode("408447002")
    Rheumatology = PracticeSettingCodeValueSetCode("394810000")
    SleepStudies = PracticeSettingCodeValueSetCode("408450004")
    Surgery_BoneAndMarrowTransplantation = PracticeSettingCodeValueSetCode("408476004")
    Surgery_BreastSurgery = PracticeSettingCodeValueSetCode("408469009")
    Surgery_CardiacSurgery = PracticeSettingCodeValueSetCode("408466002")
    Surgery_CardiothoracicTransplantation = PracticeSettingCodeValueSetCode("408471009")
    Surgery_ColorectalSurgery = PracticeSettingCodeValueSetCode("408464004")
    Surgery_Dental_Endodontics = PracticeSettingCodeValueSetCode("408441001")
    Surgery_Dental_OralAndMaxillofacialSurgery = PracticeSettingCodeValueSetCode(
        "408465003"
    )
    Surgery_Dental_OralSurgery = PracticeSettingCodeValueSetCode("394605001")
    Surgery_Dental_Orthodontics = PracticeSettingCodeValueSetCode("394608004")
    Surgery_Dental_PeriodontalSurgery = PracticeSettingCodeValueSetCode("408461007")
    Surgery_Dental_ProstheticDentistry_Prosthodontics_ = (
        PracticeSettingCodeValueSetCode("408460008")
    )
    Surgery_Dental_surgical_Prosthodontics = PracticeSettingCodeValueSetCode(
        "408460008"
    )
    Surgery_Dentistry_RestorativeDentistry = PracticeSettingCodeValueSetCode(
        "394606000"
    )
    Surgery_Dentistry_surgical = PracticeSettingCodeValueSetCode("408449004")
    Surgery_Dentistry_surgical_Orthodontics = PracticeSettingCodeValueSetCode(
        "394608004"
    )
    Surgery_DermatologicSurgery = PracticeSettingCodeValueSetCode("418018006")
    Surgery_Ear_NoseAndThroatSurgery = PracticeSettingCodeValueSetCode("394604002")
    Surgery_general = PracticeSettingCodeValueSetCode("394609007")
    Surgery_HepatobiliaryAndPancreaticSurgery = PracticeSettingCodeValueSetCode(
        "408474001"
    )
    Surgery_Neurosurgery = PracticeSettingCodeValueSetCode("394610002")
    Surgery_PlasticSurgery = PracticeSettingCodeValueSetCode("394611003")
    Surgery_TransplantationSurgery = PracticeSettingCodeValueSetCode("408477008")
    Surgery_TraumaAndOrthopedics = PracticeSettingCodeValueSetCode("394801008")
    Surgery_Vascular = PracticeSettingCodeValueSetCode("408463005")
    SurgicalOncology = PracticeSettingCodeValueSetCode("419321007")
    Surgical_Accident_Emergency = PracticeSettingCodeValueSetCode("394576009")
    ThoracicMedicine = PracticeSettingCodeValueSetCode("394590007")
    Toxicology = PracticeSettingCodeValueSetCode("409967009")
    TropicalMedicine = PracticeSettingCodeValueSetCode("408448007")
    UrologicalOncology = PracticeSettingCodeValueSetCode("419043006")
    Urology = PracticeSettingCodeValueSetCode("394612005")
    MedicalSpecialty_OTHER_NOTLISTED = PracticeSettingCodeValueSetCode("394733009")
    SurgicalSpecialty_OTHER_NOTLISTED = PracticeSettingCodeValueSetCode("394732004")
| true | true |
1c368a5b14dca3856beeafe64910e58644b4fbed | 9,803 | py | Python | tests/conftest.py | adamchainz/sentry-python | d8c161fb289b44b5cd4b83d0bb31112031925713 | [
"BSD-2-Clause"
] | null | null | null | tests/conftest.py | adamchainz/sentry-python | d8c161fb289b44b5cd4b83d0bb31112031925713 | [
"BSD-2-Clause"
] | null | null | null | tests/conftest.py | adamchainz/sentry-python | d8c161fb289b44b5cd4b83d0bb31112031925713 | [
"BSD-2-Clause"
] | null | null | null | import os
import json
import pytest
import jsonschema
import gevent
import eventlet
import sentry_sdk
from sentry_sdk._compat import reraise, string_types, iteritems
from sentry_sdk.transport import Transport
from sentry_sdk.envelope import Envelope
from sentry_sdk.utils import capture_internal_exceptions
from tests import _warning_recorder, _warning_recorder_mgr
# Path to the Sentry event JSON schema checked out next to the repo.
# When the checkout is missing, SENTRY_EVENT_SCHEMA stays None and
# schema validation is silently skipped.
_SENTRY_EVENT_SCHEMA_PATH = "./checkouts/data-schemas/relay/event.schema.json"

if os.path.isfile(_SENTRY_EVENT_SCHEMA_PATH):
    with open(_SENTRY_EVENT_SCHEMA_PATH) as schema_file:
        SENTRY_EVENT_SCHEMA = json.load(schema_file)
else:
    SENTRY_EVENT_SCHEMA = None
# Tests that request the `benchmark` fixture should still run when
# pytest-benchmark is not installed; provide a stand-in that simply
# calls the benchmarked function once.
try:
    import pytest_benchmark
except ImportError:
    pytest_benchmark = None

if pytest_benchmark is None:

    @pytest.fixture
    def benchmark():
        return lambda func: func()


# Either way, do not leak the probe name into the module namespace.
del pytest_benchmark
@pytest.fixture(autouse=True)
def internal_exceptions(request, monkeypatch):
    """Turn SDK-internal (normally swallowed) exceptions into test errors.

    Captured ``exc_info`` tuples are re-raised in a finalizer so that any
    internal exception fails the test.  Tests marked with
    ``tests_internal_exceptions`` opt out and keep the real behavior.
    """
    captured = []

    if "tests_internal_exceptions" in request.keywords:
        return

    def _record(self, exc_info):
        captured.append(exc_info)

    @request.addfinalizer
    def _reraise_captured():
        for exc_info in captured:
            reraise(*exc_info)

    monkeypatch.setattr(
        sentry_sdk.Hub, "_capture_internal_exception", _record
    )

    return captured
@pytest.fixture(autouse=True, scope="session")
def _capture_internal_warnings():
    """At session end, fail on unexpected warnings related to the SDK.

    Known-noisy warnings from third-party packages and old framework
    versions are filtered out; anything else raises ``AssertionError``.
    """
    yield

    _warning_recorder_mgr.__exit__(None, None, None)

    for warning in _warning_recorder:
        try:
            # ResourceWarning does not exist on old interpreters.
            if isinstance(warning.message, ResourceWarning):
                continue
        except NameError:
            pass

        message = str(warning.message)
        filename = str(warning.filename)

        # Only warnings originating from (or mentioning) the SDK matter.
        if "sentry_sdk" not in filename and "sentry-sdk" not in filename:
            continue

        # pytest-django
        if "getfuncargvalue" in message:
            continue

        # Happens when re-initializing the SDK
        if "but it was only enabled on init()" in message:
            continue

        # sanic's usage of aiohttp for test client
        if "verify_ssl is deprecated, use ssl=False instead" in message:
            continue

        if "getargspec" in message and filename.endswith(
            ("pyramid/config/util.py", "pyramid/config/views.py")
        ):
            continue

        if "isAlive() is deprecated" in message and filename.endswith(
            "celery/utils/timer2.py"
        ):
            continue

        if "collections.abc" in message and filename.endswith(
            ("celery/canvas.py", "werkzeug/datastructures.py", "tornado/httputil.py")
        ):
            continue

        # Django 1.7 emits a (seemingly) false-positive warning for our test
        # app and suggests to use a middleware that does not exist in later
        # Django versions.
        if "SessionAuthenticationMiddleware" in message:
            continue

        if "Something has already installed a non-asyncio" in message:
            continue

        if "dns.hash" in message or "dns/namedict" in filename:
            continue

        raise AssertionError(warning)
@pytest.fixture
def monkeypatch_test_transport(monkeypatch, validate_event_schema):
    """Return a callable that swaps a client's transport for TestTransport.

    The injected transport sanity-checks every outgoing event (string keys
    only, schema-valid) and asserts that error events are not wrapped in
    envelopes.
    """

    def _assert_string_keys(mapping):
        # Event payloads must be JSON objects keyed by strings, recursively.
        for key, value in iteritems(mapping):
            assert isinstance(key, string_types)
            if isinstance(value, dict):
                _assert_string_keys(value)

    def _check_event(event):
        with capture_internal_exceptions():
            _assert_string_keys(event)
            validate_event_schema(event)

    def _check_envelope(envelope):
        with capture_internal_exceptions():
            # Assert error events are sent without envelope to server, for compat.
            assert not any(item.data_category == "error" for item in envelope.items)
            assert not any(item.get_event() is not None for item in envelope.items)

    def _patch(client):
        monkeypatch.setattr(
            client, "transport", TestTransport(_check_event, _check_envelope)
        )

    return _patch
@pytest.fixture
def validate_event_schema(tmpdir):
    """Return a validator checking events against the Sentry event schema.

    A no-op when the schema checkout is unavailable (SENTRY_EVENT_SCHEMA
    is falsy).
    """

    def _validate(event):
        if SENTRY_EVENT_SCHEMA:
            jsonschema.validate(instance=event, schema=SENTRY_EVENT_SCHEMA)

    return _validate
@pytest.fixture
def sentry_init(monkeypatch_test_transport, request):
    """Yield a function that initializes the SDK for a single test.

    Unless the test supplies its own ``transport``, the client receives
    the checking TestTransport.  A surrounding ``Hub(None)`` context keeps
    SDK state from leaking between tests.
    """

    def _init(*a, **kw):
        hub = sentry_sdk.Hub.current
        client = sentry_sdk.Client(*a, **kw)
        hub.bind_client(client)
        if "transport" not in kw:
            monkeypatch_test_transport(sentry_sdk.Hub.current.client)

    if request.node.get_closest_marker("forked"):
        # Do not run isolation if the test is already running in
        # ultimate isolation (seems to be required for celery tests that
        # fork)
        yield _init
    else:
        with sentry_sdk.Hub(None):
            yield _init
class TestTransport(Transport):
    """Transport stub that routes captures into test-provided callbacks."""

    def __init__(self, capture_event_callback, capture_envelope_callback):
        Transport.__init__(self)
        # No background worker: nothing is queued or actually sent.
        self._queue = None
        self.capture_event = capture_event_callback
        self.capture_envelope = capture_envelope_callback
@pytest.fixture
def capture_events(monkeypatch):
    """Fixture factory recording every event the current client captures.

    Calling the returned function patches the active client's transport
    and returns a list that fills up as events are captured.  Events and
    transactions arriving inside envelopes are unpacked and recorded too.
    """

    def _start_recording():
        recorded = []
        transport = sentry_sdk.Hub.current.client.transport
        original_capture_event = transport.capture_event
        original_capture_envelope = transport.capture_envelope

        def _capture_event(event):
            recorded.append(event)
            return original_capture_event(event)

        def _capture_envelope(envelope):
            # Re-route event/transaction items through the (patched)
            # capture_event so they end up in `recorded` as well.
            for item in envelope:
                if item.headers.get("type") in ("event", "transaction"):
                    transport.capture_event(item.payload.json)
            return original_capture_envelope(envelope)

        monkeypatch.setattr(transport, "capture_event", _capture_event)
        monkeypatch.setattr(transport, "capture_envelope", _capture_envelope)
        return recorded

    return _start_recording
@pytest.fixture
def capture_envelopes(monkeypatch):
    """Fixture factory recording every envelope the current client sends.

    Plain events are wrapped into a single-item Envelope so the caller
    always sees a homogeneous list of envelopes.
    """

    def _start_recording():
        recorded = []
        transport = sentry_sdk.Hub.current.client.transport
        original_capture_event = transport.capture_event
        original_capture_envelope = transport.capture_envelope

        def _capture_event(event):
            envelope = Envelope()
            envelope.add_event(event)
            recorded.append(envelope)
            return original_capture_event(event)

        def _capture_envelope(envelope):
            recorded.append(envelope)
            return original_capture_envelope(envelope)

        monkeypatch.setattr(transport, "capture_event", _capture_event)
        monkeypatch.setattr(transport, "capture_envelope", _capture_envelope)
        return recorded

    return _start_recording
@pytest.fixture
def capture_events_forksafe(monkeypatch, capture_events, request):
    """Like ``capture_events``, but readable from the parent after a fork.

    Events are serialized as JSON lines into an OS pipe, so captures made
    in a forked child can be read back through the returned
    EventStreamReader.  ``client.flush`` is patched to emit a marker line.
    """

    def _start_recording():
        capture_events()

        read_fd, write_fd = os.pipe()
        read_end = os.fdopen(read_fd, "rb", 0)
        write_end = os.fdopen(write_fd, "wb", 0)

        test_client = sentry_sdk.Hub.current.client
        original_capture_event = test_client.transport.capture_event

        def _capture_event(event):
            write_end.write(json.dumps(event).encode("utf-8"))
            write_end.write(b"\n")
            return original_capture_event(event)

        def _flush(timeout=None, callback=None):
            write_end.write(b"flush\n")

        monkeypatch.setattr(test_client.transport, "capture_event", _capture_event)
        monkeypatch.setattr(test_client, "flush", _flush)

        return EventStreamReader(read_end)

    return _start_recording
class EventStreamReader(object):
    """Reads the newline-delimited stream written by capture_events_forksafe."""

    def __init__(self, file):
        self.file = file

    def read_event(self):
        """Return the next event, decoded from one JSON line."""
        line = self.file.readline()
        return json.loads(line.decode("utf-8"))

    def read_flush(self):
        """Consume the next line, asserting it is a flush marker."""
        assert self.file.readline() == b"flush\n"
# scope=session ensures that fixture is run earlier
@pytest.fixture(
    scope="session",
    params=[None, "eventlet", "gevent"],
    ids=("threads", "eventlet", "greenlet"),
)
def maybe_monkeypatched_threading(request):
    """Parametrize the session over plain threads, eventlet and gevent.

    Applies the requested monkey-patching once per session; known
    interpreter-specific patching failures are skipped, everything else
    propagates.
    """
    flavor = request.param

    if flavor == "eventlet":
        try:
            eventlet.monkey_patch()
        except AttributeError as e:
            if "'thread.RLock' object has no attribute" in str(e):
                # https://bitbucket.org/pypy/pypy/issues/2962/gevent-cannot-patch-rlock-under-pypy-27-7
                pytest.skip("https://github.com/eventlet/eventlet/issues/546")
            else:
                raise
    elif flavor == "gevent":
        try:
            gevent.monkey.patch_all()
        except Exception as e:
            if "_RLock__owner" in str(e):
                pytest.skip("https://github.com/gevent/gevent/issues/1380")
            else:
                raise
    else:
        assert flavor is None

    return flavor
@pytest.fixture
def render_span_tree():
    """Return a helper that renders a transaction's span tree as text.

    Useful for snapshot-style assertions on span parent/child structure.
    """

    def _render_tree(event):
        assert event["type"] == "transaction"

        # Group spans under their parent's span id.
        children_of = {}
        for span in event["spans"]:
            children_of.setdefault(span["parent_span_id"], []).append(span)

        def _render_subtree(span):
            yield "- op={}: description={}".format(
                json.dumps(span.get("op")), json.dumps(span.get("description"))
            )
            for child in children_of.get(span["span_id"]) or ():
                for line in _render_subtree(child):
                    yield "  {}".format(line)

        root_span = event["contexts"]["trace"]

        # Return a list instead of a multiline string because black will know better how to format that
        return "\n".join(_render_subtree(root_span))

    return _render_tree
| 29.887195 | 103 | 0.649291 | import os
import json
import pytest
import jsonschema
import gevent
import eventlet
import sentry_sdk
from sentry_sdk._compat import reraise, string_types, iteritems
from sentry_sdk.transport import Transport
from sentry_sdk.envelope import Envelope
from sentry_sdk.utils import capture_internal_exceptions
from tests import _warning_recorder, _warning_recorder_mgr
# Path to the Sentry event JSON schema checked out next to the repo.
# When the checkout is missing, SENTRY_EVENT_SCHEMA stays None and
# schema validation is silently skipped.
_SENTRY_EVENT_SCHEMA_PATH = "./checkouts/data-schemas/relay/event.schema.json"

if os.path.isfile(_SENTRY_EVENT_SCHEMA_PATH):
    with open(_SENTRY_EVENT_SCHEMA_PATH) as schema_file:
        SENTRY_EVENT_SCHEMA = json.load(schema_file)
else:
    SENTRY_EVENT_SCHEMA = None
# Tests that request the `benchmark` fixture should still run when
# pytest-benchmark is not installed; provide a stand-in that simply
# calls the benchmarked function once.
try:
    import pytest_benchmark
except ImportError:
    pytest_benchmark = None

if pytest_benchmark is None:

    @pytest.fixture
    def benchmark():
        return lambda func: func()


# Either way, do not leak the probe name into the module namespace.
del pytest_benchmark
@pytest.fixture(autouse=True)
def internal_exceptions(request, monkeypatch):
    """Turn SDK-internal (normally swallowed) exceptions into test errors.

    Captured ``exc_info`` tuples are re-raised in a finalizer so that any
    internal exception fails the test.  Tests marked with
    ``tests_internal_exceptions`` opt out and keep the real behavior.
    """
    captured = []

    if "tests_internal_exceptions" in request.keywords:
        return

    def _record(self, exc_info):
        captured.append(exc_info)

    @request.addfinalizer
    def _reraise_captured():
        for exc_info in captured:
            reraise(*exc_info)

    monkeypatch.setattr(
        sentry_sdk.Hub, "_capture_internal_exception", _record
    )

    return captured
@pytest.fixture(autouse=True, scope="session")
def _capture_internal_warnings():
    """At session end, fail on unexpected warnings related to the SDK.

    Known-noisy warnings from third-party packages and old framework
    versions are filtered out; anything else raises ``AssertionError``.
    """
    yield

    _warning_recorder_mgr.__exit__(None, None, None)

    for warning in _warning_recorder:
        try:
            # ResourceWarning does not exist on old interpreters.
            if isinstance(warning.message, ResourceWarning):
                continue
        except NameError:
            pass

        message = str(warning.message)
        filename = str(warning.filename)

        # Only warnings originating from (or mentioning) the SDK matter.
        if "sentry_sdk" not in filename and "sentry-sdk" not in filename:
            continue

        # pytest-django
        if "getfuncargvalue" in message:
            continue

        # Happens when re-initializing the SDK
        if "but it was only enabled on init()" in message:
            continue

        # sanic's usage of aiohttp for test client
        if "verify_ssl is deprecated, use ssl=False instead" in message:
            continue

        if "getargspec" in message and filename.endswith(
            ("pyramid/config/util.py", "pyramid/config/views.py")
        ):
            continue

        if "isAlive() is deprecated" in message and filename.endswith(
            "celery/utils/timer2.py"
        ):
            continue

        if "collections.abc" in message and filename.endswith(
            ("celery/canvas.py", "werkzeug/datastructures.py", "tornado/httputil.py")
        ):
            continue

        # Django 1.7 emits a (seemingly) false-positive warning for our test
        # app and suggests to use a middleware that does not exist in later
        # Django versions.
        if "SessionAuthenticationMiddleware" in message:
            continue

        if "Something has already installed a non-asyncio" in message:
            continue

        if "dns.hash" in message or "dns/namedict" in filename:
            continue

        raise AssertionError(warning)
@pytest.fixture
def monkeypatch_test_transport(monkeypatch, validate_event_schema):
    """Return a callable that swaps a client's transport for TestTransport.

    The injected transport sanity-checks every outgoing event (string keys
    only, schema-valid) and asserts that error events are not wrapped in
    envelopes.
    """

    def _assert_string_keys(mapping):
        # Event payloads must be JSON objects keyed by strings, recursively.
        for key, value in iteritems(mapping):
            assert isinstance(key, string_types)
            if isinstance(value, dict):
                _assert_string_keys(value)

    def _check_event(event):
        with capture_internal_exceptions():
            _assert_string_keys(event)
            validate_event_schema(event)

    def _check_envelope(envelope):
        with capture_internal_exceptions():
            # Assert error events are sent without envelope to server, for compat.
            assert not any(item.data_category == "error" for item in envelope.items)
            assert not any(item.get_event() is not None for item in envelope.items)

    def _patch(client):
        monkeypatch.setattr(
            client, "transport", TestTransport(_check_event, _check_envelope)
        )

    return _patch
@pytest.fixture
def validate_event_schema(tmpdir):
    """Return a validator checking events against the Sentry event schema.

    A no-op when the schema checkout is unavailable (SENTRY_EVENT_SCHEMA
    is falsy).
    """

    def _validate(event):
        if SENTRY_EVENT_SCHEMA:
            jsonschema.validate(instance=event, schema=SENTRY_EVENT_SCHEMA)

    return _validate
@pytest.fixture
def sentry_init(monkeypatch_test_transport, request):
    """Yield a function that initializes the SDK for a single test.

    Unless the test supplies its own ``transport``, the client receives
    the checking TestTransport.  A surrounding ``Hub(None)`` context keeps
    SDK state from leaking between tests.
    """

    def _init(*a, **kw):
        hub = sentry_sdk.Hub.current
        client = sentry_sdk.Client(*a, **kw)
        hub.bind_client(client)
        if "transport" not in kw:
            monkeypatch_test_transport(sentry_sdk.Hub.current.client)

    if request.node.get_closest_marker("forked"):
        # Do not run isolation if the test is already running in
        # ultimate isolation (seems to be required for celery tests that
        # fork)
        yield _init
    else:
        with sentry_sdk.Hub(None):
            yield _init
class TestTransport(Transport):
    """Transport stub that routes captures into test-provided callbacks."""

    def __init__(self, capture_event_callback, capture_envelope_callback):
        Transport.__init__(self)
        # No background worker: nothing is queued or actually sent.
        self._queue = None
        self.capture_event = capture_event_callback
        self.capture_envelope = capture_envelope_callback
@pytest.fixture
def capture_events(monkeypatch):
    """Fixture factory recording every event the current client captures.

    Calling the returned function patches the active client's transport
    and returns a list that fills up as events are captured.  Events and
    transactions arriving inside envelopes are unpacked and recorded too.
    """

    def _start_recording():
        recorded = []
        transport = sentry_sdk.Hub.current.client.transport
        original_capture_event = transport.capture_event
        original_capture_envelope = transport.capture_envelope

        def _capture_event(event):
            recorded.append(event)
            return original_capture_event(event)

        def _capture_envelope(envelope):
            # Re-route event/transaction items through the (patched)
            # capture_event so they end up in `recorded` as well.
            for item in envelope:
                if item.headers.get("type") in ("event", "transaction"):
                    transport.capture_event(item.payload.json)
            return original_capture_envelope(envelope)

        monkeypatch.setattr(transport, "capture_event", _capture_event)
        monkeypatch.setattr(transport, "capture_envelope", _capture_envelope)
        return recorded

    return _start_recording
@pytest.fixture
def capture_envelopes(monkeypatch):
    """Fixture factory recording every envelope the current client sends.

    Plain events are wrapped into a single-item Envelope so the caller
    always sees a homogeneous list of envelopes.
    """

    def _start_recording():
        recorded = []
        transport = sentry_sdk.Hub.current.client.transport
        original_capture_event = transport.capture_event
        original_capture_envelope = transport.capture_envelope

        def _capture_event(event):
            envelope = Envelope()
            envelope.add_event(event)
            recorded.append(envelope)
            return original_capture_event(event)

        def _capture_envelope(envelope):
            recorded.append(envelope)
            return original_capture_envelope(envelope)

        monkeypatch.setattr(transport, "capture_event", _capture_event)
        monkeypatch.setattr(transport, "capture_envelope", _capture_envelope)
        return recorded

    return _start_recording
@pytest.fixture
def capture_events_forksafe(monkeypatch, capture_events, request):
    """Like ``capture_events``, but readable from the parent after a fork.

    Events are serialized as JSON lines into an OS pipe, so captures made
    in a forked child can be read back through the returned
    EventStreamReader.  ``client.flush`` is patched to emit a marker line.
    """

    def _start_recording():
        capture_events()

        read_fd, write_fd = os.pipe()
        read_end = os.fdopen(read_fd, "rb", 0)
        write_end = os.fdopen(write_fd, "wb", 0)

        test_client = sentry_sdk.Hub.current.client
        original_capture_event = test_client.transport.capture_event

        def _capture_event(event):
            write_end.write(json.dumps(event).encode("utf-8"))
            write_end.write(b"\n")
            return original_capture_event(event)

        def _flush(timeout=None, callback=None):
            write_end.write(b"flush\n")

        monkeypatch.setattr(test_client.transport, "capture_event", _capture_event)
        monkeypatch.setattr(test_client, "flush", _flush)

        return EventStreamReader(read_end)

    return _start_recording
class EventStreamReader(object):
    """Reads the newline-delimited stream written by capture_events_forksafe."""

    def __init__(self, file):
        self.file = file

    def read_event(self):
        """Return the next event, decoded from one JSON line."""
        line = self.file.readline()
        return json.loads(line.decode("utf-8"))

    def read_flush(self):
        """Consume the next line, asserting it is a flush marker."""
        assert self.file.readline() == b"flush\n"
# scope=session ensures that fixture is run earlier
@pytest.fixture(
    scope="session",
    params=[None, "eventlet", "gevent"],
    ids=("threads", "eventlet", "greenlet"),
)
def maybe_monkeypatched_threading(request):
    """Parametrize the session over plain threads, eventlet and gevent.

    Applies the requested monkey-patching once per session; known
    interpreter-specific patching failures are skipped, everything else
    propagates.
    """
    flavor = request.param

    if flavor == "eventlet":
        try:
            eventlet.monkey_patch()
        except AttributeError as e:
            if "'thread.RLock' object has no attribute" in str(e):
                # https://bitbucket.org/pypy/pypy/issues/2962/gevent-cannot-patch-rlock-under-pypy-27-7
                pytest.skip("https://github.com/eventlet/eventlet/issues/546")
            else:
                raise
    elif flavor == "gevent":
        try:
            gevent.monkey.patch_all()
        except Exception as e:
            if "_RLock__owner" in str(e):
                pytest.skip("https://github.com/gevent/gevent/issues/1380")
            else:
                raise
    else:
        assert flavor is None

    return flavor
@pytest.fixture
def render_span_tree():
    """Fixture factory: return a helper that renders a transaction event's
    span hierarchy as an indented text tree, handy for snapshot-style
    assertions on span structure."""
    def inner(event):
        assert event["type"] == "transaction"

        # Group spans by their parent span id so children can be looked up
        # while walking down from the root.
        by_parent = {}
        for span in event["spans"]:
            by_parent.setdefault(span["parent_span_id"], []).append(span)

        def render_span(span):
            # Depth-first rendering; each recursion level adds indentation.
            yield "- op={}: description={}".format(
                json.dumps(span.get("op")), json.dumps(span.get("description"))
            )
            for subspan in by_parent.get(span["span_id"]) or ():
                for line in render_span(subspan):
                    yield " {}".format(line)

        root_span = event["contexts"]["trace"]
        # Join the rendered lines into a single multiline string, starting
        # from the trace context as the tree root.
        return "\n".join(render_span(root_span))

    return inner
| true | true |
1c368c294b9fbe391fd6ed3b79da06cbf2355d19 | 707 | py | Python | web/routes.py | bdwlyon/devicehive-audio-analysis | 8bd23578dbf043d9d6a618c40bc4c8161361df30 | [
"Apache-2.0"
] | 201 | 2017-11-10T18:19:20.000Z | 2021-12-21T08:13:45.000Z | web/routes.py | bdwlyon/devicehive-audio-analysis | 8bd23578dbf043d9d6a618c40bc4c8161361df30 | [
"Apache-2.0"
] | 17 | 2017-11-29T02:43:53.000Z | 2021-07-08T07:12:48.000Z | web/routes.py | bdwlyon/devicehive-audio-analysis | 8bd23578dbf043d9d6a618c40bc4c8161361df30 | [
"Apache-2.0"
] | 84 | 2017-11-16T11:26:04.000Z | 2021-02-03T18:49:14.000Z | # Copyright (C) 2017 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .controllers import Events, EventsUpdate

# Route table: (URL regex, controller class) pairs.
routes = [
    (r'^/events/$', Events),
    (r'^/events/update/$', EventsUpdate),
]
| 33.666667 | 74 | 0.735502 |
from .controllers import Events, EventsUpdate
routes = [
(r'^/events/$', Events),
(r'^/events/update/$', EventsUpdate),
]
| true | true |
1c368c905d873422490eb916e211106a9912d4f5 | 6,457 | py | Python | python/Lib/test/test_winconsoleio.py | jasam/ciclo_vida_datos_scraping | 3f7cffc944a0a0752a502dc7868cf43c4144f16c | [
"MIT"
] | null | null | null | python/Lib/test/test_winconsoleio.py | jasam/ciclo_vida_datos_scraping | 3f7cffc944a0a0752a502dc7868cf43c4144f16c | [
"MIT"
] | null | null | null | python/Lib/test/test_winconsoleio.py | jasam/ciclo_vida_datos_scraping | 3f7cffc944a0a0752a502dc7868cf43c4144f16c | [
"MIT"
] | null | null | null | '''Tests for WindowsConsoleIO
'''
import io
import os
import sys
import tempfile
import unittest
from test import support
if sys.platform != 'win32':
raise unittest.SkipTest("test only relevant on win32")
from _testconsole import write_input
ConIO = io._WindowsConsoleIO
class WindowsConsoleIOTests(unittest.TestCase):
def test_abc(self):
self.assertTrue(issubclass(ConIO, io.RawIOBase))
self.assertFalse(issubclass(ConIO, io.BufferedIOBase))
self.assertFalse(issubclass(ConIO, io.TextIOBase))
def test_open_fd(self):
self.assertRaisesRegex(ValueError,
"negative file descriptor", ConIO, -1)
with tempfile.TemporaryFile() as tmpfile:
fd = tmpfile.fileno()
# Windows 10: "Cannot open non-console file"
# Earlier: "Cannot open console output buffer for reading"
self.assertRaisesRegex(ValueError,
"Cannot open (console|non-console file)", ConIO, fd)
try:
f = ConIO(0)
except ValueError:
# cannot open console because it's not a real console
pass
else:
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertEqual(0, f.fileno())
f.close() # multiple close should not crash
f.close()
try:
f = ConIO(1, 'w')
except ValueError:
# cannot open console because it's not a real console
pass
else:
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertEqual(1, f.fileno())
f.close()
f.close()
try:
f = ConIO(2, 'w')
except ValueError:
# cannot open console because it's not a real console
pass
else:
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertEqual(2, f.fileno())
f.close()
f.close()
def test_open_name(self):
self.assertRaises(ValueError, ConIO, sys.executable)
f = ConIO("CON")
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertIsNotNone(f.fileno())
f.close() # multiple close should not crash
f.close()
f = ConIO('CONIN$')
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertIsNotNone(f.fileno())
f.close()
f.close()
f = ConIO('CONOUT$', 'w')
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertIsNotNone(f.fileno())
f.close()
f.close()
f = open('C:/con', 'rb', buffering=0)
self.assertIsInstance(f, ConIO)
f.close()
@unittest.skipIf(sys.getwindowsversion()[:2] <= (6, 1),
"test does not work on Windows 7 and earlier")
def test_conin_conout_names(self):
f = open(r'\\.\conin$', 'rb', buffering=0)
self.assertIsInstance(f, ConIO)
f.close()
f = open('//?/conout$', 'wb', buffering=0)
self.assertIsInstance(f, ConIO)
f.close()
def test_conout_path(self):
temp_path = tempfile.mkdtemp()
self.addCleanup(support.rmtree, temp_path)
conout_path = os.path.join(temp_path, 'CONOUT$')
with open(conout_path, 'wb', buffering=0) as f:
if sys.getwindowsversion()[:2] > (6, 1):
self.assertIsInstance(f, ConIO)
else:
self.assertNotIsInstance(f, ConIO)
def test_write_empty_data(self):
with ConIO('CONOUT$', 'w') as f:
self.assertEqual(f.write(b''), 0)
def assertStdinRoundTrip(self, text):
stdin = open('CONIN$', 'r')
old_stdin = sys.stdin
try:
sys.stdin = stdin
write_input(
stdin.buffer.raw,
(text + '\r\n').encode('utf-16-le', 'surrogatepass')
)
actual = input()
finally:
sys.stdin = old_stdin
self.assertEqual(actual, text)
def test_input(self):
# ASCII
self.assertStdinRoundTrip('abc123')
# Non-ASCII
self.assertStdinRoundTrip('ϼўТλФЙ')
# Combining characters
self.assertStdinRoundTrip('A͏B ﬖ̳AA̝')
# Non-BMP
self.assertStdinRoundTrip('\U00100000\U0010ffff\U0010fffd')
def test_partial_reads(self):
# Test that reading less than 1 full character works when stdin
# contains multibyte UTF-8 sequences
source = 'ϼўТλФЙ\r\n'.encode('utf-16-le')
expected = 'ϼўТλФЙ\r\n'.encode('utf-8')
for read_count in range(1, 16):
with open('CONIN$', 'rb', buffering=0) as stdin:
write_input(stdin, source)
actual = b''
while not actual.endswith(b'\n'):
b = stdin.read(read_count)
actual += b
self.assertEqual(actual, expected, 'stdin.read({})'.format(read_count))
def test_partial_surrogate_reads(self):
# Test that reading less than 1 full character works when stdin
# contains surrogate pairs that cannot be decoded to UTF-8 without
# reading an extra character.
source = '\U00101FFF\U00101001\r\n'.encode('utf-16-le')
expected = '\U00101FFF\U00101001\r\n'.encode('utf-8')
for read_count in range(1, 16):
with open('CONIN$', 'rb', buffering=0) as stdin:
write_input(stdin, source)
actual = b''
while not actual.endswith(b'\n'):
b = stdin.read(read_count)
actual += b
self.assertEqual(actual, expected, 'stdin.read({})'.format(read_count))
def test_ctrl_z(self):
with open('CONIN$', 'rb', buffering=0) as stdin:
source = '\xC4\x1A\r\n'.encode('utf-16-le')
expected = '\xC4'.encode('utf-8')
write_input(stdin, source)
a, b = stdin.read(1), stdin.readall()
self.assertEqual(expected[0:1], a)
self.assertEqual(expected[1:], b)
if __name__ == "__main__":
unittest.main()
| 33.283505 | 88 | 0.545919 |
import io
import os
import sys
import tempfile
import unittest
from test import support
if sys.platform != 'win32':
raise unittest.SkipTest("test only relevant on win32")
from _testconsole import write_input
ConIO = io._WindowsConsoleIO
class WindowsConsoleIOTests(unittest.TestCase):
def test_abc(self):
self.assertTrue(issubclass(ConIO, io.RawIOBase))
self.assertFalse(issubclass(ConIO, io.BufferedIOBase))
self.assertFalse(issubclass(ConIO, io.TextIOBase))
def test_open_fd(self):
self.assertRaisesRegex(ValueError,
"negative file descriptor", ConIO, -1)
with tempfile.TemporaryFile() as tmpfile:
fd = tmpfile.fileno()
self.assertRaisesRegex(ValueError,
"Cannot open (console|non-console file)", ConIO, fd)
try:
f = ConIO(0)
except ValueError:
pass
else:
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertEqual(0, f.fileno())
f.close() # multiple close should not crash
f.close()
try:
f = ConIO(1, 'w')
except ValueError:
# cannot open console because it's not a real console
pass
else:
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertEqual(1, f.fileno())
f.close()
f.close()
try:
f = ConIO(2, 'w')
except ValueError:
pass
else:
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertEqual(2, f.fileno())
f.close()
f.close()
def test_open_name(self):
self.assertRaises(ValueError, ConIO, sys.executable)
f = ConIO("CON")
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertIsNotNone(f.fileno())
f.close() # multiple close should not crash
f.close()
f = ConIO('CONIN$')
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertIsNotNone(f.fileno())
f.close()
f.close()
f = ConIO('CONOUT$', 'w')
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertIsNotNone(f.fileno())
f.close()
f.close()
f = open('C:/con', 'rb', buffering=0)
self.assertIsInstance(f, ConIO)
f.close()
@unittest.skipIf(sys.getwindowsversion()[:2] <= (6, 1),
"test does not work on Windows 7 and earlier")
def test_conin_conout_names(self):
f = open(r'\\.\conin$', 'rb', buffering=0)
self.assertIsInstance(f, ConIO)
f.close()
f = open('//?/conout$', 'wb', buffering=0)
self.assertIsInstance(f, ConIO)
f.close()
def test_conout_path(self):
temp_path = tempfile.mkdtemp()
self.addCleanup(support.rmtree, temp_path)
conout_path = os.path.join(temp_path, 'CONOUT$')
with open(conout_path, 'wb', buffering=0) as f:
if sys.getwindowsversion()[:2] > (6, 1):
self.assertIsInstance(f, ConIO)
else:
self.assertNotIsInstance(f, ConIO)
def test_write_empty_data(self):
with ConIO('CONOUT$', 'w') as f:
self.assertEqual(f.write(b''), 0)
def assertStdinRoundTrip(self, text):
stdin = open('CONIN$', 'r')
old_stdin = sys.stdin
try:
sys.stdin = stdin
write_input(
stdin.buffer.raw,
(text + '\r\n').encode('utf-16-le', 'surrogatepass')
)
actual = input()
finally:
sys.stdin = old_stdin
self.assertEqual(actual, text)
def test_input(self):
# ASCII
self.assertStdinRoundTrip('abc123')
# Non-ASCII
self.assertStdinRoundTrip('ϼўТλФЙ')
# Combining characters
self.assertStdinRoundTrip('A͏B ﬖ̳AA̝')
# Non-BMP
self.assertStdinRoundTrip('\U00100000\U0010ffff\U0010fffd')
def test_partial_reads(self):
# Test that reading less than 1 full character works when stdin
# contains multibyte UTF-8 sequences
source = 'ϼўТλФЙ\r\n'.encode('utf-16-le')
expected = 'ϼўТλФЙ\r\n'.encode('utf-8')
for read_count in range(1, 16):
with open('CONIN$', 'rb', buffering=0) as stdin:
write_input(stdin, source)
actual = b''
while not actual.endswith(b'\n'):
b = stdin.read(read_count)
actual += b
self.assertEqual(actual, expected, 'stdin.read({})'.format(read_count))
def test_partial_surrogate_reads(self):
# Test that reading less than 1 full character works when stdin
# contains surrogate pairs that cannot be decoded to UTF-8 without
# reading an extra character.
source = '\U00101FFF\U00101001\r\n'.encode('utf-16-le')
expected = '\U00101FFF\U00101001\r\n'.encode('utf-8')
for read_count in range(1, 16):
with open('CONIN$', 'rb', buffering=0) as stdin:
write_input(stdin, source)
actual = b''
while not actual.endswith(b'\n'):
b = stdin.read(read_count)
actual += b
self.assertEqual(actual, expected, 'stdin.read({})'.format(read_count))
def test_ctrl_z(self):
with open('CONIN$', 'rb', buffering=0) as stdin:
source = '\xC4\x1A\r\n'.encode('utf-16-le')
expected = '\xC4'.encode('utf-8')
write_input(stdin, source)
a, b = stdin.read(1), stdin.readall()
self.assertEqual(expected[0:1], a)
self.assertEqual(expected[1:], b)
if __name__ == "__main__":
unittest.main()
| true | true |
1c368ca4d0bd3f06ada72a008a49083823684fc4 | 241 | py | Python | python programs/inheritence.py | saddam-gif/Python-crushcourse | 63e1e1ff1eeb9a5d34bb0354cc86566c4de60260 | [
"MIT"
] | null | null | null | python programs/inheritence.py | saddam-gif/Python-crushcourse | 63e1e1ff1eeb9a5d34bb0354cc86566c4de60260 | [
"MIT"
] | null | null | null | python programs/inheritence.py | saddam-gif/Python-crushcourse | 63e1e1ff1eeb9a5d34bb0354cc86566c4de60260 | [
"MIT"
class phone:
    """Base class exposing basic phone capabilities."""

    def call(self):
        print("call")

    def message(self):
        print("message")


class samsung(phone):
    """Subclass of phone that adds photo support."""

    def photo(self):
        print("photo")


# Demo: a samsung instance inherits call()/message() from phone.
s = samsung()
s.call()
s.message()
# Bug fix: the arguments were swapped (issubclass(phone, samsung)), which
# always printed False. issubclass(sub, base) is the correct order for
# demonstrating that samsung inherits from phone.
print(issubclass(samsung, phone))
def call(self):
print("call")
def message(self):
print("message")
class samsung(phone):
def photo(self):
print("photo")
s = samsung()
s.call()
s.message()
print(issubclass(phone,samsung)) | true | true |
1c368cb215bc8be5b20edc91b56a5b158d21a4ed | 4,706 | py | Python | aiocoap/blockwise.py | roysjosh/aiocoap | 1f03d4ceb969b2b443c288c312d44c3b7c3e2031 | [
"MIT"
] | null | null | null | aiocoap/blockwise.py | roysjosh/aiocoap | 1f03d4ceb969b2b443c288c312d44c3b7c3e2031 | [
"MIT"
] | null | null | null | aiocoap/blockwise.py | roysjosh/aiocoap | 1f03d4ceb969b2b443c288c312d44c3b7c3e2031 | [
"MIT"
] | null | null | null | # This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <c.amsuess@energyharvesting.at>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
"""Helpers for the implementation of RFC7959 blockwise transfers"""
import types
from . import numbers
from .numbers.optionnumbers import OptionNumber
from .error import ConstructionRenderableError
from .message import Message
from .optiontypes import BlockOption
from .util.asyncio.timeoutdict import TimeoutDict
def _extract_block_key(message):
    """Derive a hashable key identifying the blockwise operation a request
    message belongs to; all blocks of one operation yield an equal key.

    See discussion at
    <https://mailarchive.ietf.org/arch/msg/core/I-6LzAL6lIUVDA6_g9YM3Zjhg8E>.
    """
    blockwise_options = [
        OptionNumber.BLOCK1,
        OptionNumber.BLOCK2,
        OptionNumber.OBSERVE,
    ]
    cache_key = message.get_cache_key(blockwise_options)
    return (message.remote.blockwise_key, message.code, cache_key)
class ContinueException(ConstructionRenderableError):
    """Not an error in the CoAP sense, but an error in the processing sense,
    indicating that no complete request message is available for processing.

    It reflects back the request's block1 option when rendered.
    """
    def __init__(self, block1):
        # block1 option of the triggering request; echoed back in the
        # rendered Continue response.
        self.block1 = block1

    def to_message(self):
        """Render as a CONTINUE response carrying the request's block1."""
        m = super().to_message()
        m.opt.block1 = self.block1
        return m

    code = numbers.CONTINUE
class IncompleteException(ConstructionRenderableError):
    """Rendered as a Request Entity Incomplete response when no usable
    partial state exists for an in-progress blockwise operation."""
    code = numbers.REQUEST_ENTITY_INCOMPLETE
class Block1Spool:
    """Reassembles block1 (request-body) blockwise transfers.

    Incoming request fragments are accumulated per block key until the
    final block arrives, at which point the full request is produced.
    Entries expire after MAX_TRANSMIT_WAIT via the TimeoutDict.
    """
    def __init__(self):
        # FIXME: introduce an actual parameter here
        self._assemblies = TimeoutDict(numbers.MAX_TRANSMIT_WAIT)

    def feed_and_take(self, req: Message) -> Message:
        """Assemble the request into the spool. This either produces a
        reassembled request message, or raises either a Continue or a Request
        Entity Incomplete exception.

        Requests without block1 are simply passed through."""
        if req.opt.block1 is None:
            return req

        block_key = _extract_block_key(req)

        if req.opt.block1.block_number == 0:
            # silently discarding any old incomplete operation
            self._assemblies[block_key] = req
        else:
            try:
                self._assemblies[block_key]._append_request_block(req)
            except KeyError:
                # KeyError: Received unmatched blockwise response
                # ValueError: Failed to assemble -- gaps or overlaps in data
                # NOTE(review): the comment above mentions ValueError, but
                # only KeyError is caught here -- confirm whether ValueError
                # from _append_request_block should also map to Incomplete.
                raise IncompleteException from None

        if req.opt.block1.more:
            # More blocks to come: tell the client to continue, echoing
            # this block's block1 option.
            raise ContinueException(req.opt.block1)
        else:
            return self._assemblies[block_key]
            # which happens to carry the last block's block1 option
class Block2Cache:
    """A cache of responses to a given block key.

    Use this when result rendering is expensive, not idempotent or has varying
    output -- otherwise it's often better to calculate the full response again
    and serve chunks.
    """
    def __init__(self):
        # FIXME: introduce an actual parameter here
        self._completes = TimeoutDict(numbers.MAX_TRANSMIT_WAIT)

    async def extract_or_insert(self, req: Message, response_builder: types.CoroutineType):
        """Given a request message,

        * if it is querying a particular block, look it up in the cache or
          raise Request Entity Incomplete.
        * otherwise,
          * await the response builder
          * return the response if it doesn't need chunking, or
          * return the first chunk and store it for later use
        """
        block_key = _extract_block_key(req)

        if req.opt.block2 is None or req.opt.block2.block_number == 0:
            # First block (or no blockwise at all): build the full response.
            assembled = await response_builder()
        else:
            # Follow-up block: must come from a previously cached response.
            try:
                assembled = self._completes[block_key]
            except KeyError:
                raise IncompleteException from None

        if len(assembled.payload) > req.remote.maximum_payload_size or \
                req.opt.block2 is not None and len(assembled.payload) > req.opt.block2.size:
            # Too large for a single message: cache the full response and
            # hand out only the requested (or first) chunk.
            self._completes[block_key] = assembled
            block2 = req.opt.block2 or \
                    BlockOption.BlockwiseTuple(0, 0, req.remote.maximum_block_size_exp)
            return assembled._extract_block(
                block2.block_number,
                block2.size_exponent,
                req.remote.maximum_payload_size
            )
        else:
            return assembled
| 35.923664 | 95 | 0.665108 |
import types
from . import numbers
from .numbers.optionnumbers import OptionNumber
from .error import ConstructionRenderableError
from .message import Message
from .optiontypes import BlockOption
from .util.asyncio.timeoutdict import TimeoutDict
def _extract_block_key(message):
return (message.remote.blockwise_key, message.code, message.get_cache_key([
OptionNumber.BLOCK1,
OptionNumber.BLOCK2,
OptionNumber.OBSERVE,
]))
class ContinueException(ConstructionRenderableError):
def __init__(self, block1):
self.block1 = block1
def to_message(self):
m = super().to_message()
m.opt.block1 = self.block1
return m
code = numbers.CONTINUE
class IncompleteException(ConstructionRenderableError):
code = numbers.REQUEST_ENTITY_INCOMPLETE
class Block1Spool:
def __init__(self):
self._assemblies = TimeoutDict(numbers.MAX_TRANSMIT_WAIT)
def feed_and_take(self, req: Message) -> Message:
if req.opt.block1 is None:
return req
block_key = _extract_block_key(req)
if req.opt.block1.block_number == 0:
self._assemblies[block_key] = req
else:
try:
self._assemblies[block_key]._append_request_block(req)
except KeyError:
raise IncompleteException from None
if req.opt.block1.more:
raise ContinueException(req.opt.block1)
else:
return self._assemblies[block_key]
class Block2Cache:
def __init__(self):
# FIXME: introduce an actual parameter here
self._completes = TimeoutDict(numbers.MAX_TRANSMIT_WAIT)
async def extract_or_insert(self, req: Message, response_builder: types.CoroutineType):
block_key = _extract_block_key(req)
if req.opt.block2 is None or req.opt.block2.block_number == 0:
assembled = await response_builder()
else:
try:
assembled = self._completes[block_key]
except KeyError:
raise IncompleteException from None
if len(assembled.payload) > req.remote.maximum_payload_size or \
req.opt.block2 is not None and len(assembled.payload) > req.opt.block2.size:
self._completes[block_key] = assembled
block2 = req.opt.block2 or \
BlockOption.BlockwiseTuple(0, 0, req.remote.maximum_block_size_exp)
return assembled._extract_block(
block2.block_number,
block2.size_exponent,
req.remote.maximum_payload_size
)
else:
return assembled
| true | true |
1c368d032835f38e783bd66da50df1e94cf9b3d7 | 756 | py | Python | ooobuild/csslo/setup/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/csslo/setup/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/csslo/setup/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...lo.setup.update_check import UpdateCheck as UpdateCheck
from ...lo.setup.update_check_config import UpdateCheckConfig as UpdateCheckConfig
| 39.789474 | 82 | 0.771164 |
from ...lo.setup.update_check import UpdateCheck as UpdateCheck
from ...lo.setup.update_check_config import UpdateCheckConfig as UpdateCheckConfig
| true | true |
1c368d03d7d6d609c75fcb71a12a42d3295ba672 | 1,747 | py | Python | kolga/plugins/slack/messages.py | Hi-Fi/kolga | 821169fc24eb7e3883e6b4993ac75117a0c59766 | [
"MIT"
] | 7 | 2020-04-24T13:47:21.000Z | 2022-03-09T05:17:59.000Z | kolga/plugins/slack/messages.py | Hi-Fi/kolga | 821169fc24eb7e3883e6b4993ac75117a0c59766 | [
"MIT"
] | 16 | 2021-01-19T13:18:27.000Z | 2022-02-01T13:50:14.000Z | kolga/plugins/slack/messages.py | Hi-Fi/kolga | 821169fc24eb7e3883e6b4993ac75117a0c59766 | [
"MIT"
] | 6 | 2020-08-17T08:56:56.000Z | 2021-05-29T07:34:47.000Z | from typing import TYPE_CHECKING, Any, List, TypedDict
if TYPE_CHECKING:
from kolga.libs.project import Project
class _SlackMessageField(TypedDict, total=False):
    # One Slack field object: a type tag (e.g. "mrkdwn") plus its text.
    type: str
    text: str
class _SlackMessageBody(TypedDict, total=False):
    # Slack section block holding a list of fields.
    type: str
    fields: List[_SlackMessageField]
def new_environment_message(environment_track: str, project: "Project") -> List[Any]:
    """Build the Slack block payload announcing a new deployment.

    The title names the track and project; optional fields (URL, PR link,
    pipeline creator, reviewers) are included only when the corresponding
    data is available.
    """
    # Import settings in function to not have circular imports
    from kolga.settings import settings

    title_section = {
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": f"*New {environment_track} deployment for {project.verbose_name}*",
        },
    }

    fields: List[_SlackMessageField] = []
    if project.url:
        fields.append(
            {"type": "mrkdwn", "text": f"*:link: URL:*\n <{project.url}|Link>"}
        )
    if settings.PR_URL and settings.PR_TITLE:
        fields.append(
            {
                "type": "mrkdwn",
                "text": f"*:pick: Pull/Merge Request:*\n <{settings.PR_URL}|{settings.PR_TITLE}>",
            }
        )
    if settings.JOB_ACTOR:
        fields.append(
            {
                "type": "mrkdwn",
                "text": f"*:bust_in_silhouette: Pipeline creator*\n{settings.JOB_ACTOR}",
            }
        )
    if settings.PR_ASSIGNEES:
        fields.append(
            {
                "type": "mrkdwn",
                "text": f"*:busts_in_silhouette: Reviewers:*\n{settings.PR_ASSIGNEES}",
            }
        )

    body_section: _SlackMessageBody = {"type": "section", "fields": fields}
    return [title_section, body_section]
| 27.296875 | 98 | 0.576417 | from typing import TYPE_CHECKING, Any, List, TypedDict
if TYPE_CHECKING:
from kolga.libs.project import Project
class _SlackMessageField(TypedDict, total=False):
type: str
text: str
class _SlackMessageBody(TypedDict, total=False):
type: str
fields: List[_SlackMessageField]
def new_environment_message(environment_track: str, project: "Project") -> List[Any]:
from kolga.settings import settings
message: List[Any] = []
title_section = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*New {environment_track} deployment for {project.verbose_name}*",
},
}
body_section: _SlackMessageBody = {"type": "section", "fields": []}
if project.url:
body_section["fields"].append(
{"type": "mrkdwn", "text": f"*:link: URL:*\n <{project.url}|Link>"}
)
if settings.PR_URL and settings.PR_TITLE:
body_section["fields"].append(
{
"type": "mrkdwn",
"text": f"*:pick: Pull/Merge Request:*\n <{settings.PR_URL}|{settings.PR_TITLE}>",
}
)
if settings.JOB_ACTOR:
body_section["fields"].append(
{
"type": "mrkdwn",
"text": f"*:bust_in_silhouette: Pipeline creator*\n{settings.JOB_ACTOR}",
}
)
if settings.PR_ASSIGNEES:
body_section["fields"].append(
{
"type": "mrkdwn",
"text": f"*:busts_in_silhouette: Reviewers:*\n{settings.PR_ASSIGNEES}",
}
)
message.append(title_section)
message.append(body_section)
return message
| true | true |
1c368d2ea1c7c053c9d189524d8dd68e73f02c99 | 6,622 | py | Python | tests/test_utils_gsheets.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | 18 | 2018-02-05T20:03:30.000Z | 2022-01-19T00:58:00.000Z | tests/test_utils_gsheets.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | 31 | 2018-01-19T14:21:03.000Z | 2021-09-07T02:04:40.000Z | tests/test_utils_gsheets.py | harvard-dce/zoom-ingester | ef5c8910e35e84d33f1c612cfb4643309c52040d | [
"Apache-2.0"
] | 5 | 2019-12-04T17:44:10.000Z | 2021-02-23T03:14:24.000Z | import site
from os.path import dirname, join
import pytest
site.addsitedir(join(dirname(dirname(__file__)), "functions"))
from utils import gsheets
def test_schedule_successful_parsing(mocker):
mock_table_name = "mock_table_name"
mock_json_to_dynamo = mocker.patch.object(
gsheets, "schedule_json_to_dynamo"
)
schedule1_expected = {
"0123456789": {
"course_code": "BIOS E-18A",
"events": [{"day": "T", "time": "20:10", "title": "Section"}],
"opencast_series_id": "20210112345",
"zoom_series_id": "0123456789",
},
"9876543210": {
"course_code": "BIOS E-18B",
"events": [
{"day": "M", "time": "19:40", "title": "Lecture"},
{"day": "W", "time": "19:40", "title": "Lecture"},
],
"opencast_series_id": "20210155555",
"zoom_series_id": "9876543210",
},
"555555555": {
"course_code": "CSCI E-61",
"events": [
{"day": "T", "time": "9:15", "title": "Lecture"},
{"day": "W", "time": "12:00", "title": "Staff Meeting"},
{"day": "R", "time": "20:00", "title": "Section"},
{"day": "F", "time": "13:15", "title": "Lecture"},
],
"opencast_series_id": "20210161616",
"zoom_series_id": "555555555",
},
"876876876": {
"course_code": "ECON E-10A",
"events": [
{"day": "M", "time": "11:00", "title": "Lecture"},
{"day": "T", "time": "11:00", "title": "Lecture"},
{"day": "R", "time": "11:00", "title": "Lecture"},
],
"opencast_series_id": "20210187678",
"zoom_series_id": "876876876",
},
"200200200": {
"course_code": "ECON E-20A",
"events": [
{"day": "T", "time": "13:00", "title": "Lecture"},
{"day": "R", "time": "13:00", "title": "Lecture"},
{"day": "F", "time": "13:00", "title": "Lecture"},
],
"opencast_series_id": "20210120020",
"zoom_series_id": "200200200",
},
"123123123": {
"course_code": "MATH E-55",
"events": [
{"day": "T", "time": "14:30", "title": "Lecture"},
{"day": "R", "time": "14:30", "title": "Lecture"},
],
"opencast_series_id": "20210155155",
"zoom_series_id": "123123123",
},
"100100100": {
"course_code": "STAT S-100",
"events": [
{"day": "S", "time": "18:30", "title": "Section"},
{"day": "U", "time": "18:30", "title": "Section"},
],
"opencast_series_id": "20210110010",
"zoom_series_id": "100100100",
},
}
# pass something into schedule csv
gsheets.schedule_csv_to_dynamo(
mock_table_name, "tests/input/schedule1.csv"
)
mock_json_to_dynamo.assert_called_with(
mock_table_name, schedule_data=schedule1_expected
)
def test_schedule_missing_req_field(mocker):
    """A CSV row lacking a required column must abort the import with an
    explanatory error."""
    with pytest.raises(Exception, match="Missing required field"):
        gsheets.schedule_csv_to_dynamo(
            "mock_table_name", "tests/input/schedule2.csv"
        )
def test_schedule_missing_zoom_link(mocker):
mock_table_name = "mock_table_name"
mock_json_to_dynamo = mocker.patch.object(
gsheets, "schedule_json_to_dynamo"
)
# 2 meetings but 1 does not have a Zoom link so only 1 meeting added
schedule3_expected = {
"9876543210": {
"course_code": "TEST E-2",
"events": [
{"day": "M", "time": "19:40", "title": "Lecture"},
{"day": "W", "time": "19:40", "title": "Lecture"},
],
"opencast_series_id": "20210155555",
"zoom_series_id": "9876543210",
}
}
gsheets.schedule_csv_to_dynamo(
mock_table_name, "tests/input/schedule3.csv"
)
mock_json_to_dynamo.assert_called_with(
"mock_table_name", schedule_data=schedule3_expected
)
def test_schedule_invalid_zoom_link(mocker):
mock_table_name = "mock_table_name"
mock_json_to_dynamo = mocker.patch.object(
gsheets, "schedule_json_to_dynamo"
)
# 2 meetings but 1 has an invalid Zoom link so only 1 meeting added
schedule4_expected = {
"9876543210": {
"course_code": "TEST E-2",
"events": [
{"day": "M", "time": "19:40", "title": "Lecture"},
{"day": "W", "time": "19:40", "title": "Lecture"},
],
"opencast_series_id": "20210155555",
"zoom_series_id": "9876543210",
}
}
gsheets.schedule_csv_to_dynamo(
mock_table_name, "tests/input/schedule4.csv"
)
mock_json_to_dynamo.assert_called_with(
"mock_table_name", schedule_data=schedule4_expected
)
def test_schedule_missing_oc_series(mocker):
mock_table_name = "mock_table_name"
mock_json_to_dynamo = mocker.patch.object(
gsheets, "schedule_json_to_dynamo"
)
# 2 meetings but 1 has no opencast series id so only 1 meeting added
schedule5_expected = {
"9876543210": {
"course_code": "TEST E-2",
"events": [
{"day": "M", "time": "19:40", "title": "Lecture"},
{"day": "W", "time": "19:40", "title": "Lecture"},
],
"opencast_series_id": "20210155555",
"zoom_series_id": "9876543210",
}
}
gsheets.schedule_csv_to_dynamo(
mock_table_name, "tests/input/schedule5.csv"
)
mock_json_to_dynamo.assert_called_with(
"mock_table_name", schedule_data=schedule5_expected
)
def test_schedule_invalid_time(mocker):
mock_table_name = "mock_table_name"
mock_json_to_dynamo = mocker.patch.object(
gsheets, "schedule_json_to_dynamo"
)
# 2 meetings but 1 has an invalid time so only 1 meeting added
schedule6_expected = {
"9876543210": {
"course_code": "TEST E-1",
"events": [{"day": "T", "time": "20:10", "title": "Section"}],
"opencast_series_id": "20210155555",
"zoom_series_id": "9876543210",
}
}
gsheets.schedule_csv_to_dynamo(
mock_table_name, "tests/input/schedule6.csv"
)
mock_json_to_dynamo.assert_called_with(
"mock_table_name", schedule_data=schedule6_expected
)
| 32.782178 | 74 | 0.53579 | import site
from os.path import dirname, join
import pytest
site.addsitedir(join(dirname(dirname(__file__)), "functions"))
from utils import gsheets
def test_schedule_successful_parsing(mocker):
mock_table_name = "mock_table_name"
mock_json_to_dynamo = mocker.patch.object(
gsheets, "schedule_json_to_dynamo"
)
schedule1_expected = {
"0123456789": {
"course_code": "BIOS E-18A",
"events": [{"day": "T", "time": "20:10", "title": "Section"}],
"opencast_series_id": "20210112345",
"zoom_series_id": "0123456789",
},
"9876543210": {
"course_code": "BIOS E-18B",
"events": [
{"day": "M", "time": "19:40", "title": "Lecture"},
{"day": "W", "time": "19:40", "title": "Lecture"},
],
"opencast_series_id": "20210155555",
"zoom_series_id": "9876543210",
},
"555555555": {
"course_code": "CSCI E-61",
"events": [
{"day": "T", "time": "9:15", "title": "Lecture"},
{"day": "W", "time": "12:00", "title": "Staff Meeting"},
{"day": "R", "time": "20:00", "title": "Section"},
{"day": "F", "time": "13:15", "title": "Lecture"},
],
"opencast_series_id": "20210161616",
"zoom_series_id": "555555555",
},
"876876876": {
"course_code": "ECON E-10A",
"events": [
{"day": "M", "time": "11:00", "title": "Lecture"},
{"day": "T", "time": "11:00", "title": "Lecture"},
{"day": "R", "time": "11:00", "title": "Lecture"},
],
"opencast_series_id": "20210187678",
"zoom_series_id": "876876876",
},
"200200200": {
"course_code": "ECON E-20A",
"events": [
{"day": "T", "time": "13:00", "title": "Lecture"},
{"day": "R", "time": "13:00", "title": "Lecture"},
{"day": "F", "time": "13:00", "title": "Lecture"},
],
"opencast_series_id": "20210120020",
"zoom_series_id": "200200200",
},
"123123123": {
"course_code": "MATH E-55",
"events": [
{"day": "T", "time": "14:30", "title": "Lecture"},
{"day": "R", "time": "14:30", "title": "Lecture"},
],
"opencast_series_id": "20210155155",
"zoom_series_id": "123123123",
},
"100100100": {
"course_code": "STAT S-100",
"events": [
{"day": "S", "time": "18:30", "title": "Section"},
{"day": "U", "time": "18:30", "title": "Section"},
],
"opencast_series_id": "20210110010",
"zoom_series_id": "100100100",
},
}
gsheets.schedule_csv_to_dynamo(
mock_table_name, "tests/input/schedule1.csv"
)
mock_json_to_dynamo.assert_called_with(
mock_table_name, schedule_data=schedule1_expected
)
def test_schedule_missing_req_field(mocker):
    """Importing a CSV with a missing required column must raise.

    schedule2.csv presumably omits one of the mandatory fields, so
    ``schedule_csv_to_dynamo`` should fail loudly rather than write a
    partial schedule.  (``mocker`` is accepted for fixture parity with
    the sibling tests; nothing is patched here.)
    """
    expected_message = "Missing required field"
    with pytest.raises(Exception, match=expected_message):
        gsheets.schedule_csv_to_dynamo(
            "mock_table_name", "tests/input/schedule2.csv"
        )
def test_schedule_missing_zoom_link(mocker):
    """Rows without a Zoom link are silently dropped from the schedule.

    Only the course with a usable Zoom series id ("TEST E-2") should be
    forwarded to ``schedule_json_to_dynamo``; presumably the other row
    in schedule3.csv lacks a Zoom link — TODO confirm against fixture.
    """
    mock_table_name = "mock_table_name"
    # Patch the dynamo writer so the parsed payload can be inspected
    # without touching AWS.
    mock_json_to_dynamo = mocker.patch.object(
        gsheets, "schedule_json_to_dynamo"
    )
    schedule3_expected = {
        "9876543210": {
            "course_code": "TEST E-2",
            "events": [
                {"day": "M", "time": "19:40", "title": "Lecture"},
                {"day": "W", "time": "19:40", "title": "Lecture"},
            ],
            "opencast_series_id": "20210155555",
            "zoom_series_id": "9876543210",
        }
    }
    gsheets.schedule_csv_to_dynamo(
        mock_table_name, "tests/input/schedule3.csv"
    )
    # Assert with the same variable used in the call (the original
    # repeated the string literal, which could drift out of sync).
    mock_json_to_dynamo.assert_called_with(
        mock_table_name, schedule_data=schedule3_expected
    )
def test_schedule_invalid_zoom_link(mocker):
    """Rows with a malformed Zoom link are excluded from the schedule.

    Only the valid course ("TEST E-2") should reach
    ``schedule_json_to_dynamo``; presumably the other row in
    schedule4.csv carries an unparseable Zoom link — TODO confirm
    against fixture.
    """
    mock_table_name = "mock_table_name"
    # Patch the dynamo writer so the parsed payload can be inspected
    # without touching AWS.
    mock_json_to_dynamo = mocker.patch.object(
        gsheets, "schedule_json_to_dynamo"
    )
    schedule4_expected = {
        "9876543210": {
            "course_code": "TEST E-2",
            "events": [
                {"day": "M", "time": "19:40", "title": "Lecture"},
                {"day": "W", "time": "19:40", "title": "Lecture"},
            ],
            "opencast_series_id": "20210155555",
            "zoom_series_id": "9876543210",
        }
    }
    gsheets.schedule_csv_to_dynamo(
        mock_table_name, "tests/input/schedule4.csv"
    )
    # Assert with the same variable used in the call (the original
    # repeated the string literal, which could drift out of sync).
    mock_json_to_dynamo.assert_called_with(
        mock_table_name, schedule_data=schedule4_expected
    )
def test_schedule_missing_oc_series(mocker):
    """Rows without an Opencast series id are excluded from the schedule.

    Only the fully populated course ("TEST E-2") should reach
    ``schedule_json_to_dynamo``; presumably the other row in
    schedule5.csv lacks an Opencast series — TODO confirm against
    fixture.
    """
    mock_table_name = "mock_table_name"
    # Patch the dynamo writer so the parsed payload can be inspected
    # without touching AWS.
    mock_json_to_dynamo = mocker.patch.object(
        gsheets, "schedule_json_to_dynamo"
    )
    schedule5_expected = {
        "9876543210": {
            "course_code": "TEST E-2",
            "events": [
                {"day": "M", "time": "19:40", "title": "Lecture"},
                {"day": "W", "time": "19:40", "title": "Lecture"},
            ],
            "opencast_series_id": "20210155555",
            "zoom_series_id": "9876543210",
        }
    }
    gsheets.schedule_csv_to_dynamo(
        mock_table_name, "tests/input/schedule5.csv"
    )
    # Assert with the same variable used in the call (the original
    # repeated the string literal, which could drift out of sync).
    mock_json_to_dynamo.assert_called_with(
        mock_table_name, schedule_data=schedule5_expected
    )
def test_schedule_invalid_time(mocker):
    """Events with an unparseable time are dropped from a course's schedule.

    The surviving course ("TEST E-1") keeps only its one valid event;
    presumably schedule6.csv also contains an event row with a bad time
    value — TODO confirm against fixture.
    """
    mock_table_name = "mock_table_name"
    # Patch the dynamo writer so the parsed payload can be inspected
    # without touching AWS.
    mock_json_to_dynamo = mocker.patch.object(
        gsheets, "schedule_json_to_dynamo"
    )
    schedule6_expected = {
        "9876543210": {
            "course_code": "TEST E-1",
            "events": [{"day": "T", "time": "20:10", "title": "Section"}],
            "opencast_series_id": "20210155555",
            "zoom_series_id": "9876543210",
        }
    }
    gsheets.schedule_csv_to_dynamo(
        mock_table_name, "tests/input/schedule6.csv"
    )
    # Assert with the same variable used in the call (the original
    # repeated the string literal, which could drift out of sync).
    mock_json_to_dynamo.assert_called_with(
        mock_table_name, schedule_data=schedule6_expected
    )
| true | true |
1c368d4d5cea0ebbc7c219f6ca749505321bbbb3 | 1,402 | py | Python | notifications/views.py | thulasi-ram/logistika | a9a7b649f0e15bf8cdad43fdab2a8bd61326f83d | [
"MIT"
] | null | null | null | notifications/views.py | thulasi-ram/logistika | a9a7b649f0e15bf8cdad43fdab2a8bd61326f83d | [
"MIT"
] | null | null | null | notifications/views.py | thulasi-ram/logistika | a9a7b649f0e15bf8cdad43fdab2a8bd61326f83d | [
"MIT"
] | null | null | null | from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import PageNotAnInteger, Paginator, EmptyPage
from django.shortcuts import render
# Create your views here.
from django.template.response import TemplateResponse
from django.views.generic import TemplateView
from itertools import chain
from notifications.models import Notifications
class NotificationsFeed(LoginRequiredMixin, TemplateView):
template_name = 'notifications/notifications.html'
def get(self, request, *args, **kwargs):
notifs = Notifications.objects.filter(user=request.user).order_by('-created_at')
notifs.filter(status=Notifications.UNREAD,
created_at__lte=request.user.profile.last_read_notif_time).update(status=Notifications.READ)
request.user.profile.last_read_notif_time = datetime.now()
request.user.profile.save()
page = request.GET.get('page')
items_per_page = request.META.get('items_per_page', 10)
paginator = Paginator(notifs, items_per_page)
try:
clients = paginator.page(page)
except PageNotAnInteger:
clients = paginator.page(1)
except EmptyPage:
clients = paginator.page(paginator.num_pages)
page = clients.number
return TemplateResponse(request, self.template_name, {'notifs': notifs})
| 40.057143 | 114 | 0.728959 | from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import PageNotAnInteger, Paginator, EmptyPage
from django.shortcuts import render
from django.template.response import TemplateResponse
from django.views.generic import TemplateView
from itertools import chain
from notifications.models import Notifications
class NotificationsFeed(LoginRequiredMixin, TemplateView):
template_name = 'notifications/notifications.html'
def get(self, request, *args, **kwargs):
notifs = Notifications.objects.filter(user=request.user).order_by('-created_at')
notifs.filter(status=Notifications.UNREAD,
created_at__lte=request.user.profile.last_read_notif_time).update(status=Notifications.READ)
request.user.profile.last_read_notif_time = datetime.now()
request.user.profile.save()
page = request.GET.get('page')
items_per_page = request.META.get('items_per_page', 10)
paginator = Paginator(notifs, items_per_page)
try:
clients = paginator.page(page)
except PageNotAnInteger:
clients = paginator.page(1)
except EmptyPage:
clients = paginator.page(paginator.num_pages)
page = clients.number
return TemplateResponse(request, self.template_name, {'notifs': notifs})
| true | true |
1c368d84132844b5c51d4fdecc4d5f87324927af | 5,054 | py | Python | libtrust/rsa_key.py | tomwei7/libtrust-py3 | b1d71eee57b95621b5111cebd3c44751442740c5 | [
"Apache-2.0"
] | 1 | 2020-03-26T13:17:10.000Z | 2020-03-26T13:17:10.000Z | libtrust/rsa_key.py | tomwei7/libtrust-py3 | b1d71eee57b95621b5111cebd3c44751442740c5 | [
"Apache-2.0"
] | null | null | null | libtrust/rsa_key.py | tomwei7/libtrust-py3 | b1d71eee57b95621b5111cebd3c44751442740c5 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import copy
from cryptography import utils as cry_utils
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from . import hash as hash_
from . import key
from . import util
__all__ = ['RSAPublicKey', 'RSAPrivateKey']
PublicKey = key.PublicKey
PrivateKey = key.PrivateKey
class RSAKey(object):
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._key = value
self.numbers = self._numbers()
def _numbers(self):
raise NotImplementedError()
def key_type(self):
return 'RSA'
def key_id(self):
der_bytes = self.crypto_public_key().public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo
)
hasher = hashes.Hash(hashes.SHA256(), default_backend())
hasher.update(der_bytes)
hash_bytes = hasher.finalize()
return util.key_id_encode(hash_bytes[:30])
def __eq__(self, other):
return self.crypto_public_key().public_numbers() == other.crypto_public_key().public_numbers()
class RSAPublicKey(RSAKey, PublicKey):
def __init__(self, public_key):
self.key = public_key
def _numbers(self):
return self.key.public_numbers()
@classmethod
def from_pem(cls, key_data):
public_key = serialization.load_pem_public_key(key_data, default_backend())
return cls(public_key)
def pem_block(self):
return self._key.public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
def crypto_public_key(self):
return copy.copy(self._key)
def to_map(self):
return {
'kty': self.key_type(),
'kid': self.key_id(),
'n': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.n)),
'e': util.jose_base64_url_encode(util.serialize_rsa_public_exponent_param(self.numbers.e))
}
def verify(self, buffer, alg, signature):
sig_alg = hash_.rsa_signature_algorithm_by_name(alg)
verifier = self.key.verifier(
signature,
padding.PKCS1v15(),
sig_alg.hasher()
)
while True:
d = buffer.read(1024)
if not d:
break
verifier.update(d)
try:
verifier.verify()
except Exception as e:
raise e
return True
class RSAPrivateKey(RSAKey, PrivateKey):
def __init__(self, private_key):
self.key = private_key
def _numbers(self):
return self.key.private_numbers()
@classmethod
def from_pem(cls, key_data, passphrase=None):
private_key = serialization.load_pem_private_key(
key_data,
passphrase,
default_backend()
)
return cls(private_key)
def pem_block(self):
return self.key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption()
)
def public_key(self):
return RSAPublicKey(self.crypto_public_key())
def crypto_private_key(self):
return copy.copy(self._key)
def crypto_public_key(self):
return self._key.public_key()
def to_map(self):
public_key_map = self.public_key().to_map()
private_key_map = {
'd': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.d)),
'p': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.p)),
'q': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.q)),
'dp': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.dmp1)),
'dq': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.dmq1)),
'qi': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.iqmp)),
}
private_key_map.update(public_key_map)
return private_key_map
def sign(self, buffer, hash_id):
sig_alg = hash_.rsa_pkcs1v15_signature_algorithm_for_hash_id(hash_id)
signer = self.key.signer(
padding.PKCS1v15(),
sig_alg.hasher()
)
while True:
d = buffer.read(1024)
if not d:
break
signer.update(d)
return signer.finalize(), sig_alg.header_param()
def rsa_public_key_from_map(jwk):
nb64url = jwk['n']
eb64url = jwk['e']
n = util.parse_rsa_modules_params(nb64url)
e = util.parse_rsa_public_exponent_param(eb64url)
public_key = rsa.RSAPublicNumbers(e, n).public_key(default_backend())
return RSAPublicKey(public_key)
| 29.729412 | 102 | 0.651959 | from __future__ import unicode_literals
import copy
from cryptography import utils as cry_utils
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from . import hash as hash_
from . import key
from . import util
__all__ = ['RSAPublicKey', 'RSAPrivateKey']
PublicKey = key.PublicKey
PrivateKey = key.PrivateKey
class RSAKey(object):
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._key = value
self.numbers = self._numbers()
def _numbers(self):
raise NotImplementedError()
def key_type(self):
return 'RSA'
def key_id(self):
der_bytes = self.crypto_public_key().public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo
)
hasher = hashes.Hash(hashes.SHA256(), default_backend())
hasher.update(der_bytes)
hash_bytes = hasher.finalize()
return util.key_id_encode(hash_bytes[:30])
def __eq__(self, other):
return self.crypto_public_key().public_numbers() == other.crypto_public_key().public_numbers()
class RSAPublicKey(RSAKey, PublicKey):
def __init__(self, public_key):
self.key = public_key
def _numbers(self):
return self.key.public_numbers()
@classmethod
def from_pem(cls, key_data):
public_key = serialization.load_pem_public_key(key_data, default_backend())
return cls(public_key)
def pem_block(self):
return self._key.public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
def crypto_public_key(self):
return copy.copy(self._key)
def to_map(self):
return {
'kty': self.key_type(),
'kid': self.key_id(),
'n': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.n)),
'e': util.jose_base64_url_encode(util.serialize_rsa_public_exponent_param(self.numbers.e))
}
def verify(self, buffer, alg, signature):
sig_alg = hash_.rsa_signature_algorithm_by_name(alg)
verifier = self.key.verifier(
signature,
padding.PKCS1v15(),
sig_alg.hasher()
)
while True:
d = buffer.read(1024)
if not d:
break
verifier.update(d)
try:
verifier.verify()
except Exception as e:
raise e
return True
class RSAPrivateKey(RSAKey, PrivateKey):
def __init__(self, private_key):
self.key = private_key
def _numbers(self):
return self.key.private_numbers()
@classmethod
def from_pem(cls, key_data, passphrase=None):
private_key = serialization.load_pem_private_key(
key_data,
passphrase,
default_backend()
)
return cls(private_key)
def pem_block(self):
return self.key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption()
)
def public_key(self):
return RSAPublicKey(self.crypto_public_key())
def crypto_private_key(self):
return copy.copy(self._key)
def crypto_public_key(self):
return self._key.public_key()
def to_map(self):
public_key_map = self.public_key().to_map()
private_key_map = {
'd': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.d)),
'p': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.p)),
'q': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.q)),
'dp': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.dmp1)),
'dq': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.dmq1)),
'qi': util.jose_base64_url_encode(cry_utils.int_to_bytes(self.numbers.iqmp)),
}
private_key_map.update(public_key_map)
return private_key_map
def sign(self, buffer, hash_id):
sig_alg = hash_.rsa_pkcs1v15_signature_algorithm_for_hash_id(hash_id)
signer = self.key.signer(
padding.PKCS1v15(),
sig_alg.hasher()
)
while True:
d = buffer.read(1024)
if not d:
break
signer.update(d)
return signer.finalize(), sig_alg.header_param()
def rsa_public_key_from_map(jwk):
nb64url = jwk['n']
eb64url = jwk['e']
n = util.parse_rsa_modules_params(nb64url)
e = util.parse_rsa_public_exponent_param(eb64url)
public_key = rsa.RSAPublicNumbers(e, n).public_key(default_backend())
return RSAPublicKey(public_key)
| true | true |
1c368e2412241e3f97b6cddb30678921750921d1 | 995 | py | Python | Windows - Python 3.9, 3.6/src/explosion.py | Python-Coderss/Attack-Of-The-Robots-Compiled | 5da1b9a7d8531879cf3c3061c2d2d5abde2586e8 | [
"Apache-2.0"
] | 1 | 2022-03-08T01:24:32.000Z | 2022-03-08T01:24:32.000Z | Windows - Python 3.9, 3.6/src/explosion.py | Python-Coderss/Attack-Of-The-Robots-Compiled | 5da1b9a7d8531879cf3c3061c2d2d5abde2586e8 | [
"Apache-2.0"
] | null | null | null | Windows - Python 3.9, 3.6/src/explosion.py | Python-Coderss/Attack-Of-The-Robots-Compiled | 5da1b9a7d8531879cf3c3061c2d2d5abde2586e8 | [
"Apache-2.0"
] | null | null | null | import pygame
class Explosion(pygame.sprite.Sprite):
def __init__(self, screen, x, y, images, duration, damage, damage_player):
pygame.sprite.Sprite.__init__(self, self.containers)
self.screen = screen
self.x = x
self.y = y
self.images = images
self.duration = duration
self.damage = damage
self.damage_player = damage_player
self.rect = self.images[0].get_rect()
self.rect.center = (self.x, self.y)
self.a_timer = duration
self.frame_to_draw = 0
self.last_frame = len(self.images) - 1
self.animation_timer = self.duration
def update(self):
self.animation_timer -= 1
if self.animation_timer <= 0:
if self.frame_to_draw < self.last_frame:
self.frame_to_draw += 1
self.animation_timer = self.duration
else:
self.kill()
self.screen.blit(self.images[self.frame_to_draw], self.rect)
| 35.535714 | 78 | 0.60201 | import pygame
class Explosion(pygame.sprite.Sprite):
def __init__(self, screen, x, y, images, duration, damage, damage_player):
pygame.sprite.Sprite.__init__(self, self.containers)
self.screen = screen
self.x = x
self.y = y
self.images = images
self.duration = duration
self.damage = damage
self.damage_player = damage_player
self.rect = self.images[0].get_rect()
self.rect.center = (self.x, self.y)
self.a_timer = duration
self.frame_to_draw = 0
self.last_frame = len(self.images) - 1
self.animation_timer = self.duration
def update(self):
self.animation_timer -= 1
if self.animation_timer <= 0:
if self.frame_to_draw < self.last_frame:
self.frame_to_draw += 1
self.animation_timer = self.duration
else:
self.kill()
self.screen.blit(self.images[self.frame_to_draw], self.rect)
| true | true |
1c368e62a56e684bcef585a3442b87c6538625d1 | 1,156 | py | Python | nicos_mlz/jcnsse/setups/jhg1.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/jcnsse/setups/jhg1.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z | nicos_mlz/jcnsse/setups/jhg1.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | description = 'JCNS humidity generator'
group = 'plugplay'
tango_base = 'tango://%s:10000/box/' % setupname
devices = {
'%s_flowrate' % setupname: device('nicos.devices.tango.WindowTimeoutAO',
description = 'Flow rate through humidity cell',
tangodevice = tango_base + 'mhg/flowrate',
fmtstr = '%.1f',
unit = 'ml',
timeout = 600.0,
precision = 0.2,
),
'%s_humidity' % setupname: device('nicos.devices.tango.WindowTimeoutAO',
description = 'Humidity in humidity cell',
tangodevice = tango_base + 'mhg/humidity',
fmtstr = '%.1f',
unit = '%rH',
timeout = 600.0,
precision = 1,
),
'T_%s_cell' % setupname: device('nicos.devices.tango.Sensor',
description = 'Temperature in humidity cell',
tangodevice = tango_base + 'mhg/temperature',
fmtstr = '%.1f',
unit = 'degC',
),
'%s_standby' % setupname: device('nicos.devices.tango.NamedDigitalOutput',
description = 'Switches standby mode on and off',
tangodevice = tango_base + 'mhg/standby',
mapping = {'on': 1, 'off': 0},
),
}
| 32.111111 | 78 | 0.588235 | description = 'JCNS humidity generator'
group = 'plugplay'
tango_base = 'tango://%s:10000/box/' % setupname
devices = {
'%s_flowrate' % setupname: device('nicos.devices.tango.WindowTimeoutAO',
description = 'Flow rate through humidity cell',
tangodevice = tango_base + 'mhg/flowrate',
fmtstr = '%.1f',
unit = 'ml',
timeout = 600.0,
precision = 0.2,
),
'%s_humidity' % setupname: device('nicos.devices.tango.WindowTimeoutAO',
description = 'Humidity in humidity cell',
tangodevice = tango_base + 'mhg/humidity',
fmtstr = '%.1f',
unit = '%rH',
timeout = 600.0,
precision = 1,
),
'T_%s_cell' % setupname: device('nicos.devices.tango.Sensor',
description = 'Temperature in humidity cell',
tangodevice = tango_base + 'mhg/temperature',
fmtstr = '%.1f',
unit = 'degC',
),
'%s_standby' % setupname: device('nicos.devices.tango.NamedDigitalOutput',
description = 'Switches standby mode on and off',
tangodevice = tango_base + 'mhg/standby',
mapping = {'on': 1, 'off': 0},
),
}
| true | true |
1c368e99b4da3894ecbd4955cf1c4f25f7cd2607 | 3,367 | py | Python | pyhpecw7/features/l2vpn.py | HPENetworking/hpe-cw7-ansible | a7569b1dd21ad38a53d825eb4d4b2caf8ff6ea16 | [
"Apache-2.0"
] | 4 | 2022-01-10T21:02:00.000Z | 2022-03-09T03:05:22.000Z | pyhpecw7/features/l2vpn.py | flycoolman/hpe-cw7-ansible | a7569b1dd21ad38a53d825eb4d4b2caf8ff6ea16 | [
"Apache-2.0"
] | null | null | null | pyhpecw7/features/l2vpn.py | flycoolman/hpe-cw7-ansible | a7569b1dd21ad38a53d825eb4d4b2caf8ff6ea16 | [
"Apache-2.0"
] | 2 | 2022-01-10T21:03:07.000Z | 2022-01-20T09:11:44.000Z | """Manage L2VPN on HPCOM7 devices.
"""
from pyhpecw7.utils.xml.lib import *
class L2VPN(object):
"""Enable/Disable L2VPN globally on a HP Comware 7 switch.
Args:
device (HPCOM7): connected instance of a ``pyhpecw7.comware.HPCOM7``
object.
Attributes:
device (HPCOM7): connected instance of a ``pyhpecw7.comware.HPCOM7``
object.
"""
def __init__(self, device):
self.device = device
def get_config(self):
"""Get current L2VPN global configuration state.
"""
KEYMAP = {
'enable': 'Enable',
'vsi_supported': 'SupportVsiInterface'
}
VALUE_MAP = {
'Enable': {
'true': 'enabled',
'false': 'disabled'
}
}
E = data_element_maker()
top = E.top(
E.L2VPN(
E.Base()
)
)
nc_get_reply = self.device.get(('subtree', top))
return_l2vpn = data_elem_to_dict(nc_get_reply.data_ele, KEYMAP, value_map=VALUE_MAP)
return return_l2vpn.get('enable')
def enable(self, stage=False):
"""Stage or execute a config object to enable L2VPN
Args:
stage (bool): whether to stage the commands or execute
immediately
Returns:
True if stage=True and successfully staged
etree.Element XML response if immediate execution
"""
config = self._build_config(state='enabled')
if stage:
return self.device.stage_config(config, 'edit_config')
else:
return self.device.edit_config(config)
def disable(self, stage=False):
"""Stage or execute a config object to disable L2VPN
Args:
stage (bool): whether to stage the commands or execute
immediately
Returns:
True if stage=True and successfully staged
etree.Element XML response if immediate execution
"""
config = self._build_config(state='disabled')
if stage:
return self.device.stage_config(config, 'edit_config')
else:
return self.device.edit_config(config)
def _build_config(self, state):
"""Build config object to configure L2VPN global features
Args:
state (str): must be "enabled" or "disabled" and is the desired
state of the L2VPN global feature
Returns:
etree.Element config object to configure L2VPN global features
"""
if state == 'enabled':
value = 'true'
elif state == 'disabled':
value = 'false'
EN = nc_element_maker()
EC = config_element_maker()
config = EN.config(
EC.top(
EC.L2VPN(
EC.Base(
EC.Enable(value),
**operation_kwarg('merge')
)
)
)
)
return config
def config(self,stage=False):
commands = []
commands.append('l2vpn enable')
if commands:
commands.append('\n')
if stage:
self.device.stage_config(commands, 'cli_config')
else:
self.device.cli_config(commands) | 27.373984 | 92 | 0.536086 |
from pyhpecw7.utils.xml.lib import *
class L2VPN(object):
def __init__(self, device):
self.device = device
def get_config(self):
KEYMAP = {
'enable': 'Enable',
'vsi_supported': 'SupportVsiInterface'
}
VALUE_MAP = {
'Enable': {
'true': 'enabled',
'false': 'disabled'
}
}
E = data_element_maker()
top = E.top(
E.L2VPN(
E.Base()
)
)
nc_get_reply = self.device.get(('subtree', top))
return_l2vpn = data_elem_to_dict(nc_get_reply.data_ele, KEYMAP, value_map=VALUE_MAP)
return return_l2vpn.get('enable')
def enable(self, stage=False):
config = self._build_config(state='enabled')
if stage:
return self.device.stage_config(config, 'edit_config')
else:
return self.device.edit_config(config)
def disable(self, stage=False):
config = self._build_config(state='disabled')
if stage:
return self.device.stage_config(config, 'edit_config')
else:
return self.device.edit_config(config)
def _build_config(self, state):
if state == 'enabled':
value = 'true'
elif state == 'disabled':
value = 'false'
EN = nc_element_maker()
EC = config_element_maker()
config = EN.config(
EC.top(
EC.L2VPN(
EC.Base(
EC.Enable(value),
**operation_kwarg('merge')
)
)
)
)
return config
def config(self,stage=False):
commands = []
commands.append('l2vpn enable')
if commands:
commands.append('\n')
if stage:
self.device.stage_config(commands, 'cli_config')
else:
self.device.cli_config(commands) | true | true |
1c368f7505cae4e893045856ca51c6b53254b34a | 11,899 | py | Python | lib/urlwatch/command.py | RXWatcher1/urlwatch | c171851693206ce1df4d9c47327d38996ed063a6 | [
"BSD-3-Clause"
] | null | null | null | lib/urlwatch/command.py | RXWatcher1/urlwatch | c171851693206ce1df4d9c47327d38996ed063a6 | [
"BSD-3-Clause"
] | null | null | null | lib/urlwatch/command.py | RXWatcher1/urlwatch | c171851693206ce1df4d9c47327d38996ed063a6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of urlwatch (https://thp.io/2008/urlwatch/).
# Copyright (c) 2008-2019 Thomas Perl <m@thp.io>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import logging
import os
import shutil
import sys
import requests
from .filters import FilterBase
from .handler import JobState
from .jobs import JobBase, UrlJob
from .reporters import ReporterBase
from .util import atomic_rename, edit_file
from .mailer import set_password, have_password
logger = logging.getLogger(__name__)
class UrlwatchCommand:
def __init__(self, urlwatcher):
self.urlwatcher = urlwatcher
self.urlwatch_config = urlwatcher.urlwatch_config
def edit_hooks(self):
fn_base, fn_ext = os.path.splitext(self.urlwatch_config.hooks)
hooks_edit = fn_base + '.edit' + fn_ext
try:
if os.path.exists(self.urlwatch_config.hooks):
shutil.copy(self.urlwatch_config.hooks, hooks_edit)
elif self.urlwatch_config.hooks_py_example is not None and os.path.exists(
self.urlwatch_config.hooks_py_example):
shutil.copy(self.urlwatch_config.hooks_py_example, hooks_edit)
edit_file(hooks_edit)
imp.load_source('hooks', hooks_edit)
atomic_rename(hooks_edit, self.urlwatch_config.hooks)
print('Saving edit changes in', self.urlwatch_config.hooks)
except SystemExit:
raise
except Exception as e:
print('Parsing failed:')
print('======')
print(e)
print('======')
print('')
print('The file', self.urlwatch_config.hooks, 'was NOT updated.')
print('Your changes have been saved in', hooks_edit)
return 1
return 0
def show_features(self):
print()
print('Supported jobs:\n')
print(JobBase.job_documentation())
print('Supported filters:\n')
print(FilterBase.filter_documentation())
print()
print('Supported reporters:\n')
print(ReporterBase.reporter_documentation())
print()
return 0
def list_urls(self):
for idx, job in enumerate(self.urlwatcher.jobs):
if self.urlwatch_config.verbose:
print('%d: %s' % (idx + 1, repr(job)))
else:
pretty_name = job.pretty_name()
location = job.get_location()
if pretty_name != location:
print('%d: %s (%s)' % (idx + 1, pretty_name, location))
else:
print('%d: %s' % (idx + 1, pretty_name))
return 0
def _find_job(self, query):
try:
index = int(query)
if index <= 0:
return None
try:
return self.urlwatcher.jobs[index - 1]
except IndexError:
return None
except ValueError:
return next((job for job in self.urlwatcher.jobs if job.get_location() == query), None)
def test_filter(self):
job = self._find_job(self.urlwatch_config.test_filter)
job = job.with_defaults(self.urlwatcher.config_storage.config)
if job is None:
print('Not found: %r' % (self.urlwatch_config.test_filter,))
return 1
if isinstance(job, UrlJob):
# Force re-retrieval of job, as we're testing filters
job.ignore_cached = True
job_state = JobState(self.urlwatcher.cache_storage, job)
job_state.process()
if job_state.exception is not None:
raise job_state.exception
print(job_state.new_data)
# We do not save the job state or job on purpose here, since we are possibly modifying the job
# (ignore_cached) and we do not want to store the newly-retrieved data yet (filter testing)
return 0
def modify_urls(self):
save = True
if self.urlwatch_config.delete is not None:
job = self._find_job(self.urlwatch_config.delete)
if job is not None:
self.urlwatcher.jobs.remove(job)
print('Removed %r' % (job,))
else:
print('Not found: %r' % (self.urlwatch_config.delete,))
save = False
if self.urlwatch_config.add is not None:
# Allow multiple specifications of filter=, so that multiple filters can be specified on the CLI
items = [item.split('=', 1) for item in self.urlwatch_config.add.split(',')]
filters = [v for k, v in items if k == 'filter']
items = [(k, v) for k, v in items if k != 'filter']
d = {k: v for k, v in items}
if filters:
d['filter'] = ','.join(filters)
job = JobBase.unserialize(d)
print('Adding %r' % (job,))
self.urlwatcher.jobs.append(job)
if save:
self.urlwatcher.urls_storage.save(self.urlwatcher.jobs)
return 0
def handle_actions(self):
if self.urlwatch_config.features:
sys.exit(self.show_features())
if self.urlwatch_config.gc_cache:
self.urlwatcher.cache_storage.gc([job.get_guid() for job in self.urlwatcher.jobs])
sys.exit(0)
if self.urlwatch_config.edit:
sys.exit(self.urlwatcher.urls_storage.edit(self.urlwatch_config.urls_yaml_example))
if self.urlwatch_config.edit_hooks:
sys.exit(self.edit_hooks())
if self.urlwatch_config.test_filter:
sys.exit(self.test_filter())
if self.urlwatch_config.list:
sys.exit(self.list_urls())
if self.urlwatch_config.add is not None or self.urlwatch_config.delete is not None:
sys.exit(self.modify_urls())
def check_edit_config(self):
if self.urlwatch_config.edit_config:
sys.exit(self.urlwatcher.config_storage.edit())
def check_telegram_chats(self):
if self.urlwatch_config.telegram_chats:
config = self.urlwatcher.config_storage.config['report'].get('telegram', None)
if not config:
print('You need to configure telegram in your config first (see README.md)')
sys.exit(1)
bot_token = config.get('bot_token', None)
if not bot_token:
print('You need to set up your bot token first (see README.md)')
sys.exit(1)
info = requests.get('https://api.telegram.org/bot{}/getMe'.format(bot_token)).json()
chats = {}
for chat_info in requests.get('https://api.telegram.org/bot{}/getUpdates'.format(bot_token)).json()['result']:
chat = chat_info['message']['chat']
if chat['type'] == 'private':
chats[str(chat['id'])] = ' '.join((chat['first_name'], chat['last_name'])) if 'last_name' in chat else chat['first_name']
if not chats:
print('No chats found. Say hello to your bot at https://t.me/{}'.format(info['result']['username']))
sys.exit(1)
headers = ('Chat ID', 'Name')
maxchat = max(len(headers[0]), max((len(k) for k, v in chats.items()), default=0))
maxname = max(len(headers[1]), max((len(v) for k, v in chats.items()), default=0))
fmt = '%-' + str(maxchat) + 's %s'
print(fmt % headers)
print(fmt % ('-' * maxchat, '-' * maxname))
for k, v in sorted(chats.items(), key=lambda kv: kv[1]):
print(fmt % (k, v))
print('\nChat up your bot here: https://t.me/{}'.format(info['result']['username']))
sys.exit(0)
def check_test_slack(self):
if self.urlwatch_config.test_slack:
config = self.urlwatcher.config_storage.config['report'].get('slack', None)
if not config:
print('You need to configure slack in your config first (see README.md)')
sys.exit(1)
webhook_url = config.get('webhook_url', None)
if not webhook_url:
print('You need to set up your slack webhook_url first (see README.md)')
sys.exit(1)
info = requests.post(webhook_url, json={"text": "Test message from urlwatch, your configuration is working"})
if info.status_code == requests.codes.ok:
print('Successfully sent message to Slack')
sys.exit(0)
else:
print('Error while submitting message to Slack:{0}'.format(info.text))
sys.exit(1)
    def check_smtp_login(self):
        """Interactively store the SMTP password in the system keyring.

        Only runs when --smtp-login was given; validates the e-mail reporter
        configuration first and always exits the process.
        """
        if self.urlwatch_config.smtp_login:
            config = self.urlwatcher.config_storage.config['report']['email']
            smtp_config = config['smtp']
            success = True
            # Collect every configuration problem before bailing out so the
            # user sees all issues at once instead of one per run.
            if not config['enabled']:
                print('Please enable e-mail reporting in the config first.')
                success = False
            if config['method'] != 'smtp':
                print('Please set the method to SMTP for the e-mail reporter.')
                success = False
            if not smtp_config['keyring']:
                print('Keyring authentication must be enabled for SMTP.')
                success = False
            smtp_hostname = smtp_config['host']
            if not smtp_hostname:
                print('Please configure the SMTP hostname in the config first.')
                success = False
            # The SMTP user defaults to the sender ("from") address.
            smtp_username = smtp_config.get('user', config['from'])
            if not smtp_username:
                print('Please configure the SMTP user in the config first.')
                success = False
            if not success:
                sys.exit(1)
            # Ask before overwriting an already-stored password.
            if have_password(smtp_hostname, smtp_username):
                message = 'Password for %s / %s already set, update? [y/N] ' % (smtp_username, smtp_hostname)
                if input(message).lower() != 'y':
                    print('Password unchanged.')
                    sys.exit(0)
            if success:
                set_password(smtp_hostname, smtp_username)
                # TODO: Actually verify that the login to the server works
            sys.exit(0)
    def run(self):
        """Main entry point: run the one-shot checks (each may terminate the
        process via sys.exit), then dispatch CLI actions and finally execute
        all configured jobs."""
        self.check_edit_config()
        self.check_smtp_login()
        self.check_telegram_chats()
        self.check_test_slack()
        self.handle_actions()
        self.urlwatcher.run_jobs()
        self.urlwatcher.close()
| 40.199324 | 141 | 0.599966 |
import imp
import logging
import os
import shutil
import sys
import requests
from .filters import FilterBase
from .handler import JobState
from .jobs import JobBase, UrlJob
from .reporters import ReporterBase
from .util import atomic_rename, edit_file
from .mailer import set_password, have_password
logger = logging.getLogger(__name__)
class UrlwatchCommand:
    """Executes urlwatch command-line actions on behalf of an Urlwatch
    instance (its jobs/config/cache storages and reporting)."""

    def __init__(self, urlwatcher):
        self.urlwatcher = urlwatcher
        self.urlwatch_config = urlwatcher.urlwatch_config

    def edit_hooks(self):
        """Edit hooks.py through a temporary copy; the real file is only
        replaced when the edited copy imports cleanly.

        Returns 0 on success, 1 when the edited file failed to parse.
        """
        fn_base, fn_ext = os.path.splitext(self.urlwatch_config.hooks)
        hooks_edit = fn_base + '.edit' + fn_ext
        try:
            if os.path.exists(self.urlwatch_config.hooks):
                shutil.copy(self.urlwatch_config.hooks, hooks_edit)
            elif self.urlwatch_config.hooks_py_example is not None and os.path.exists(
                    self.urlwatch_config.hooks_py_example):
                shutil.copy(self.urlwatch_config.hooks_py_example, hooks_edit)
            edit_file(hooks_edit)
            # Does it still import?  Only then overwrite the real hooks file.
            imp.load_source('hooks', hooks_edit)
            atomic_rename(hooks_edit, self.urlwatch_config.hooks)
            print('Saving edit changes in', self.urlwatch_config.hooks)
        except SystemExit:
            raise
        except Exception as e:
            print('Parsing failed:')
            print('======')
            print(e)
            print('======')
            print('')
            print('The file', self.urlwatch_config.hooks, 'was NOT updated.')
            print('Your changes have been saved in', hooks_edit)
            return 1
        return 0

    def show_features(self):
        """Print documentation for supported jobs, filters and reporters."""
        print()
        print('Supported jobs:\n')
        print(JobBase.job_documentation())
        print('Supported filters:\n')
        print(FilterBase.filter_documentation())
        print()
        print('Supported reporters:\n')
        print(ReporterBase.reporter_documentation())
        print()
        return 0

    def list_urls(self):
        """Print the configured jobs, numbered starting at 1."""
        for idx, job in enumerate(self.urlwatcher.jobs):
            if self.urlwatch_config.verbose:
                print('%d: %s' % (idx + 1, repr(job)))
            else:
                pretty_name = job.pretty_name()
                location = job.get_location()
                if pretty_name != location:
                    print('%d: %s (%s)' % (idx + 1, pretty_name, location))
                else:
                    print('%d: %s' % (idx + 1, pretty_name))
        return 0

    def _find_job(self, query):
        """Resolve a job from a 1-based index or its location string.

        Returns None when nothing matches.
        """
        try:
            index = int(query)
            if index <= 0:
                return None
            try:
                return self.urlwatcher.jobs[index - 1]
            except IndexError:
                return None
        except ValueError:
            # Not a number: match on the job's location instead.
            return next((job for job in self.urlwatcher.jobs if job.get_location() == query), None)

    def test_filter(self):
        """Run a single job (ignoring the cache for URL jobs) and print the
        filtered result.  Returns 0 on success, 1 when the job is unknown."""
        job = self._find_job(self.urlwatch_config.test_filter)
        # Bugfix: check for a missing job *before* dereferencing it.
        # Previously with_defaults() was called first, raising AttributeError
        # on None instead of printing the friendly "Not found" message.
        if job is None:
            print('Not found: %r' % (self.urlwatch_config.test_filter,))
            return 1
        job = job.with_defaults(self.urlwatcher.config_storage.config)
        if isinstance(job, UrlJob):
            job.ignore_cached = True
        job_state = JobState(self.urlwatcher.cache_storage, job)
        job_state.process()
        if job_state.exception is not None:
            raise job_state.exception
        print(job_state.new_data)
        # We do not save the job state or job on purpose here, since we are possibly modifying the job
        # (ignore_cached) and we do not want to store the newly-retrieved data yet (filter testing)
        return 0

    def modify_urls(self):
        """Apply --add / --delete job modifications and save the job list."""
        save = True
        if self.urlwatch_config.delete is not None:
            job = self._find_job(self.urlwatch_config.delete)
            if job is not None:
                self.urlwatcher.jobs.remove(job)
                print('Removed %r' % (job,))
            else:
                print('Not found: %r' % (self.urlwatch_config.delete,))
                save = False
        if self.urlwatch_config.add is not None:
            # Allow multiple specifications of filter=, so that multiple filters can be specified on the CLI
            items = [item.split('=', 1) for item in self.urlwatch_config.add.split(',')]
            filters = [v for k, v in items if k == 'filter']
            items = [(k, v) for k, v in items if k != 'filter']
            d = {k: v for k, v in items}
            if filters:
                d['filter'] = ','.join(filters)
            job = JobBase.unserialize(d)
            print('Adding %r' % (job,))
            self.urlwatcher.jobs.append(job)
        if save:
            self.urlwatcher.urls_storage.save(self.urlwatcher.jobs)
        return 0

    def handle_actions(self):
        """Dispatch one-shot CLI actions; each taken action exits."""
        if self.urlwatch_config.features:
            sys.exit(self.show_features())
        if self.urlwatch_config.gc_cache:
            self.urlwatcher.cache_storage.gc([job.get_guid() for job in self.urlwatcher.jobs])
            sys.exit(0)
        if self.urlwatch_config.edit:
            sys.exit(self.urlwatcher.urls_storage.edit(self.urlwatch_config.urls_yaml_example))
        if self.urlwatch_config.edit_hooks:
            sys.exit(self.edit_hooks())
        if self.urlwatch_config.test_filter:
            sys.exit(self.test_filter())
        if self.urlwatch_config.list:
            sys.exit(self.list_urls())
        if self.urlwatch_config.add is not None or self.urlwatch_config.delete is not None:
            sys.exit(self.modify_urls())

    def check_edit_config(self):
        """--edit-config: open the configuration in an editor, then exit."""
        if self.urlwatch_config.edit_config:
            sys.exit(self.urlwatcher.config_storage.edit())

    def check_telegram_chats(self):
        """--telegram-chats: list private chats known to the bot, then exit."""
        if self.urlwatch_config.telegram_chats:
            config = self.urlwatcher.config_storage.config['report'].get('telegram', None)
            if not config:
                print('You need to configure telegram in your config first (see README.md)')
                sys.exit(1)
            bot_token = config.get('bot_token', None)
            if not bot_token:
                print('You need to set up your bot token first (see README.md)')
                sys.exit(1)
            info = requests.get('https://api.telegram.org/bot{}/getMe'.format(bot_token)).json()
            chats = {}
            for chat_info in requests.get('https://api.telegram.org/bot{}/getUpdates'.format(bot_token)).json()['result']:
                chat = chat_info['message']['chat']
                if chat['type'] == 'private':
                    chats[str(chat['id'])] = ' '.join((chat['first_name'], chat['last_name'])) if 'last_name' in chat else chat['first_name']
            if not chats:
                print('No chats found. Say hello to your bot at https://t.me/{}'.format(info['result']['username']))
                sys.exit(1)
            # Print a column-aligned table of chat ids and names.
            headers = ('Chat ID', 'Name')
            maxchat = max(len(headers[0]), max((len(k) for k, v in chats.items()), default=0))
            maxname = max(len(headers[1]), max((len(v) for k, v in chats.items()), default=0))
            fmt = '%-' + str(maxchat) + 's %s'
            print(fmt % headers)
            print(fmt % ('-' * maxchat, '-' * maxname))
            for k, v in sorted(chats.items(), key=lambda kv: kv[1]):
                print(fmt % (k, v))
            print('\nChat up your bot here: https://t.me/{}'.format(info['result']['username']))
            sys.exit(0)

    def check_test_slack(self):
        """--test-slack: post a test message to the webhook, then exit."""
        if self.urlwatch_config.test_slack:
            config = self.urlwatcher.config_storage.config['report'].get('slack', None)
            if not config:
                print('You need to configure slack in your config first (see README.md)')
                sys.exit(1)
            webhook_url = config.get('webhook_url', None)
            if not webhook_url:
                print('You need to set up your slack webhook_url first (see README.md)')
                sys.exit(1)
            info = requests.post(webhook_url, json={"text": "Test message from urlwatch, your configuration is working"})
            if info.status_code == requests.codes.ok:
                print('Successfully sent message to Slack')
                sys.exit(0)
            else:
                print('Error while submitting message to Slack:{0}'.format(info.text))
                sys.exit(1)

    def check_smtp_login(self):
        """--smtp-login: validate e-mail config and store the SMTP password
        in the keyring, then exit."""
        if self.urlwatch_config.smtp_login:
            config = self.urlwatcher.config_storage.config['report']['email']
            smtp_config = config['smtp']
            success = True
            # Report every configuration problem before exiting.
            if not config['enabled']:
                print('Please enable e-mail reporting in the config first.')
                success = False
            if config['method'] != 'smtp':
                print('Please set the method to SMTP for the e-mail reporter.')
                success = False
            if not smtp_config['keyring']:
                print('Keyring authentication must be enabled for SMTP.')
                success = False
            smtp_hostname = smtp_config['host']
            if not smtp_hostname:
                print('Please configure the SMTP hostname in the config first.')
                success = False
            smtp_username = smtp_config.get('user', config['from'])
            if not smtp_username:
                print('Please configure the SMTP user in the config first.')
                success = False
            if not success:
                sys.exit(1)
            if have_password(smtp_hostname, smtp_username):
                message = 'Password for %s / %s already set, update? [y/N] ' % (smtp_username, smtp_hostname)
                if input(message).lower() != 'y':
                    print('Password unchanged.')
                    sys.exit(0)
            if success:
                set_password(smtp_hostname, smtp_username)
                # TODO: Actually verify that the login to the server works
            sys.exit(0)

    def run(self):
        """Run one-shot checks (each may sys.exit), then actions and jobs."""
        self.check_edit_config()
        self.check_smtp_login()
        self.check_telegram_chats()
        self.check_test_slack()
        self.handle_actions()
        self.urlwatcher.run_jobs()
        self.urlwatcher.close()
| true | true |
1c3690450ffbc0da5752a9490a5bca65d5a5ee89 | 690 | py | Python | setup.py | emordonez/transfermarkt-transfers | 147c7b025958f7bb6796a32e95b0867dd2681820 | [
"MIT"
] | 2 | 2021-09-21T07:34:07.000Z | 2021-11-01T16:26:22.000Z | setup.py | emordonez/transfermarkt-transfers | 147c7b025958f7bb6796a32e95b0867dd2681820 | [
"MIT"
] | null | null | null | setup.py | emordonez/transfermarkt-transfers | 147c7b025958f7bb6796a32e95b0867dd2681820 | [
"MIT"
] | 2 | 2021-09-21T07:34:08.000Z | 2022-01-29T08:33:02.000Z | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Read packaging metadata from the repository's support files.
# Encoding is pinned to UTF-8 so installs don't depend on the platform's
# default locale (README.md may contain non-ASCII Markdown).
with open('requirements.txt', encoding='utf-8') as f:
    requirements = f.read().splitlines()
with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('LICENSE', encoding='utf-8') as f:
    license_text = f.read()  # renamed: 'license' shadows the builtin

setup(
    name='tmtransfers',
    version='0.1.0',
    install_requires=requirements,
    description='Script to web scrape league transfer data from Transfermarkt',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Eric M. Ordonez',
    author_email='',
    url='https://github.com/emordonez/transfermarkt-transfers',
    license=license_text,
    packages=find_packages(exclude=['data', 'tests'])
)
| 24.642857 | 79 | 0.686957 |
# Packaging script for the tmtransfers Transfermarkt scraper.
from setuptools import setup, find_packages
# Runtime dependencies, one requirement per line.
with open('requirements.txt') as f:
    requirements= f.read().splitlines()
# Long description shown on PyPI (rendered as Markdown, see content type).
with open('README.md') as f:
    readme = f.read()
# NOTE(review): this local name shadows the builtin 'license'; harmless in
# a short script, but renaming would be cleaner.
with open('LICENSE') as f:
    license = f.read()
setup(
    name='tmtransfers',
    version='0.1.0',
    install_requires=requirements,
    description='Script to web scrape league transfer data from Transfermarkt',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Eric M. Ordonez',
    author_email='',
    url='https://github.com/emordonez/transfermarkt-transfers',
    license=license,
    packages=find_packages(exclude=['data', 'tests'])
)
| true | true |
1c36910fc81a9012e6fe1f8746d4dcb72467f9af | 288 | py | Python | Pig Latin Translator.py | navaneethmenon98/hackerearth | e103ef7cbc4d3e76cca8ae2aad888318f87a32a3 | [
"MIT"
] | null | null | null | Pig Latin Translator.py | navaneethmenon98/hackerearth | e103ef7cbc4d3e76cca8ae2aad888318f87a32a3 | [
"MIT"
] | null | null | null | Pig Latin Translator.py | navaneethmenon98/hackerearth | e103ef7cbc4d3e76cca8ae2aad888318f87a32a3 | [
"MIT"
] | null | null | null | T=input()
a=['a','e','i','o','u','A','E','I','O','U']
for f in range(T):
d=''
S=map(str,raw_input().split())
for i in range(len(S)):
b=S[i]
if b[0] in a:
b=b+'ay'
else:
c=len(b)
b=b[1:c]+b[0]+'ay'
S[i]=b
for t in range(len(b)):
d=d+b[t]
d=d+' '
print(d)
| 14.4 | 43 | 0.440972 | T=input()
# Pig Latin translator loop.  NOTE(review): this is Python 2 source
# (raw_input; map() result indexed and assigned) and will not run under
# Python 3 unmodified.  'a' is the vowel list; T (number of sentences) is
# read above.
a=['a','e','i','o','u','A','E','I','O','U']
for f in range(T):
    d=''
    S=map(str,raw_input().split())
    for i in range(len(S)):
        b=S[i]
        if b[0] in a:
            # Vowel-initial word: just append the 'ay' suffix.
            b=b+'ay'
        else:
            # Consonant-initial: move the first letter to the end, add 'ay'.
            c=len(b)
            b=b[1:c]+b[0]+'ay'
        S[i]=b
        # Append the translated word character by character, then a space.
        for t in range(len(b)):
            d=d+b[t]
        d=d+' '
    print(d)
| true | true |
1c3692c58723af2150d9710743d17bf37bbe56c4 | 6,827 | py | Python | popupcad/graphics2d/proto.py | popupcad/popupcad | d3da448260cd5cb9e05417b0a723d7f73ae4e06e | [
"MIT"
] | 19 | 2015-08-01T22:13:39.000Z | 2020-03-07T03:55:46.000Z | popupcad/graphics2d/proto.py | CadQuery/popupcad | b0c7b406d4b288c7cb375340323bba0252aedbfb | [
"MIT"
] | 106 | 2015-07-23T19:58:01.000Z | 2019-05-14T03:46:08.000Z | popupcad/graphics2d/proto.py | CadQuery/popupcad | b0c7b406d4b288c7cb375340323bba0252aedbfb | [
"MIT"
] | 9 | 2015-10-04T23:38:41.000Z | 2020-07-16T03:50:34.000Z | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import qt.QtCore as qc
import qt.QtGui as qg
from popupcad.graphics2d.graphicsitems import Common, CommonShape
from popupcad.geometry.vertex import ShapeVertex
from popupcad.filetypes.genericshapes import GenericPoly, GenericPolyline, GenericLine, GenericCircle, GenericTwoPointRect
import popupcad
import qt.qt_hacks as qh
class Proto(Common):
    """Base class for interactive shape-drawing tools.

    A Proto lives in the scene while the user is still placing vertices;
    once the shape is complete it is replaced by the finished interactive
    item (see :meth:`finish_definition`).  Subclasses set ``shape_class``
    to the generic shape type they construct.
    """
    z_value = 20  # stacking order, passed to setZValue() in __init__
    isDeletable = True
    minradius = 20  # minimum click separation; divided by zoom in checkdist
    basicpen = qg.QPen(qg.QColor.fromRgbF(0,0,0,1),1.0,qc.Qt.SolidLine,qc.Qt.RoundCap,qc.Qt.RoundJoin)
    basicpen.setCosmetic(True)
    basicbrush = qg.QBrush(qg.QColor.fromRgbF(1, 1, 0, .25), qc.Qt.SolidPattern)
    nobrush = qg.QBrush(qc.Qt.NoBrush)
    def __init__(self, *args, **kwargs):
        super(Proto, self).__init__(*args, **kwargs)
        self.setZValue(self.z_value)
        # Empty generic shape to be filled vertex by vertex.
        self.generic = self.shape_class([], [], False)
        # Floating handle that tracks the mouse before a vertex is committed.
        self.temphandle = None
        self.setAcceptHoverEvents(True)
        self.setFlag(self.ItemIsMovable, True)
        self.setFlag(self.ItemIsSelectable, True)
        self.setFlag(self.ItemIsFocusable, True)
        self.setPen(self.basicpen)
        self.setBrush(self.basicbrush)
    def painterpath(self):
        # Build the Qt painter path from committed vertices plus the
        # floating handle, if one exists.
        ep = self.exteriorpoints(popupcad.view_scaling)
        ip = self.generic.interiorpoints(scaling=popupcad.view_scaling)
        return self.generic.gen_painterpath(ep, ip)
    def exteriorpoints(self,scaling=1):
        # Committed exterior vertices, with the floating handle appended.
        ep = self.generic.exteriorpoints(scaling=scaling)
        if self.temphandle is not None:
            ep.append(
                self.temphandle.generic.getpos(scaling=scaling))
        return ep
    def deltemphandle(self):
        # Remove and forget the floating handle ('not not' coerces to bool).
        if not not self.temphandle:
            self.temphandle.setParentItem(None)
            del self.temphandle
            self.temphandle = None
    def checkdist(self, point0, point1):
        # True when the two points are far enough apart at the current zoom.
        return not popupcad.algorithms.points.twopointsthesame(
            point0,
            point1,
            self.minradius /
            self.scene().views()[0].zoom())
    def finish_definition(self):
        # Hand the completed generic shape to the scene and remove this tool.
        scene = self.scene()
        self.deltemphandle()
        scene.addItem(self.generic.outputinteractive())
        self.harddelete()
        scene.childfinished()
    def mousedoubleclick(self, point):
        # Double click finishes the shape once it is geometrically valid.
        if self.generic.is_valid_bool():
            self.finish_definition()
            self.updateshape()
    def mouserelease(self, point):
        pass
    def mousemove(self, point):
        # Let the floating handle follow the cursor (view -> model coords).
        import numpy
        point = tuple(numpy.array(qh.to_tuple(point)) / popupcad.view_scaling)
        if not not self.temphandle:
            self.temphandle.generic.setpos(point)
            self.temphandle.updateshape()
        self.updateshape()
class ProtoMultiPoint(Proto):
    """Click-per-vertex construction behavior for shapes with arbitrarily
    many vertices (polygons, polylines).  Finishing the shape is handled by
    the inherited double-click handler on :class:`Proto`."""
    def addhandle(self, handle):
        # Commit *handle* as a new exterior vertex.  The first vertex is
        # accepted unconditionally; later ones only if they differ from and
        # are not too close to the previous vertex.
        if self.generic.len_exterior() == 0:
            self.generic.addvertex_exterior(handle.get_generic())
            self.temphandle = None
        else:
            if handle.generic.getpos(scaling=popupcad.view_scaling) != self.generic.get_exterior()[-1].getpos(scaling=popupcad.view_scaling):
                if self.checkdist(handle.generic.getpos(scaling=popupcad.view_scaling),
                                  self.generic.get_exterior()[-1].getpos(scaling=popupcad.view_scaling)):
                    self.generic.addvertex_exterior(handle.get_generic())
                    self.temphandle = None
    def mousepress(self, point):
        # Each press commits the current floating handle (creating one at
        # the click position if needed) and then spawns a fresh floating
        # handle so the next segment can track the mouse.
        import numpy
        point = tuple(numpy.array(qh.to_tuple(point)) / popupcad.view_scaling)
        if not self.temphandle:
            a = ShapeVertex(point)
            self.temphandle = a.gen_interactive()
            self.temphandle.setParentItem(self)
            self.temphandle.updatescale()
            self.addhandle(self.temphandle)
        else:
            self.addhandle(self.temphandle)
        if not self.temphandle:
            a = ShapeVertex(point)
            self.temphandle = a.gen_interactive()
            self.temphandle.setParentItem(self)
            self.temphandle.updatescale()
        self.updateshape()
class ProtoTwoPoint(Proto):
    """Construction behavior for two-point shapes (line, circle, two-point
    rectangle): the first click places the anchor vertex, the second click
    completes the shape."""

    def addhandle(self, handle):
        """Commit *handle* as the next exterior vertex.

        Returns True when the click was consumed (vertex added, or a
        duplicate/too-close second point was discarded).  Raises when more
        than two vertices already exist.
        """
        if self.generic.len_exterior() == 0:
            self.generic.addvertex_exterior(handle.get_generic())
            self.temphandle = None
            return True
        elif self.generic.len_exterior() == 1:
            if qh.to_tuple(handle.pos()) != self.generic.get_exterior()[-1].getpos():
                if self.checkdist(handle.generic.getpos(scaling=popupcad.view_scaling),self.generic.get_exterior()[-1].getpos(scaling=popupcad.view_scaling)):
                    self.generic.addvertex_exterior(handle.get_generic())
                    self.temphandle = None
                    return True
        else:
            raise Exception
        # Duplicate or too-close second point: drop it, but still report the
        # click as handled so the caller can finish the shape.
        self.temphandle = None
        return True

    def mousepress(self, point):
        """Handle a press: place the first vertex, or complete the shape."""
        import numpy
        point = tuple(numpy.array(qh.to_tuple(point)) / popupcad.view_scaling)
        if not self.temphandle:
            a = ShapeVertex(point)
            self.temphandle = a.gen_interactive()
            self.temphandle.setParentItem(self)
            self.temphandle.updatescale()
            if self.generic.len_exterior() == 0:
                # First click: commit the anchor and create a fresh floating
                # handle that will track the mouse.
                self.addhandle(self.temphandle)
                a = ShapeVertex(point)
                self.temphandle = a.gen_interactive()
                self.temphandle.setParentItem(self)
                self.temphandle.updatescale()
                self.updateshape()
                return
            elif self.generic.len_exterior() == 1:
                # Second click: commit it and finish the shape.
                if self.addhandle(self.temphandle):
                    self.finish_definition()
                    self.updateshape()
                    return
                else:
                    return
            else:
                raise Exception
        # A floating handle already existed: treat this press as completing
        # the shape.  (Removed an unreachable trailing updateshape() call
        # that followed the return statement.)
        self.finish_definition()
        self.updateshape()
        return
# Concrete interactive drawing tools: each pairs a construction behavior
# (multi-point or two-point) with the generic shape class it produces.
class ProtoPoly(ProtoMultiPoint, CommonShape, qg.QGraphicsPathItem):
    shape_class = GenericPoly
class ProtoPath(ProtoMultiPoint, CommonShape, qg.QGraphicsPathItem):
    basicbrush = Proto.nobrush  # open polyline: no fill while drawing
    shape_class = GenericPolyline
class ProtoLine(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
    basicbrush = Proto.nobrush  # a line has no interior to fill
    shape_class = GenericLine
class ProtoCircle(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
    shape_class = GenericCircle
class ProtoRect2Point(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
    shape_class = GenericTwoPointRect
| 33.465686 | 158 | 0.634393 |
import qt.QtCore as qc
import qt.QtGui as qg
from popupcad.graphics2d.graphicsitems import Common, CommonShape
from popupcad.geometry.vertex import ShapeVertex
from popupcad.filetypes.genericshapes import GenericPoly, GenericPolyline, GenericLine, GenericCircle, GenericTwoPointRect
import popupcad
import qt.qt_hacks as qh
class Proto(Common):
    """In-progress drawing tool: collects vertices for ``shape_class`` and
    replaces itself with the finished interactive shape when complete."""
    z_value = 20  # stacking order passed to setZValue below
    isDeletable = True
    minradius = 20  # minimum click separation, scaled by the view zoom
    basicpen = qg.QPen(qg.QColor.fromRgbF(0,0,0,1),1.0,qc.Qt.SolidLine,qc.Qt.RoundCap,qc.Qt.RoundJoin)
    basicpen.setCosmetic(True)
    basicbrush = qg.QBrush(qg.QColor.fromRgbF(1, 1, 0, .25), qc.Qt.SolidPattern)
    nobrush = qg.QBrush(qc.Qt.NoBrush)
    def __init__(self, *args, **kwargs):
        super(Proto, self).__init__(*args, **kwargs)
        self.setZValue(self.z_value)
        self.generic = self.shape_class([], [], False)  # empty shape so far
        self.temphandle = None  # floating, not-yet-committed vertex handle
        self.setAcceptHoverEvents(True)
        self.setFlag(self.ItemIsMovable, True)
        self.setFlag(self.ItemIsSelectable, True)
        self.setFlag(self.ItemIsFocusable, True)
        self.setPen(self.basicpen)
        self.setBrush(self.basicbrush)
    def painterpath(self):
        # Painter path over committed vertices plus the floating handle.
        ep = self.exteriorpoints(popupcad.view_scaling)
        ip = self.generic.interiorpoints(scaling=popupcad.view_scaling)
        return self.generic.gen_painterpath(ep, ip)
    def exteriorpoints(self,scaling=1):
        ep = self.generic.exteriorpoints(scaling=scaling)
        # Include the floating handle so the preview follows the mouse.
        if self.temphandle is not None:
            ep.append(
                self.temphandle.generic.getpos(scaling=scaling))
        return ep
    def deltemphandle(self):
        # Drop the floating handle if present.
        if not not self.temphandle:
            self.temphandle.setParentItem(None)
            del self.temphandle
            self.temphandle = None
    def checkdist(self, point0, point1):
        # Reject points that would coincide at the current zoom level.
        return not popupcad.algorithms.points.twopointsthesame(
            point0,
            point1,
            self.minradius /
            self.scene().views()[0].zoom())
    def finish_definition(self):
        # Swap this tool for the final interactive shape in the scene.
        scene = self.scene()
        self.deltemphandle()
        scene.addItem(self.generic.outputinteractive())
        self.harddelete()
        scene.childfinished()
    def mousedoubleclick(self, point):
        # A valid shape can be finished with a double click.
        if self.generic.is_valid_bool():
            self.finish_definition()
            self.updateshape()
    def mouserelease(self, point):
        pass
    def mousemove(self, point):
        import numpy
        # Convert view coordinates to model coordinates before tracking.
        point = tuple(numpy.array(qh.to_tuple(point)) / popupcad.view_scaling)
        if not not self.temphandle:
            self.temphandle.generic.setpos(point)
            self.temphandle.updateshape()
        self.updateshape()
class ProtoMultiPoint(Proto):
    """Click-per-vertex construction for polygons/polylines; finishing is
    handled by the inherited double-click handler."""
    def addhandle(self, handle):
        # First vertex is always accepted; subsequent vertices must differ
        # from and be sufficiently far from the previous one.
        if self.generic.len_exterior() == 0:
            self.generic.addvertex_exterior(handle.get_generic())
            self.temphandle = None
        else:
            if handle.generic.getpos(scaling=popupcad.view_scaling) != self.generic.get_exterior()[-1].getpos(scaling=popupcad.view_scaling):
                if self.checkdist(handle.generic.getpos(scaling=popupcad.view_scaling),
                                  self.generic.get_exterior()[-1].getpos(scaling=popupcad.view_scaling)):
                    self.generic.addvertex_exterior(handle.get_generic())
                    self.temphandle = None
    def mousepress(self, point):
        # Commit the floating handle (creating one at the click position if
        # needed), then spawn a new floating handle for the next segment.
        import numpy
        point = tuple(numpy.array(qh.to_tuple(point)) / popupcad.view_scaling)
        if not self.temphandle:
            a = ShapeVertex(point)
            self.temphandle = a.gen_interactive()
            self.temphandle.setParentItem(self)
            self.temphandle.updatescale()
            self.addhandle(self.temphandle)
        else:
            self.addhandle(self.temphandle)
        if not self.temphandle:
            a = ShapeVertex(point)
            self.temphandle = a.gen_interactive()
            self.temphandle.setParentItem(self)
            self.temphandle.updatescale()
        self.updateshape()
class ProtoTwoPoint(Proto):
    """Construction behavior for two-point shapes (line, circle, two-point
    rectangle): first click anchors, second click completes."""

    def addhandle(self, handle):
        """Commit *handle* as the next exterior vertex.

        Returns True when the click was consumed (vertex added, or a
        duplicate/too-close second point was discarded).  Raises when more
        than two vertices already exist.
        """
        if self.generic.len_exterior() == 0:
            self.generic.addvertex_exterior(handle.get_generic())
            self.temphandle = None
            return True
        elif self.generic.len_exterior() == 1:
            if qh.to_tuple(handle.pos()) != self.generic.get_exterior()[-1].getpos():
                if self.checkdist(handle.generic.getpos(scaling=popupcad.view_scaling),self.generic.get_exterior()[-1].getpos(scaling=popupcad.view_scaling)):
                    self.generic.addvertex_exterior(handle.get_generic())
                    self.temphandle = None
                    return True
        else:
            raise Exception
        # Duplicate or too-close second point: drop it, but still report the
        # click as handled so the caller can finish the shape.
        self.temphandle = None
        return True

    def mousepress(self, point):
        """Handle a press: place the first vertex, or complete the shape."""
        import numpy
        point = tuple(numpy.array(qh.to_tuple(point)) / popupcad.view_scaling)
        if not self.temphandle:
            a = ShapeVertex(point)
            self.temphandle = a.gen_interactive()
            self.temphandle.setParentItem(self)
            self.temphandle.updatescale()
            if self.generic.len_exterior() == 0:
                # First click: commit the anchor and create a fresh floating
                # handle that will track the mouse.
                self.addhandle(self.temphandle)
                a = ShapeVertex(point)
                self.temphandle = a.gen_interactive()
                self.temphandle.setParentItem(self)
                self.temphandle.updatescale()
                self.updateshape()
                return
            elif self.generic.len_exterior() == 1:
                # Second click: commit it and finish the shape.
                if self.addhandle(self.temphandle):
                    self.finish_definition()
                    self.updateshape()
                    return
                else:
                    return
            else:
                raise Exception
        # A floating handle already existed: treat this press as completing
        # the shape.  (Removed an unreachable trailing updateshape() call
        # that followed the return statement.)
        self.finish_definition()
        self.updateshape()
        return
# Concrete drawing tools: construction behavior + generic shape produced.
class ProtoPoly(ProtoMultiPoint, CommonShape, qg.QGraphicsPathItem):
    shape_class = GenericPoly
class ProtoPath(ProtoMultiPoint, CommonShape, qg.QGraphicsPathItem):
    basicbrush = Proto.nobrush  # open path: no fill preview
    shape_class = GenericPolyline
class ProtoLine(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
    basicbrush = Proto.nobrush  # a line has no interior to fill
    shape_class = GenericLine
class ProtoCircle(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
    shape_class = GenericCircle
class ProtoRect2Point(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
    shape_class = GenericTwoPointRect
| true | true |
1c3694502120cbcff9534c02328242a596c36aa8 | 208 | py | Python | Mundo2/contRegressiva.py | DanieleMagalhaes/Exercicios-Python | 394c68e8f06a10ec16539addd888960d11d1318f | [
"MIT"
] | null | null | null | Mundo2/contRegressiva.py | DanieleMagalhaes/Exercicios-Python | 394c68e8f06a10ec16539addd888960d11d1318f | [
"MIT"
] | null | null | null | Mundo2/contRegressiva.py | DanieleMagalhaes/Exercicios-Python | 394c68e8f06a10ec16539addd888960d11d1318f | [
"MIT"
] | null | null | null | from time import sleep
sleep(1)
print('\n\33[32mVamos para a contagem regressiva:\33[m \n')
sleep(2)
for c in range(10, 0 , -1):
print(c, end=' ')
sleep(1)
print('\n\33[33mFeliz ANO NOVO!!! \33[m\n') | 26 | 59 | 0.629808 | from time import sleep
sleep(1)
print('\n\33[32mVamos para a contagem regressiva:\33[m \n')
sleep(2)
for c in range(10, 0 , -1):
print(c, end=' ')
sleep(1)
print('\n\33[33mFeliz ANO NOVO!!! \33[m\n') | true | true |
1c3695b32e50700b368ea8fee6965259c7b8b26f | 9,049 | py | Python | kivy/uix/recycleview/layout.py | JanMalte/kivy | aa267d1d026c6c2dd75d73d9e0523f58ec0bbad7 | [
"MIT"
] | 1 | 2019-05-03T08:36:01.000Z | 2019-05-03T08:36:01.000Z | kivy/uix/recycleview/layout.py | gloc-mike/kivy | 293ce1a7786c637cd825a0ab97560611a30fa890 | [
"MIT"
] | null | null | null | kivy/uix/recycleview/layout.py | gloc-mike/kivy | 293ce1a7786c637cd825a0ab97560611a30fa890 | [
"MIT"
] | 2 | 2017-03-09T14:27:03.000Z | 2019-05-03T08:36:02.000Z |
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.properties import StringProperty, ObjectProperty
from kivy.uix.behaviors import CompoundSelectionBehavior
from kivy.uix.recycleview.views import RecycleDataViewBehavior, \
_view_base_cache
class LayoutChangeException(Exception):
    """Exception type reserved for layout-change signalling.

    NOTE(review): no raise/except site is visible in this module chunk;
    confirm the intended usage at its callers before documenting further.
    """
    pass
class LayoutSelectionBehavior(CompoundSelectionBehavior):
    '''The :class:`LayoutSelectionBehavior` can be combined with
    :class:`RecycleLayoutManagerBehavior` to allow its derived classes
    selection behaviors similarly to how
    :class:`~kivy.uix.behaviors.compoundselection.CompoundSelectionBehavior`
    can be used to add selection behaviors to a normal layout.
    :class:`RecycleLayoutManagerBehavior` manages its children
    differently than normal layouts or widgets so this class adapts
    :class:`~kivy.uix.behaviors.compoundselection.CompoundSelectionBehavior`
    based selection to work with :class:`RecycleLayoutManagerBehavior` as well.
    Similarly to
    :class:`~kivy.uix.behaviors.compoundselection.CompoundSelectionBehavior`,
    one can select using the keyboard or touch, which calls :meth:`select_node`
    or :meth:`deselect_node`, or one can call these methods directly. When an
    item is selected or deselected :meth:`apply_selection` is called. See
    :meth:`apply_selection`.
    '''
    key_selection = StringProperty(None, allownone=True)
    '''The key used to check whether a view of a data item can be selected
    with touch or the keyboard.
    :attr:`key_selection` is the key in data, which if present and ``True``
    will enable selection for this item from the keyboard or with a touch.
    When None, the default, no item will be selectable.
    :attr:`key_selection` is a :class:`StringProperty` and defaults to None.
    .. note::
        All data items can be selected directly using :meth:`select_node` or
        :meth:`deselect_node`, even if :attr:`key_selection` is False.
    '''
    # Data indices that are currently selectable, plus the reverse mapping
    # from data index to its position within that list.
    _selectable_nodes = []
    _nodes_map = {}
    def __init__(self, **kwargs):
        # Selectable nodes are data indices kept in increasing order.
        self.nodes_order_reversed = False
        super(LayoutSelectionBehavior, self).__init__(**kwargs)
    def compute_sizes_from_data(self, data, flags):
        # Overwrite this method so that when data changes we update the
        # selectable nodes.
        key = self.key_selection
        if key is None:
            # Selection is disabled: no node is selectable.
            nodes = self._selectable_nodes = []
        else:
            # An item is selectable when it has a truthy value under key.
            nodes = self._selectable_nodes = [
                i for i, d in enumerate(data) if d.get(key)]
        self._nodes_map = {v: k for k, v in enumerate(nodes)}
        return super(LayoutSelectionBehavior, self).compute_sizes_from_data(
            data, flags)
    def get_selectable_nodes(self):
        # The indices of the data are used as the selection nodes.
        return self._selectable_nodes
    def get_index_of_node(self, node, selectable_nodes):
        # Nodes are data indices; return the node's position among the
        # selectable nodes (raises KeyError for a non-selectable node).
        return self._nodes_map[node]
    def goto_node(self, key, last_node, last_node_idx):
        # After keyboard navigation, also scroll the new node into view.
        node, idx = super(LayoutSelectionBehavior, self).goto_node(
            key, last_node, last_node_idx)
        if node is not last_node:
            self.goto_view(node)
        return node, idx
    def select_node(self, node):
        # Update the visible view only if the base class changed the state
        # and the node currently has a view on screen.
        if super(LayoutSelectionBehavior, self).select_node(node):
            view = self.recycleview.view_adapter.get_visible_view(node)
            if view is not None:
                self.apply_selection(node, view, True)
    def deselect_node(self, node):
        if super(LayoutSelectionBehavior, self).deselect_node(node):
            view = self.recycleview.view_adapter.get_visible_view(node)
            if view is not None:
                self.apply_selection(node, view, False)
    def apply_selection(self, index, view, is_selected):
        '''Applies the selection to the view. This is called internally when
        a view is displayed and it needs to be shown as selected or as not
        selected.
        It is called when :meth:`select_node` or :meth:`deselect_node` is
        called or when a view needs to be refreshed. Its function is purely
        to update the view to reflect the selection state. So the function
        may be called multiple times even if the selection state may not
        have changed.
        If the view is an instance of
        :class:`~kivy.uix.recycleview.views.RecycleDataViewBehavior`, its
        :meth:`~kivy.uix.recycleview.views.RecycleDataViewBehavior.\
apply_selection` method will be called every time the view needs to refresh
        the selection state. Otherwise, this method is responsible
        for applying the selection.
        :Parameters:
            `index`: int
                The index of the data item that is associated with the view.
            `view`: widget
                The widget that is the view of this data item.
            `is_selected`: bool
                Whether the item is selected.
        '''
        viewclass = view.__class__
        # Cache per-class whether the view implements RecycleDataViewBehavior
        # so isinstance() runs once per class, not once per call.
        if viewclass not in _view_base_cache:
            _view_base_cache[viewclass] = isinstance(view, RecycleDataViewBehavior)
        if _view_base_cache[viewclass]:
            view.apply_selection(self.recycleview, index, is_selected)
    def refresh_view_layout(self, index, pos, pos_hint, size, size_hint, view,
                            viewport):
        # Re-apply the selection state whenever the view is (re)laid out.
        super(LayoutSelectionBehavior, self).refresh_view_layout(
            index, pos, pos_hint, size, size_hint, view, viewport)
        self.apply_selection(index, view, index in self.selected_nodes)
class RecycleLayoutManagerBehavior(object):
"""A RecycleLayoutManagerBehavior is responsible for positioning views into the
:attr:`RecycleView.data` within a :class:`RecycleView`. It adds new views
into the data when it becomes visible to the user, and removes them when
they leave the visible area.
"""
viewclass = ObjectProperty(None)
'''See :attr:`RecyclerView.viewclass`.
'''
key_viewclass = StringProperty(None)
'''See :attr:`RecyclerView.key_viewclass`.
'''
recycleview = ObjectProperty(None, allownone=True)
asked_sizes = None
def attach_recycleview(self, rv):
self.recycleview = rv
if rv:
fbind = self.fbind
# can be made more selective update than refresh_from_data which
# causes a full update. But this likely affects most of the data.
fbind('viewclass', rv.refresh_from_data)
fbind('key_viewclass', rv.refresh_from_data)
fbind('viewclass', rv._dispatch_prop_on_source, 'viewclass')
fbind('key_viewclass', rv._dispatch_prop_on_source,
'key_viewclass')
def detach_recycleview(self):
self.clear_layout()
rv = self.recycleview
if rv:
funbind = self.funbind
funbind('viewclass', rv.refresh_from_data)
funbind('key_viewclass', rv.refresh_from_data)
funbind('viewclass', rv._dispatch_prop_on_source, 'viewclass')
funbind('key_viewclass', rv._dispatch_prop_on_source,
'key_viewclass')
self.recycleview = None
def compute_sizes_from_data(self, data, flags):
pass
def compute_layout(self, data, flags):
pass
def compute_visible_views(self, data, viewport):
'''`viewport` is in coordinates of the layout manager.
'''
pass
def set_visible_views(self, indices, data, viewport):
'''`viewport` is in coordinates of the layout manager.
'''
pass
def refresh_view_layout(self, index, pos, pos_hint, size, size_hint, view,
viewport):
'''`See :meth:`~kivy.uix.recycleview.views.RecycleDataAdapter.\
refresh_view_layout`.
'''
self.recycleview.view_adapter.refresh_view_layout(
index, pos, pos_hint, size, size_hint, view, viewport)
def get_view_index_at(self, pos):
"""Return the view `index` on which position, `pos`, falls.
`pos` is in coordinates of the layout manager.
"""
pass
def remove_views(self):
rv = self.recycleview
if rv:
adapter = rv.view_adapter
if adapter:
adapter.make_views_dirty()
def remove_view(self, view, index):
rv = self.recycleview
if rv:
adapter = rv.view_adapter
if adapter:
adapter.make_view_dirty(view, index)
def clear_layout(self):
rv = self.recycleview
if rv:
adapter = rv.view_adapter
if adapter:
adapter.invalidate()
def goto_view(self, index):
'''Moves the views so that the view corresponding to `index` is
visible.
'''
pass
def on_viewclass(self, instance, value):
    """Property handler: resolve a string ``viewclass`` into the real
    class through the Factory registry."""
    # resolve the real class if it was a string.
    if isinstance(value, string_types):
        self.viewclass = getattr(Factory, value)
| 37.238683 | 83 | 0.65952 |
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.properties import StringProperty, ObjectProperty
from kivy.uix.behaviors import CompoundSelectionBehavior
from kivy.uix.recycleview.views import RecycleDataViewBehavior, \
_view_base_cache
class LayoutChangeException(Exception):
    """Exception type used to signal a layout change.

    NOTE(review): no raiser/handler is visible in this chunk -- confirm
    usage against the rest of the module before documenting further.
    """
    pass
class LayoutSelectionBehavior(CompoundSelectionBehavior):
    """Adds selection behavior to a recycle layout manager.

    An item is considered selectable when its data dict has a truthy
    value under the :attr:`key_selection` key; selection state is pushed
    to the visible views through :meth:`apply_selection`.
    """

    # Data key whose truthiness marks an item as selectable; None
    # disables selectability computation entirely.
    key_selection = StringProperty(None, allownone=True)

    # Indices (into the data) of the currently selectable items.
    # Reassigned (not mutated) in compute_sizes_from_data.
    _selectable_nodes = []
    # Maps a data index to its position within _selectable_nodes.
    _nodes_map = {}

    def __init__(self, **kwargs):
        self.nodes_order_reversed = False
        super(LayoutSelectionBehavior, self).__init__(**kwargs)

    def compute_sizes_from_data(self, data, flags):
        """Recompute the selectable-node caches, then defer to the base."""
        key = self.key_selection
        if key is None:
            nodes = self._selectable_nodes = []
        else:
            nodes = self._selectable_nodes = [
                i for i, d in enumerate(data) if d.get(key)]
        self._nodes_map = {v: k for k, v in enumerate(nodes)}
        return super(LayoutSelectionBehavior, self).compute_sizes_from_data(
            data, flags)

    def get_selectable_nodes(self):
        """Return the cached list of selectable data indices."""
        return self._selectable_nodes

    def get_index_of_node(self, node, selectable_nodes):
        """O(1) lookup of a node's position via the precomputed map."""
        return self._nodes_map[node]

    def goto_node(self, key, last_node, last_node_idx):
        """Move the selection focus and scroll the new node into view."""
        node, idx = super(LayoutSelectionBehavior, self).goto_node(
            key, last_node, last_node_idx)
        if node is not last_node:
            self.goto_view(node)
        return node, idx

    def select_node(self, node):
        """Select ``node`` and, if its view is visible, update the view."""
        if super(LayoutSelectionBehavior, self).select_node(node):
            view = self.recycleview.view_adapter.get_visible_view(node)
            if view is not None:
                self.apply_selection(node, view, True)

    def deselect_node(self, node):
        """Deselect ``node`` and, if its view is visible, update the view."""
        if super(LayoutSelectionBehavior, self).deselect_node(node):
            view = self.recycleview.view_adapter.get_visible_view(node)
            if view is not None:
                self.apply_selection(node, view, False)

    def apply_selection(self, index, view, is_selected):
        """Forward the selection state to ``view`` when it implements
        RecycleDataViewBehavior (result cached per view class)."""
        viewclass = view.__class__
        if viewclass not in _view_base_cache:
            _view_base_cache[viewclass] = isinstance(view, RecycleDataViewBehavior)
        if _view_base_cache[viewclass]:
            view.apply_selection(self.recycleview, index, is_selected)

    def refresh_view_layout(self, index, pos, pos_hint, size, size_hint, view,
                            viewport):
        super(LayoutSelectionBehavior, self).refresh_view_layout(
            index, pos, pos_hint, size, size_hint, view, viewport)
        # Re-apply selection: the view may have just been recycled for a
        # different data index.
        self.apply_selection(index, view, index in self.selected_nodes)
class RecycleLayoutManagerBehavior(object):
    """Base behavior responsible for positioning views within a
    RecycleView.

    This class only defines the interface; the ``compute_*`` and
    ``goto_view`` methods are hooks meant to be overridden by concrete
    layout managers.
    """

    # Class (or string name resolved through Factory, see on_viewclass)
    # used to instantiate the item views.
    viewclass = ObjectProperty(None)
    # Data key that, when set, selects the view class per item.
    key_viewclass = StringProperty(None)

    # Back-reference to the owning RecycleView; None when detached.
    recycleview = ObjectProperty(None, allownone=True)

    asked_sizes = None

    def attach_recycleview(self, rv):
        """Attach to RecycleView ``rv`` and bind refresh triggers."""
        self.recycleview = rv
        if rv:
            fbind = self.fbind
            fbind('viewclass', rv.refresh_from_data)
            fbind('key_viewclass', rv.refresh_from_data)
            fbind('viewclass', rv._dispatch_prop_on_source, 'viewclass')
            fbind('key_viewclass', rv._dispatch_prop_on_source,
                  'key_viewclass')

    def detach_recycleview(self):
        """Clear the layout and undo :meth:`attach_recycleview`."""
        self.clear_layout()
        rv = self.recycleview
        if rv:
            funbind = self.funbind
            funbind('viewclass', rv.refresh_from_data)
            funbind('key_viewclass', rv.refresh_from_data)
            funbind('viewclass', rv._dispatch_prop_on_source, 'viewclass')
            funbind('key_viewclass', rv._dispatch_prop_on_source,
                    'key_viewclass')
        self.recycleview = None

    def compute_sizes_from_data(self, data, flags):
        """Hook: compute view sizes from ``data`` (no-op here)."""
        pass

    def compute_layout(self, data, flags):
        """Hook: compute item positions (no-op here)."""
        pass

    def compute_visible_views(self, data, viewport):
        '''Hook. `viewport` is in coordinates of the layout manager.'''
        pass

    def set_visible_views(self, indices, data, viewport):
        '''Hook. `viewport` is in coordinates of the layout manager.'''
        pass

    def refresh_view_layout(self, index, pos, pos_hint, size, size_hint, view,
                            viewport):
        '''See :meth:`~kivy.uix.recycleview.views.RecycleDataAdapter.\
refresh_view_layout`. Delegates to the RecycleView's view adapter.
        '''
        self.recycleview.view_adapter.refresh_view_layout(
            index, pos, pos_hint, size, size_hint, view, viewport)

    def get_view_index_at(self, pos):
        """Return the view `index` on which position, `pos`, falls.

        `pos` is in coordinates of the layout manager. Hook: no-op here.
        """
        pass

    def remove_views(self):
        """Mark every view dirty so the adapter can recycle them all."""
        rv = self.recycleview
        if rv:
            adapter = rv.view_adapter
            if adapter:
                adapter.make_views_dirty()

    def remove_view(self, view, index):
        """Mark one ``view`` (showing data item ``index``) dirty."""
        rv = self.recycleview
        if rv:
            adapter = rv.view_adapter
            if adapter:
                adapter.make_view_dirty(view, index)

    def clear_layout(self):
        """Invalidate the adapter, discarding its cached views."""
        rv = self.recycleview
        if rv:
            adapter = rv.view_adapter
            if adapter:
                adapter.invalidate()

    def goto_view(self, index):
        '''Moves the views so that the view corresponding to `index` is
        visible. Hook: no-op here.
        '''
        pass

    def on_viewclass(self, instance, value):
        # resolve the real class if it was a string.
        if isinstance(value, string_types):
            self.viewclass = getattr(Factory, value)
| true | true |
1c36987a3e27495d3cd68db28ebe968716373215 | 6,066 | py | Python | elasticapm/utils/__init__.py | dpaluch-rp/apm-agent-python | 8b11d232f37c0affe0a7c92f590b05106c55b3b3 | [
"BSD-3-Clause"
] | null | null | null | elasticapm/utils/__init__.py | dpaluch-rp/apm-agent-python | 8b11d232f37c0affe0a7c92f590b05106c55b3b3 | [
"BSD-3-Clause"
] | null | null | null | elasticapm/utils/__init__.py | dpaluch-rp/apm-agent-python | 8b11d232f37c0affe0a7c92f590b05106c55b3b3 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License
#
# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
import base64
import os
import re
from functools import partial
from elasticapm.conf import constants
from elasticapm.utils import compat, encoding
try:
from functools import partialmethod
partial_types = (partial, partialmethod)
except ImportError:
# Python 2
partial_types = (partial,)
default_ports = {"https": 443, "http": 80, "postgresql": 5432, "mysql": 3306, "mssql": 1433}
def varmap(func, var, context=None, name=None):
    """Recursively apply ``func(key_name, value)`` over nested values.

    Dicts and lists/tuples are walked recursively; any other value is
    passed to ``func`` directly.  ``context`` tracks the ids of objects
    currently being visited so that reference cycles are reported as the
    placeholder string ``"<...>"`` instead of recursing forever.
    """
    if context is None:
        context = set()
    var_id = id(var)
    if var_id in context:
        # Already on the current path: cycle detected.
        return func(name, "<...>")
    context.add(var_id)
    if isinstance(var, dict):
        mapped = dict(
            (key, varmap(func, value, context, key))
            for key, value in compat.iteritems(var)
        )
        ret = func(name, mapped)
    elif isinstance(var, (list, tuple)):
        ret = func(name, [varmap(func, item, context, name) for item in var])
    else:
        ret = func(name, var)
    context.remove(var_id)
    return ret
def get_name_from_func(func):
    """Return a dotted ``module.name`` identifier for *func*.

    partials don't have ``__module__`` or ``__name__``, so the inner
    function's identifier is used, wrapped as ``partial(...)``.
    """
    if isinstance(func, partial_types):
        return "partial({})".format(get_name_from_func(func.func))
    inner = getattr(func, "_partialmethod", None)
    if inner is not None and hasattr(inner, "func"):
        return "partial({})".format(get_name_from_func(inner.func))
    try:
        view_name = func.__name__
    except AttributeError:
        # Fall back if there's no __name__ (e.g. callable instances).
        view_name = func.__class__.__name__
    return "{0}.{1}".format(func.__module__, view_name)
def build_name_with_http_method_prefix(name, request):
    """Prefix *name* with the request's HTTP method (e.g. ``"GET users"``).

    A falsy *name* is returned unchanged.
    """
    if not name:
        return name
    return "{} {}".format(request.method, name)
def is_master_process():
    """Return True when running as the uwsgi master process.

    Currently only the uwsgi master is recognized; in any other
    environment (uwsgi not importable) this is False.
    """
    try:
        import uwsgi
    except ImportError:
        return False
    return os.getpid() == uwsgi.masterpid()
def get_url_dict(url):
    """Split *url* into the schema's ``url.*`` fields.

    Produces ``full``, ``protocol``, ``hostname`` and ``pathname``, plus
    ``port`` and ``search`` when present; string values are run through
    ``encoding.keyword_field``.
    """
    parsed = compat.urlparse.urlparse(url)
    url_dict = {
        "full": encoding.keyword_field(url),
        "protocol": parsed.scheme + ":",
        "hostname": encoding.keyword_field(parsed.hostname),
        "pathname": encoding.keyword_field(parsed.path),
    }
    if parsed.port is not None:
        url_dict["port"] = str(parsed.port)
    if parsed.query:
        url_dict["search"] = encoding.keyword_field("?" + parsed.query)
    return url_dict
def sanitize_url(url):
    """Mask the password in a ``scheme://user:pass@host`` style URL.

    URLs without a ``@`` are returned unchanged.
    """
    if "@" not in url:
        return url
    parts = compat.urlparse.urlparse(url)
    credentials = "%s:%s" % (parts.username, parts.password)
    masked = "%s:%s" % (parts.username, constants.MASK)
    return url.replace(credentials, masked)
def get_host_from_url(url):
    """Return ``host[:port]`` for *url*, omitting the scheme's default port.

    A missing hostname yields a single-space placeholder.
    """
    parsed = compat.urlparse.urlparse(url)
    host = parsed.hostname or " "
    port = parsed.port
    if port and port != default_ports.get(parsed.scheme):
        host = "%s:%s" % (host, port)
    return host
def url_to_destination(url, service_type="external"):
    """Build a destination-context dict (service name/resource) from *url*.

    The service ``name`` is ``scheme://host`` with the port appended only
    when non-default; ``resource`` is ``host:port`` whenever a port is
    known (explicit or the scheme default).
    """
    parts = compat.urlparse.urlsplit(url)
    hostname = parts.hostname
    # urlsplit strips the brackets from IPv6 literals; restore them.
    if "://[" in url:
        hostname = "[%s]" % hostname
    try:
        port = parts.port
    except ValueError:
        # Malformed port: treat it as absent rather than failing.
        port = None
    scheme_default = default_ports.get(parts.scheme, None)
    if not port and parts.scheme in default_ports:
        port = default_ports[parts.scheme]
    name = "%s://%s" % (parts.scheme, hostname)
    resource = hostname
    if port:
        if port != scheme_default:
            name = "%s:%d" % (name, port)
        resource = "%s:%d" % (resource, port)
    return {"service": {"name": name, "resource": resource, "type": service_type}}
def read_pem_file(file_obj):
    """Extract and base64-decode the first certificate in a PEM file.

    Skips everything up to the first ``BEGIN CERTIFICATE`` marker, then
    collects the base64 payload lines until the matching ``END
    CERTIFICATE`` marker.

    Fix: the collection loop now stops at the end marker.  Previously it
    only skipped END lines and kept appending everything after them
    (trailers, additional certificates) to the payload, corrupting the
    decoded bytes.
    """
    cert = b""
    for line in file_obj:
        if line.startswith(b"-----BEGIN CERTIFICATE-----"):
            break
    for line in file_obj:
        if line.startswith(b"-----END CERTIFICATE-----"):
            break
        cert += line.strip()
    return base64.b64decode(cert)
def starmatch_to_regex(pattern):
    """Compile a ``*``-wildcard *pattern* into a case-insensitive regex.

    ``*`` matches any run of characters (including newlines, via
    re.DOTALL); every other character is matched literally.  The whole
    string must match (anchored with ``\\Z``).
    """
    pieces = [".*" if ch == "*" else re.escape(ch) for ch in pattern]
    return re.compile(r"(?:%s)\Z" % "".join(pieces), re.IGNORECASE | re.DOTALL)
| 32.789189 | 110 | 0.670129 |
import base64
import os
import re
from functools import partial
from elasticapm.conf import constants
from elasticapm.utils import compat, encoding
try:
from functools import partialmethod
partial_types = (partial, partialmethod)
except ImportError:
partial_types = (partial,)
default_ports = {"https": 443, "http": 80, "postgresql": 5432, "mysql": 3306, "mssql": 1433}
def varmap(func, var, context=None, name=None):
    """Execute ``func(key_name, value)`` on all values, recursively
    discovering dict and list scoped values.

    ``context`` holds the ids of objects on the current path; cycles are
    replaced by the placeholder string ``"<...>"``.
    """
    if context is None:
        context = set()
    objid = id(var)
    if objid in context:
        return func(name, "<...>")
    context.add(objid)
    if isinstance(var, dict):
        ret = func(name, dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var)))
    elif isinstance(var, (list, tuple)):
        ret = func(name, [varmap(func, f, context, name) for f in var])
    else:
        ret = func(name, var)
    context.remove(objid)
    return ret
def get_name_from_func(func):
    """Return a ``module.name`` style identifier for *func*.

    partials don't have ``__module__`` or ``__name__``, so the values
    from the "inner" function are used, wrapped as ``partial(...)``.
    """
    if isinstance(func, partial_types):
        return "partial({})".format(get_name_from_func(func.func))
    elif hasattr(func, "_partialmethod") and hasattr(func._partialmethod, "func"):
        return "partial({})".format(get_name_from_func(func._partialmethod.func))
    module = func.__module__
    if hasattr(func, "__name__"):
        view_name = func.__name__
    else:  # Fall back if there's no __name__
        view_name = func.__class__.__name__
    return "{0}.{1}".format(module, view_name)
def build_name_with_http_method_prefix(name, request):
    """Prefix *name* with ``request.method`` (e.g. ``"GET users"``);
    a falsy *name* is returned unchanged."""
    return " ".join((request.method, name)) if name else name
def is_master_process():
    """Return True when running as the uwsgi master process.

    Currently only recognizes the uwsgi master; returns False whenever
    uwsgi is not importable.
    """
    try:
        import uwsgi
        return os.getpid() == uwsgi.masterpid()
    except ImportError:
        return False
def get_url_dict(url):
    """Split *url* into the schema's ``url.*`` fields (``full``,
    ``protocol``, ``hostname``, ``pathname``, plus ``port``/``search``
    when present)."""
    parse_result = compat.urlparse.urlparse(url)
    url_dict = {
        "full": encoding.keyword_field(url),
        "protocol": parse_result.scheme + ":",
        "hostname": encoding.keyword_field(parse_result.hostname),
        "pathname": encoding.keyword_field(parse_result.path),
    }
    port = None if parse_result.port is None else str(parse_result.port)
    if port:
        url_dict["port"] = port
    if parse_result.query:
        url_dict["search"] = encoding.keyword_field("?" + parse_result.query)
    return url_dict
def sanitize_url(url):
    """Mask the password in a ``scheme://user:pass@host`` style URL;
    URLs without ``@`` are returned unchanged."""
    if "@" not in url:
        return url
    parts = compat.urlparse.urlparse(url)
    return url.replace("%s:%s" % (parts.username, parts.password), "%s:%s" % (parts.username, constants.MASK))
def get_host_from_url(url):
    """Return ``host[:port]`` for *url*, omitting the scheme's default
    port; a missing hostname yields a single-space placeholder."""
    parsed_url = compat.urlparse.urlparse(url)
    host = parsed_url.hostname or " "
    if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:
        host += ":" + str(parsed_url.port)
    return host
def url_to_destination(url, service_type="external"):
    """Build a destination-context dict (service name/resource) from *url*.

    The port is appended to ``name`` only when non-default, while
    ``resource`` carries it whenever known (explicit or scheme default).
    """
    parts = compat.urlparse.urlsplit(url)
    hostname = parts.hostname
    # preserve brackets for IPv6 URLs (urlsplit strips them)
    if "://[" in url:
        hostname = "[%s]" % hostname
    try:
        port = parts.port
    except ValueError:
        # Malformed port, just use None rather than raising an exception
        port = None
    default_port = default_ports.get(parts.scheme, None)
    name = "%s://%s" % (parts.scheme, hostname)
    resource = hostname
    if not port and parts.scheme in default_ports:
        port = default_ports[parts.scheme]
    if port:
        if port != default_port:
            name += ":%d" % port
        resource += ":%d" % port
    return {"service": {"name": name, "resource": resource, "type": service_type}}
def read_pem_file(file_obj):
    """Extract and base64-decode the certificate payload of a PEM file.

    Skips everything before the ``BEGIN CERTIFICATE`` marker, then joins
    the base64 lines.  NOTE(review): the second loop never breaks at the
    END marker, so any non-marker lines *after* it are also appended to
    the payload -- verify inputs only ever contain a single certificate.
    """
    cert = b""
    for line in file_obj:
        if line.startswith(b"-----BEGIN CERTIFICATE-----"):
            break
    for line in file_obj:
        if not line.startswith(b"-----END CERTIFICATE-----"):
            cert += line.strip()
    return base64.b64decode(cert)
def starmatch_to_regex(pattern):
    """Compile a ``*``-wildcard *pattern* into a case-insensitive,
    fully-anchored regex; ``*`` matches any run of characters (DOTALL),
    everything else is escaped and matched literally."""
    i, n = 0, len(pattern)
    res = []
    while i < n:
        c = pattern[i]
        i = i + 1
        if c == "*":
            res.append(".*")
        else:
            res.append(re.escape(c))
    return re.compile(r"(?:%s)\Z" % "".join(res), re.IGNORECASE | re.DOTALL)
| true | true |
1c369903477e7937c7eaa59d42f3fc714b291ead | 16,475 | py | Python | contrib/tools/python3/src/Lib/distutils/cygwinccompiler.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | [
"Apache-2.0"
] | 11,058 | 2018-05-29T07:40:06.000Z | 2022-03-31T11:38:42.000Z | contrib/tools/python3/src/Lib/distutils/cygwinccompiler.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | [
"Apache-2.0"
] | 2,105 | 2018-06-01T10:07:16.000Z | 2022-03-31T14:56:42.000Z | contrib/tools/python3/src/Lib/distutils/cygwinccompiler.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | [
"Apache-2.0"
] | 914 | 2018-07-27T09:36:14.000Z | 2022-03-31T19:56:34.000Z | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate an import library for its dll
# - create a def-file for python??.dll
# - create an import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it doesn't worked reliable in some
# tested configurations. And because other windows compilers also
# need their symbols specified this no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
import os
import sys
import copy
from subprocess import Popen, PIPE, check_output
import re
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import (DistutilsExecError, CCompilerError,
CompileError, UnknownFileError)
from distutils import log
from distutils.version import LooseVersion
from distutils.spawn import find_executable
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.

    Returns a single-element list with the msvcr library name, or None
    when this interpreter was not built with MSVC.  Raises ValueError
    for an unrecognized MSC version.
    """
    # Map the MSC internal version (embedded in sys.version) to the
    # matching C runtime DLL.
    runtime_by_msc_ver = {
        '1300': 'msvcr70',   # MSVC 7.0
        '1310': 'msvcr71',   # MSVC 7.1
        '1400': 'msvcr80',   # VS2005 / MSVC 8.0
        '1500': 'msvcr90',   # VS2008 / MSVC 9.0
        '1600': 'msvcr100',  # VS2010 / MSVC 10.0
    }
    msc_pos = sys.version.find('MSC v.')
    if msc_pos == -1:
        # Not an MSVC build (e.g. gcc/clang): nothing to add.
        return None
    msc_ver = sys.version[msc_pos + 6:msc_pos + 10]
    if msc_ver not in runtime_by_msc_ver:
        raise ValueError("Unknown MS Compiler version %s " % msc_ver)
    return [runtime_by_msc_ver[msc_ver]]
class CygwinCCompiler(UnixCCompiler):
    """ Handles the Cygwin port of the GNU C compiler to Windows.
    """
    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Probe gcc/ld/dllwrap versions and configure the tool commands
        accordingly; warns when pyconfig.h looks GCC-incompatible."""
        UnixCCompiler.__init__(self, verbose, dry_run, force)

        status, details = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)

        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )

        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))

        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            self.dll_libraries = get_msvcr()

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compiles the source by spawning GCC and windres if needed."""
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError as msg:
                raise CompileError(msg)
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError as msg:
                raise CompileError(msg)

    def link(self, target_desc, objects, output_filename, output_dir=None,
             libraries=None, library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link the objects, routing export symbols through a .def file."""
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")

            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)

        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        UnixCCompiler.link(self, target_desc, objects, output_filename,
                           output_dir, libraries, library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug, extra_preargs, extra_postargs, build_temp,
                           target_lang)

    # -- Miscellaneous methods -----------------------------------------

    def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
        """Adds supports for rc and res files."""
        if output_dir is None:
            output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            base, ext = os.path.splitext(os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError("unknown file type '%s' (from '%s')" % \
                      (ext, src_name))
            if strip_dir:
                base = os.path.basename (base)
            if ext in ('.res', '.rc'):
                # these need to be compiled to object files
                obj_names.append (os.path.join(output_dir,
                                               base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join(output_dir,
                                               base + self.obj_extension))
        return obj_names
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(CygwinCCompiler):
    """ Handles the Mingw32 port of the GNU C compiler to Windows.
    """
    compiler_type = 'mingw32'

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Configure the commands for a native mingw32 (no-cygwin)
        toolchain; rejects a cygwin-hosted gcc."""
        CygwinCCompiler.__init__ (self, verbose, dry_run, force)

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        if is_cygwingcc():
            raise CCompilerError(
                'Cygwin gcc cannot be used with --compiler=mingw32')

        self.set_executables(compiler='gcc -O -Wall',
                             compiler_so='gcc -mdll -O -Wall',
                             compiler_cxx='g++ -O -Wall',
                             linker_exe='gcc',
                             linker_so='%s %s %s'
                                        % (self.linker_dll, shared_option,
                                           entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # no additional libraries needed
        self.dll_libraries=[]

        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later.
        self.dll_libraries = get_msvcr()
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.
CONFIG_H_OK = "ok"                # pyconfig.h looks GCC-compatible
CONFIG_H_NOTOK = "not ok"         # pyconfig.h lacks the __GNUC__ section
CONFIG_H_UNCERTAIN = "uncertain"  # pyconfig.h could not be read
def check_config_h():
    """Check if the current Python installation appears amenable to building
    extensions with GCC.

    Returns a tuple (status, details), where 'status' is one of the following
    constants:

    - CONFIG_H_OK: all is well, go ahead and compile
    - CONFIG_H_NOTOK: doesn't look good
    - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h

    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...
    from distutils import sysconfig

    # A GCC-built interpreter implies a GCC-compatible pyconfig.h.
    if "GCC" in sys.version:
        return CONFIG_H_OK, "sys.version mentions 'GCC'"

    fn = sysconfig.get_config_h_filename()
    try:
        with open(fn) as config_h:
            contents = config_h.read()
    except OSError as exc:
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))

    if "__GNUC__" in contents:
        return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
    return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
# Matches a dotted version number such as "2.13" or "2.10.90".
RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')

def _find_exe_version(cmd):
    """Find the version of an executable by running `cmd` in the shell.

    Returns None when the executable is not on PATH or its output does
    not match `RE_VERSION`; otherwise a LooseVersion of the match.
    """
    executable = cmd.split()[0]
    if find_executable(executable) is None:
        return None
    pipe = Popen(cmd, shell=True, stdout=PIPE).stdout
    try:
        output = pipe.read()
    finally:
        pipe.close()
    match = RE_VERSION.search(output)
    if match is None:
        return None
    # LooseVersion works with strings, so decode the matched bytes.
    return LooseVersion(match.group(1).decode())
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.

    If not possible it returns None for it.
    """
    commands = ('gcc -dumpversion', 'ld -v', 'dllwrap --version')
    return tuple(_find_exe_version(cmd) for cmd in commands)
def is_cygwingcc():
    '''Try to determine if the gcc that would be used is from cygwin.'''
    # 'gcc -dumpmachine' prints the target triplet, e.g. 'i686-pc-cygwin'.
    machine = check_output(['gcc', '-dumpmachine'])
    return machine.strip().endswith(b'cygwin')
| 40.578818 | 79 | 0.597997 |
# --export-all-symbols because it doesn't worked reliable in some
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
import os
import sys
import copy
from subprocess import Popen, PIPE, check_output
import re
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import (DistutilsExecError, CCompilerError,
CompileError, UnknownFileError)
from distutils import log
from distutils.version import LooseVersion
from distutils.spawn import find_executable
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.

    Returns a single-element list with the msvcr library name, None for
    non-MSVC builds, and raises ValueError for an unknown MSC version.
    """
    msc_pos = sys.version.find('MSC v.')
    if msc_pos != -1:
        msc_ver = sys.version[msc_pos+6:msc_pos+10]
        if msc_ver == '1300':
            # MSVC 7.0
            return ['msvcr70']
        elif msc_ver == '1310':
            # MSVC 7.1
            return ['msvcr71']
        elif msc_ver == '1400':
            # VS2005 / MSVC 8.0
            return ['msvcr80']
        elif msc_ver == '1500':
            # VS2008 / MSVC 9.0
            return ['msvcr90']
        elif msc_ver == '1600':
            # VS2010 / MSVC 10.0
            return ['msvcr100']
        else:
            raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler(UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
exe_extension = ".exe"
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
status, details = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. "
"Reason: %s. "
"Compiling may fail because of undefined preprocessor macros."
% details)
self.gcc_version, self.ld_version, self.dllwrap_version = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
(self.gcc_version,
self.ld_version,
self.dllwrap_version) )
# ld_version >= "2.10.90" and < "2.13" should also be able to use
# gcc -mdll instead of dllwrap
# Older dllwraps had own version numbers, newer ones use the
# same as the rest of binutils ( also ld )
# dllwrap 2.10.90 is buggy
if self.ld_version >= "2.10.90":
self.linker_dll = "gcc"
else:
self.linker_dll = "dllwrap"
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# Hard-code GCC because that's what this is all about.
self.set_executables(compiler='gcc -mcygwin -O -Wall',
compiler_so='gcc -mcygwin -mdll -O -Wall',
compiler_cxx='g++ -mcygwin -O -Wall',
linker_exe='gcc -mcygwin',
linker_so=('%s -mcygwin %s' %
(self.linker_dll, shared_option)))
if self.gcc_version == "2.91.57":
# (gcc version 2.91.57) -- perhaps something about initialization
self.dll_libraries=["msvcrt"]
self.warn(
"Consider upgrading to a newer version of gcc")
else:
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError as msg:
raise CompileError(msg)
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
    def link(self, target_desc, objects, output_filename, output_dir=None,
             libraries=None, library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link object files into an executable or shared library.

        Delegates to UnixCCompiler.link, but when symbols are exported it
        first writes a ``.def`` file describing the DLL exports: dllwrap
        takes it via dedicated options, while gcc/ld accepts it as an
        ordinary linker input.
        """
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")

            # Generate .def file: the LIBRARY line names the DLL, followed by
            # one EXPORTS entry per exported symbol.
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                objects.append(def_file)

        # Strip symbol information from the result unless this is a debug build.
        if not debug:
            extra_preargs.append("-s")

        # Export symbols were already handled above, so pass None for them here.
        UnixCCompiler.link(self, target_desc, objects, output_filename,
                           output_dir, libraries, library_dirs,
                           runtime_library_dirs,
                           None,
                           debug, extra_preargs, extra_postargs, build_temp,
                           target_lang)
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError("unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext in ('.res', '.rc'):
obj_names.append (os.path.join(output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join(output_dir,
base + self.obj_extension))
return obj_names
class Mingw32CCompiler(CygwinCCompiler):
    """CygwinCCompiler variant that targets native Windows.

    Drops the -mcygwin flags so the produced binaries link against the
    MSVC runtime rather than the Cygwin POSIX layer.
    """

    compiler_type = 'mingw32'

    def __init__(self, verbose=0, dry_run=0, force=0):

        CygwinCCompiler.__init__ (self, verbose, dry_run, force)

        # ld >= 2.13 understands -shared; older versions need -mdll -static.
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        if is_cygwingcc():
            raise CCompilerError(
                'Cygwin gcc cannot be used with --compiler=mingw32')

        self.set_executables(compiler='gcc -O -Wall',
                             compiler_so='gcc -mdll -O -Wall',
                             compiler_cxx='g++ -O -Wall',
                             linker_exe='gcc',
                             linker_so='%s %s %s'
                                        % (self.linker_dll, shared_option,
                                           entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # Include the MSVC runtime library matching the Python build.
        # (Removed the dead `self.dll_libraries = []` assignment that was
        # immediately overwritten by the line below.)
        self.dll_libraries = get_msvcr()
# Status values returned by check_config_h(): pyconfig.h is known to be
# gcc-compatible, known not to be, or could not be inspected at all.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check whether pyconfig.h looks usable with GCC.

    Returns a ``(status, details)`` pair where status is one of
    CONFIG_H_OK, CONFIG_H_NOTOK or CONFIG_H_UNCERTAIN and details is a
    human-readable explanation.
    """
    # "pyconfig.h" check -- should probably be renamed...

    from distutils import sysconfig

    # A Python that was itself built by GCC advertises it in sys.version,
    # and then its pyconfig.h is assumed to be fine.
    if "GCC" in sys.version:
        return CONFIG_H_OK, "sys.version mentions 'GCC'"

    # Otherwise look for a __GNUC__ mention inside pyconfig.h itself.
    fn = sysconfig.get_config_h_filename()
    try:
        with open(fn) as config_h:
            if "__GNUC__" in config_h.read():
                return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
            return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
    except OSError as exc:
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
# Matches a dotted version number (e.g. b"2.13.90") in a tool's output.
RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
def _find_exe_version(cmd):
    """Run *cmd* in a shell and extract a version number from its output.

    Returns a LooseVersion, or None when the executable is not on PATH or
    its output contains no recognizable version string.
    """
    program = cmd.split()[0]
    if find_executable(program) is None:
        return None
    pipe = Popen(cmd, shell=True, stdout=PIPE).stdout
    try:
        output = pipe.read()
    finally:
        pipe.close()
    match = RE_VERSION.search(output)
    if match is None:
        return None
    # LooseVersion compares strings, so the matched bytes must be decoded.
    return LooseVersion(match.group(1).decode())
def get_versions():
    """Return (gcc_version, ld_version, dllwrap_version); entries may be None."""
    probes = ('gcc -dumpversion', 'ld -v', 'dllwrap --version')
    return tuple(map(_find_exe_version, probes))
def is_cygwingcc():
    """Return True when the gcc on PATH targets Cygwin."""
    # gcc -dumpmachine prints the target triplet, e.g. b'x86_64-pc-cygwin'.
    return check_output(['gcc', '-dumpmachine']).strip().endswith(b'cygwin')
| true | true |
1c3699abd03c5d1dd5affb0ed50c17ce47d6c208 | 914 | py | Python | userbot/modules/fun/scramble.py | ZJRDroid/PaperplaneRemix | b41dffad17cce076f174a3fb36f0aeba1177cefa | [
"MIT"
] | null | null | null | userbot/modules/fun/scramble.py | ZJRDroid/PaperplaneRemix | b41dffad17cce076f174a3fb36f0aeba1177cefa | [
"MIT"
] | null | null | null | userbot/modules/fun/scramble.py | ZJRDroid/PaperplaneRemix | b41dffad17cce076f174a3fb36f0aeba1177cefa | [
"MIT"
] | 1 | 2019-12-21T03:44:42.000Z | 2019-12-21T03:44:42.000Z | import random
import re
from ..help import add_help_item
from userbot.events import register
@register(outgoing=True, pattern=r"^\.scramble(\s+[\S\s]+|$)")
async def scramble_message(e):
    """Replace the targeted message with a word-scrambled version of it."""
    reply = await e.get_reply_message()
    # Prefer the inline argument; fall back to the replied-to message's text.
    source = e.pattern_match.group(1) or reply.text
    shuffled = [scramble_word(token) for token in re.split(r"\s", source)]
    await e.edit(' '.join(shuffled))
def scramble_word(word):
    """Shuffle a word's interior letters, keeping the first and last fixed."""
    if len(word) < 4:
        # Interior has at most one letter: nothing to shuffle.
        return word
    interior = random.sample(word[1:-1], len(word) - 2)
    return word[0] + ''.join(interior) + word[-1]
add_help_item(
".scramble",
"Fun",
"Scrambles a message in a way that the human "
"brain can still understand.",
"""
`.scramble (message)`
Or, in reply to a message
`.scramble`
"""
)
| 21.761905 | 63 | 0.650985 | import random
import re
from ..help import add_help_item
from userbot.events import register
@register(outgoing=True, pattern=r"^\.scramble(\s+[\S\s]+|$)")
async def scramble_message(e):
reply_message = await e.get_reply_message()
text = e.pattern_match.group(1) or reply_message.text
words = re.split(r"\s", text)
scrambled = map(scramble_word, words)
text = ' '.join(scrambled)
await e.edit(text)
def scramble_word(word):
if len(word) < 4:
return word
first_letter = word[0]
last_letter = word[-1]
middle_letters = list(word[1:-1])
random.shuffle(middle_letters)
return first_letter + ''.join(middle_letters) + last_letter
add_help_item(
".scramble",
"Fun",
"Scrambles a message in a way that the human "
"brain can still understand.",
"""
`.scramble (message)`
Or, in reply to a message
`.scramble`
"""
)
| true | true |
1c369ad7655b31ef6a1806353fd7ab1425ed1954 | 8,191 | py | Python | lofarimaging/lofarimaging.py | lofar-astron/lofarimaging | 9672b52bb9be8f3405e6e3f85701175bdc4bf211 | [
"Apache-2.0"
] | 5 | 2020-03-14T02:48:38.000Z | 2022-02-18T12:19:22.000Z | lofarimaging/lofarimaging.py | lofar-astron/lofarimaging | 9672b52bb9be8f3405e6e3f85701175bdc4bf211 | [
"Apache-2.0"
] | 1 | 2020-06-16T15:55:50.000Z | 2020-06-16T15:55:50.000Z | lofarimaging/lofarimaging.py | lofar-astron/lofarimaging | 9672b52bb9be8f3405e6e3f85701175bdc4bf211 | [
"Apache-2.0"
] | 3 | 2020-03-13T20:13:02.000Z | 2020-10-08T07:36:51.000Z | """Functions for working with LOFAR single station data"""
from typing import Dict, List
import numpy as np
from numpy.linalg import norm, lstsq
import numexpr as ne
import numba
from astropy.coordinates import SkyCoord, SkyOffsetFrame, CartesianRepresentation
# Public API of this module.
__all__ = ["nearfield_imager", "sky_imager", "ground_imager", "skycoord_to_lmn", "calibrate", "simulate_sky_source",
           "subtract_sources"]
__version__ = "1.5.0"
# Speed of light in vacuum, in m/s.
SPEED_OF_LIGHT = 299792458.0
def skycoord_to_lmn(pos: SkyCoord, phasecentre: SkyCoord):
    """
    Convert an astropy sky coordinate to (l, m, n) relative to a phase centre.

    The l,m,n system is right-handed, with its origin on the sky sphere,
    m, n and celestial north coplanar, and l, m spanning the tangent plane
    of the sky sphere — so l increases east-wards.

    Adapted from https://github.com/SKA-ScienceDataProcessor/algorithm-reference-library
    """
    # Unit direction vector of `pos` expressed in the phase centre's frame.
    offset = pos.transform_to(SkyOffsetFrame(origin=phasecentre))
    direction = offset.represent_as(CartesianRepresentation)
    direction /= direction.norm()

    # Reorder axes to imaging conventions: astropy's x axis points at the
    # phase centre, hence n = x - 1.
    return direction.y.value, direction.z.value, direction.x.value - 1
@numba.jit(parallel=True)
def sky_imager(visibilities, baselines, freq, npix_l, npix_m):
    """
    Sky imager: direct Fourier inversion of the visibilities onto an
    (l, m) direction-cosine grid spanning [-1, 1] on both axes.

    Args:
        visibilities: Numpy array with visibilities, shape [num_antennas x num_antennas]
        baselines: Numpy array with distances between antennas, shape [num_antennas, num_antennas, 3]
        freq: frequency in Hz
        npix_l: Number of pixels in l-direction
        npix_m: Number of pixels in m-direction

    Returns:
        np.array(float): Real valued array of shape [npix_m, npix_l]
    """
    img = np.zeros((npix_m, npix_l), dtype=np.complex128)

    # NOTE(review): decorated with numba.jit(parallel=True) but the loops use
    # plain range rather than numba.prange — confirm the outer loop actually
    # parallelises.
    for m_ix in range(npix_m):
        # m runs -1..1 bottom-to-top; l runs 1..-1 so east ends up on the left.
        m = -1 + m_ix * 2 / npix_m
        for l_ix in range(npix_l):
            l = 1 - l_ix * 2 / npix_l
            img[m_ix, l_ix] = np.mean(visibilities * np.exp(-2j * np.pi * freq *
                                                            (baselines[:, :, 0] * l + baselines[:, :, 1] * m) /
                                                            SPEED_OF_LIGHT))
    return np.real(img)
def ground_imager(visibilities, freq, npix_p, npix_q, dims, station_pqr, height=1.5):
    """Image the ground plane at a fixed height via a direct Fourier sum.

    Args:
        visibilities: visibility matrix, shape [num_antennas, num_antennas]
        freq: frequency in Hz
        npix_p, npix_q: image size in pixels along p and q
        dims: (p_min, p_max, q_min, q_max) extent in metres
        station_pqr: antenna positions in PQR coordinates, shape [num_antennas, 3]
        height: height of the imaged plane in metres

    Returns:
        np.array(complex): array of shape [npix_q, npix_p]
    """
    img = np.zeros([npix_q, npix_p], dtype=np.complex128)
    q_coords = np.linspace(dims[2], dims[3], npix_q)
    p_coords = np.linspace(dims[0], dims[1], npix_p)
    for q_ix, q in enumerate(q_coords):
        for p_ix, p in enumerate(p_coords):
            # Per-antenna distance to this ground pixel, and the resulting
            # pairwise path-length difference for every baseline.
            pixel_pos = np.array([p, q, height], dtype=np.float32)
            dist_to_ants = np.linalg.norm(station_pqr - pixel_pos[np.newaxis, :], axis=1)
            path_diff = dist_to_ants[:, np.newaxis] - dist_to_ants[np.newaxis, :]
            img[q_ix, p_ix] = np.mean(visibilities * np.exp(2j * np.pi * freq * path_diff / SPEED_OF_LIGHT))
    return img
def nearfield_imager(visibilities, baseline_indices, freqs, npix_p, npix_q, extent, station_pqr, height=1.5,
                     max_memory_mb=200):
    """
    Nearfield imager

    Args:
        visibilities: Numpy array with visibilities, shape [num_visibilities x num_frequencies]
        baseline_indices: List with tuples of antenna numbers in visibilities, shape [2 x num_visibilities]
        freqs: List of frequencies
        npix_p: Number of pixels in p-direction
        npix_q: Number of pixels in q-direction
        extent: Extent (in m) that the image should span
        station_pqr: PQR coordinates of stations
        height: Height of image in metre
        max_memory_mb: Maximum amount of memory to use for the biggest array. Higher may improve performance.

    Returns:
        np.array(complex): Complex valued array of shape [npix_q, npix_p]
    """
    z = height
    x = np.linspace(extent[0], extent[1], npix_p)
    y = np.linspace(extent[2], extent[3], npix_q)

    # Cartesian coordinates of every pixel on the imaged plane,
    # shape [npix_q, npix_p, 3].
    posx, posy = np.meshgrid(x, y)
    posxyz = np.transpose(np.array([posx, posy, z * np.ones_like(posx)]), [1, 2, 0])

    # Distance from every antenna to every pixel, shape [n_ant, npix_q, npix_p].
    diff_vectors = (station_pqr[:, None, None, :] - posxyz[None, :, :, :])
    distances = np.linalg.norm(diff_vectors, axis=3)

    # Process visibilities in chunks so the [chunk, npix_q, npix_p]
    # path-difference buffer stays under max_memory_mb (8 bytes per float64).
    vis_chunksize = max_memory_mb * 1024 * 1024 // (8 * npix_p * npix_q)

    bl_diff = np.zeros((vis_chunksize, npix_q, npix_p), dtype=np.float64)
    img = np.zeros((npix_q, npix_p), dtype=np.complex128)
    for vis_chunkstart in range(0, len(baseline_indices), vis_chunksize):
        vis_chunkend = min(vis_chunkstart + vis_chunksize, baseline_indices.shape[0])
        # For the last chunk, bl_diff_chunk is a bit smaller than bl_diff
        bl_diff_chunk = bl_diff[:vis_chunkend - vis_chunkstart, :]
        # Geometric path difference between the two antennas of each baseline,
        # written in place into the reusable buffer.
        np.add(distances[baseline_indices[vis_chunkstart:vis_chunkend, 0]],
               -distances[baseline_indices[vis_chunkstart:vis_chunkend, 1]], out=bl_diff_chunk)

        j2pi = 1j * 2 * np.pi
        for ifreq, freq in enumerate(freqs):
            v = visibilities[vis_chunkstart:vis_chunkend, ifreq][:, None, None]
            lamb = SPEED_OF_LIGHT / freq

            # v[:,np.newaxis,np.newaxis]*np.exp(-2j*np.pi*freq/c*groundbase_pixels[:,:,:]/c)
            # groundbase_pixels=nvis x npix x npix
            # numexpr evaluates the phase term without materialising large
            # intermediate arrays; the per-frequency sums accumulate into img.
            np.add(img, np.sum(ne.evaluate("v * exp(j2pi * bl_diff_chunk / lamb)"), axis=0), out=img)
    # Normalise by the number of accumulated (baseline, frequency) terms.
    img /= len(freqs) * len(baseline_indices)

    return img
def calibrate(vis, modelvis, maxiter=30, amplitudeonly=True):
    """
    Calibrate per-direction station gains against model visibilities and
    subtract the calibrated directions.

    Args:
        vis: visibility matrix, shape [n_st, n_st]
        modelvis: model visibility matrices, shape [n_dir, n_st, n_st]
        maxiter: max iterations (default 30)
        amplitudeonly: fit only amplitudes (default True)

    Returns:
        residual: visibilities with calibrated directions subtracted, shape [n_st, n_st]
        gains: gains, shape [n_dir, n_st]
    """
    nst = vis.shape[1]
    # Convert once; the original rebuilt this array inside the inner loop.
    model_arr = np.array(modelvis)
    ndir = model_arr.shape[0]
    # Builtin `complex` instead of np.complex: that alias was removed in
    # NumPy 1.24 and crashes there.
    gains = np.ones([ndir, nst], dtype=complex)
    if ndir == 0:
        # Nothing to calibrate against.
        return vis, gains

    # Start from a single amplitude matching the data/model power ratio.
    gains *= np.sqrt(norm(vis) / norm(model_arr))
    # Defined up front so the return is valid even for maxiter < 2 (the loop
    # below only recomputes the residual on even iterations; previously this
    # raised NameError for maxiter <= 1).
    residual = vis.copy()

    iteration = 0
    while iteration < maxiter:
        iteration += 1
        gains_prev = gains.copy()
        # Least-squares update of all directions' gains, one station at a time.
        for k in range(nst):
            z = np.conj(gains_prev) * model_arr[:, :, k]
            gains[:, k] = lstsq(z.T, vis[:, k], rcond=None)[0]
        if amplitudeonly:
            gains = np.abs(gains).astype(complex)
        if iteration % 2 == 0 and iteration > 0:
            # Subtract the current model from the data, then damp the gain
            # update by averaging with the previous iterate for stability.
            residual = vis.copy()
            for d in range(ndir):
                residual -= np.diag(np.conj(gains[d])) @ model_arr[d] @ np.diag(gains[d])
            gains = 0.5 * gains + 0.5 * gains_prev
    return residual, gains
def simulate_sky_source(lmn_coord: np.array, baselines: np.array, freq: float):
    """
    Simulate visibilities for a unit-amplitude point source on the sky.

    Args:
        lmn_coord (np.array): l, m, n coordinate of the source
        baselines (np.array): baseline distances in metres, shape (n_ant, n_ant)
        freq (float): Frequency in Hz
    """
    # Phase per baseline from the geometric delay towards (l, m, n).
    geometric_delay = baselines.dot(np.array(lmn_coord))
    return np.exp(2j * np.pi * freq * geometric_delay / SPEED_OF_LIGHT)
def subtract_sources(vis: np.array, baselines: np.array, freq: float, lmn_dict: Dict[str, np.array],
                     sources=("Cas A", "Cyg A", "Sun")):
    """
    Subtract sky sources from visibilities

    Args:
        vis (np.array): visibility matrix, shape [n_ant, n_ant]
        lmn_dict (Dict[str, np.array]): dictionary with lmn coordinates
        baselines (np.array): baseline distances in metres, shape (n_ant, n_ant)
        freq (float): Frequency in Hz
        sources (List[str]): source names to subtract (each must be a key of
                             lmn_dict). Default ("Cas A", "Cyg A", "Sun")
                             (the previous docstring wrongly omitted "Cyg A").

    Returns:
        vis (np.array): visibility matrix with sources subtracted
    """
    # Default is a tuple rather than a list to avoid a shared mutable default.
    modelvis = [simulate_sky_source(lmn_dict[srcname], baselines, freq) for srcname in lmn_dict
                if srcname in sources]

    residual, _ = calibrate(vis, modelvis)

    return residual
| 38.455399 | 116 | 0.636308 |
from typing import Dict, List
import numpy as np
from numpy.linalg import norm, lstsq
import numexpr as ne
import numba
from astropy.coordinates import SkyCoord, SkyOffsetFrame, CartesianRepresentation
__all__ = ["nearfield_imager", "sky_imager", "ground_imager", "skycoord_to_lmn", "calibrate", "simulate_sky_source",
"subtract_sources"]
__version__ = "1.5.0"
SPEED_OF_LIGHT = 299792458.0
def skycoord_to_lmn(pos: SkyCoord, phasecentre: SkyCoord):
todc = pos.transform_to(SkyOffsetFrame(origin=phasecentre))
dc = todc.represent_as(CartesianRepresentation)
dc /= dc.norm()
# not quite follow imaging conventions
return dc.y.value, dc.z.value, dc.x.value - 1
@numba.jit(parallel=True)
def sky_imager(visibilities, baselines, freq, npix_l, npix_m):
img = np.zeros((npix_m, npix_l), dtype=np.complex128)
for m_ix in range(npix_m):
m = -1 + m_ix * 2 / npix_m
for l_ix in range(npix_l):
l = 1 - l_ix * 2 / npix_l
img[m_ix, l_ix] = np.mean(visibilities * np.exp(-2j * np.pi * freq *
(baselines[:, :, 0] * l + baselines[:, :, 1] * m) /
SPEED_OF_LIGHT))
return np.real(img)
def ground_imager(visibilities, freq, npix_p, npix_q, dims, station_pqr, height=1.5):
img = np.zeros([npix_q, npix_p], dtype=np.complex128)
for q_ix, q in enumerate(np.linspace(dims[2], dims[3], npix_q)):
for p_ix, p in enumerate(np.linspace(dims[0], dims[1], npix_p)):
r = height
pqr = np.array([p, q, r], dtype=np.float32)
antdist = np.linalg.norm(station_pqr - pqr[np.newaxis, :], axis=1)
groundbase = antdist[:, np.newaxis] - antdist[np.newaxis, :]
img[q_ix, p_ix] = np.mean(visibilities * np.exp(-2j * np.pi * freq * (-groundbase) / SPEED_OF_LIGHT))
return img
def nearfield_imager(visibilities, baseline_indices, freqs, npix_p, npix_q, extent, station_pqr, height=1.5,
max_memory_mb=200):
z = height
x = np.linspace(extent[0], extent[1], npix_p)
y = np.linspace(extent[2], extent[3], npix_q)
posx, posy = np.meshgrid(x, y)
posxyz = np.transpose(np.array([posx, posy, z * np.ones_like(posx)]), [1, 2, 0])
diff_vectors = (station_pqr[:, None, None, :] - posxyz[None, :, :, :])
distances = np.linalg.norm(diff_vectors, axis=3)
vis_chunksize = max_memory_mb * 1024 * 1024 // (8 * npix_p * npix_q)
bl_diff = np.zeros((vis_chunksize, npix_q, npix_p), dtype=np.float64)
img = np.zeros((npix_q, npix_p), dtype=np.complex128)
for vis_chunkstart in range(0, len(baseline_indices), vis_chunksize):
vis_chunkend = min(vis_chunkstart + vis_chunksize, baseline_indices.shape[0])
# For the last chunk, bl_diff_chunk is a bit smaller than bl_diff
bl_diff_chunk = bl_diff[:vis_chunkend - vis_chunkstart, :]
np.add(distances[baseline_indices[vis_chunkstart:vis_chunkend, 0]],
-distances[baseline_indices[vis_chunkstart:vis_chunkend, 1]], out=bl_diff_chunk)
j2pi = 1j * 2 * np.pi
for ifreq, freq in enumerate(freqs):
v = visibilities[vis_chunkstart:vis_chunkend, ifreq][:, None, None]
lamb = SPEED_OF_LIGHT / freq
# v[:,np.newaxis,np.newaxis]*np.exp(-2j*np.pi*freq/c*groundbase_pixels[:,:,:]/c)
# groundbase_pixels=nvis x npix x npix
np.add(img, np.sum(ne.evaluate("v * exp(j2pi * bl_diff_chunk / lamb)"), axis=0), out=img)
img /= len(freqs) * len(baseline_indices)
return img
def calibrate(vis, modelvis, maxiter=30, amplitudeonly=True):
nst = vis.shape[1]
ndir = np.array(modelvis).shape[0]
gains = np.ones([ndir, nst], dtype=np.complex)
if ndir == 0:
return vis, gains
else:
gains *= np.sqrt(norm(vis) / norm(modelvis))
iteration = 0
while iteration < maxiter:
iteration += 1
gains_prev = gains.copy()
for k in range(nst):
z = np.conj(gains_prev) * np.array(modelvis)[:, :, k]
gains[:, k] = lstsq(z.T, vis[:, k], rcond=None)[0]
if amplitudeonly:
gains = np.abs(gains).astype(np.complex)
if iteration % 2 == 0 and iteration > 0:
dg = norm(gains - gains_prev)
residual = vis.copy()
for d in range(ndir):
residual -= np.diag(np.conj(gains[d])) @ modelvis[d] @ np.diag(gains[d])
gains = 0.5 * gains + 0.5 * gains_prev
return residual, gains
def simulate_sky_source(lmn_coord: np.array, baselines: np.array, freq: float):
return np.exp(2j * np.pi * freq * baselines.dot(np.array(lmn_coord)) / SPEED_OF_LIGHT)
def subtract_sources(vis: np.array, baselines: np.array, freq: float, lmn_dict: Dict[str, np.array],
sources=["Cas A", "Cyg A", "Sun"]):
modelvis = [simulate_sky_source(lmn_dict[srcname], baselines, freq) for srcname in lmn_dict
if srcname in sources]
residual, _ = calibrate(vis, modelvis)
return residual
| true | true |
1c369b2ba4f2284a15f4bc84084547d05fa280f0 | 799 | py | Python | atom/nucleus/python/test/test_cash.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_cash.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_cash.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.cash import Cash # noqa: E501
from nucleus_api.rest import ApiException
class TestCash(unittest.TestCase):
    """Cash unit test stubs (generated by swagger-codegen; bodies are empty)."""

    def setUp(self):
        # No fixtures needed for the generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testCash(self):
        """Test Cash"""
        # FIXME: construct object with mandatory attributes with example values
        # model = nucleus_api.models.cash.Cash()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| 19.487805 | 79 | 0.670839 |
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.cash import Cash
from nucleus_api.rest import ApiException
class TestCash(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testCash(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
1c369bd018d66ebfeb25445a3c74ee8298cf1795 | 3,947 | bzl | Python | kustomization.bzl | nateinaction/rules_kustomize | 8fdb286e69216a979e432c5f2166426a390dd92e | [
"Apache-2.0"
] | null | null | null | kustomization.bzl | nateinaction/rules_kustomize | 8fdb286e69216a979e432c5f2166426a390dd92e | [
"Apache-2.0"
] | null | null | null | kustomization.bzl | nateinaction/rules_kustomize | 8fdb286e69216a979e432c5f2166426a390dd92e | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 BenchSci Analytics Inc.
# Copyright 2021 Nate Gay
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"rules_kustomize"
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@bazel_skylib//lib:shell.bzl", "shell")
load("//:kustomize_image.bzl", "ImageInfo")
def _impl(ctx):
    """Implementation of the kustomization rule.

    Three-step pipeline:
      1. run `kustomize build` on the source kustomization to produce an
         unhydrated yaml,
      2. write a wrapper kustomization.yaml that references it (and splices
         in image overrides),
      3. run `kustomize build` again to emit the final hydrated yaml.
    """
    # Create unhydrated yaml
    unhydrated_file = ctx.actions.declare_file('new/{}-unhydrated.yaml'.format(ctx.attr.name))
    unhydrated_dir = paths.dirname(ctx.attr.kustomization_yaml.files.to_list()[0].path)
    unhydrated_args = ctx.actions.args()
    unhydrated_args.add('build', unhydrated_dir)
    unhydrated_args.add('--load-restrictor', 'LoadRestrictionsNone')
    unhydrated_args.add('--output', unhydrated_file.path)
    ctx.actions.run(
        inputs = [file for target in ctx.attr.srcs for file in target.files.to_list()],
        outputs = [unhydrated_file],
        arguments = [unhydrated_args],
        executable = ctx.executable._kustomize,
    )

    # Create kustomization.yaml
    kustomization_file = ctx.actions.declare_file('new/kustomization.yaml')
    yaml = [
        'apiVersion: kustomize.config.k8s.io/v1beta1',
        'kind: Kustomization',
        'resources:',
        '- {}'.format(paths.basename(unhydrated_file.path)),
    ]
    if ctx.attr.images:
        # The $(cat ...) is deliberately left unexpanded here: it is expanded
        # by the shell when the run_shell command below executes, inlining the
        # image partials' contents into the generated yaml.
        yaml.append('images:')
        yaml.append('$(cat {})'.format(' '.join([shell.quote(image[ImageInfo].partial.path) for image in ctx.attr.images])))
    formatted_yaml = '\n'.join(yaml)
    ctx.actions.run_shell(
        inputs = [image[ImageInfo].partial for image in ctx.attr.images],
        outputs = [kustomization_file],
        arguments = [],
        command = 'printf "{}\n" > "{}"'.format(formatted_yaml, kustomization_file.path),
    )

    # Create hydrated yaml
    hydrated_args = ctx.actions.args()
    hydrated_args.add('build', paths.dirname(kustomization_file.path))
    hydrated_args.add('--load-restrictor', 'LoadRestrictionsNone')
    hydrated_args.add('--output', ctx.outputs.hydrated.path)
    ctx.actions.run(
        inputs = [unhydrated_file, kustomization_file],
        outputs = [ctx.outputs.hydrated],
        arguments = [hydrated_args],
        executable = ctx.executable._kustomize,
    )
# Private rule; use the kustomization() macro below. Produces %{name}.yaml.
kustomization_ = rule(
    attrs = dicts.add({
        'kustomization_yaml': attr.label(
            doc = 'Kustomization yaml file to build',
            allow_single_file = True,
            mandatory = True,
        ),
        'srcs': attr.label_list(
            doc = 'Source inputs to run `kustomize build` against. Note that the Bazel glob() function can be used to specify which source files to include and which to exclude, e.g. `glob(["*.yaml"], exclude=["golden.yaml"])`.',
            cfg = 'host',
            mandatory = True,
            allow_files = True,
        ),
        'images': attr.label_list(
            doc = 'A list of kustomize_image labels.',
            cfg = 'host',
            mandatory = False,
            allow_files = True,
            providers = [ImageInfo]
        ),
        # Implicit dependency on the kustomize binary.
        '_kustomize': attr.label(
            default = '@kustomize//:file',
            cfg = 'host',
            executable = True,
        )
        # "tags": attr.list(
        #     default = ["block-network"],
        # )
    }),
    implementation = _impl,
    outputs = {
        'hydrated': '%{name}.yaml',
    },
)
def kustomization(**kwargs):
    """Public macro wrapper around the private kustomization_ rule."""
    kustomization_(**kwargs)
| 37.235849 | 229 | 0.636686 |
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@bazel_skylib//lib:shell.bzl", "shell")
load("//:kustomize_image.bzl", "ImageInfo")
def _impl(ctx):
unhydrated_file = ctx.actions.declare_file('new/{}-unhydrated.yaml'.format(ctx.attr.name))
unhydrated_dir = paths.dirname(ctx.attr.kustomization_yaml.files.to_list()[0].path)
unhydrated_args = ctx.actions.args()
unhydrated_args.add('build', unhydrated_dir)
unhydrated_args.add('--load-restrictor', 'LoadRestrictionsNone')
unhydrated_args.add('--output', unhydrated_file.path)
ctx.actions.run(
inputs = [file for target in ctx.attr.srcs for file in target.files.to_list()],
outputs = [unhydrated_file],
arguments = [unhydrated_args],
executable = ctx.executable._kustomize,
)
kustomization_file = ctx.actions.declare_file('new/kustomization.yaml')
yaml = [
'apiVersion: kustomize.config.k8s.io/v1beta1',
'kind: Kustomization',
'resources:',
'- {}'.format(paths.basename(unhydrated_file.path)),
]
if ctx.attr.images:
yaml.append('images:')
yaml.append('$(cat {})'.format(' '.join([shell.quote(image[ImageInfo].partial.path) for image in ctx.attr.images])))
formatted_yaml = '\n'.join(yaml)
ctx.actions.run_shell(
inputs = [image[ImageInfo].partial for image in ctx.attr.images],
outputs = [kustomization_file],
arguments = [],
command = 'printf "{}\n" > "{}"'.format(formatted_yaml, kustomization_file.path),
)
hydrated_args = ctx.actions.args()
hydrated_args.add('build', paths.dirname(kustomization_file.path))
hydrated_args.add('--load-restrictor', 'LoadRestrictionsNone')
hydrated_args.add('--output', ctx.outputs.hydrated.path)
ctx.actions.run(
inputs = [unhydrated_file, kustomization_file],
outputs = [ctx.outputs.hydrated],
arguments = [hydrated_args],
executable = ctx.executable._kustomize,
)
kustomization_ = rule(
attrs = dicts.add({
'kustomization_yaml': attr.label(
doc = 'Kustomization yaml file to build',
allow_single_file = True,
mandatory = True,
),
'srcs': attr.label_list(
doc = 'Source inputs to run `kustomize build` against. Note that the Bazel glob() function can be used to specify which source files to include and which to exclude, e.g. `glob(["*.yaml"], exclude=["golden.yaml"])`.',
cfg = 'host',
mandatory = True,
allow_files = True,
),
'images': attr.label_list(
doc = 'A list of kustomize_image labels.',
cfg = 'host',
mandatory = False,
allow_files = True,
providers = [ImageInfo]
),
'_kustomize': attr.label(
default = '@kustomize//:file',
cfg = 'host',
executable = True,
)
}),
implementation = _impl,
outputs = {
'hydrated': '%{name}.yaml',
},
)
def kustomization(**kwargs):
kustomization_(**kwargs)
| true | true |
1c369e2a0d2e5bc13e9fb4d88242336baf419780 | 644 | py | Python | examples/train-1.py | SINeWang/magpie | 4c6d806969795a2d5fc52645fced08adaf686959 | [
"MIT"
] | null | null | null | examples/train-1.py | SINeWang/magpie | 4c6d806969795a2d5fc52645fced08adaf686959 | [
"MIT"
] | null | null | null | examples/train-1.py | SINeWang/magpie | 4c6d806969795a2d5fc52645fced08adaf686959 | [
"MIT"
] | null | null | null | import os
import sys
sys.path.append(os.path.realpath(os.getcwd()))
sys.path.append("..")
from magpie import Magpie
magpie = Magpie()
magpie.train_word2vec('../data/hep-categories', vec_dim=3) #训练一个word2vec
magpie.fit_scaler('../data/hep-categories') #生成scaler
magpie.init_word_vectors('../data/hep-categories', vec_dim=3) #初始化词向量
labels = ['军事','旅游','政治'] #定义所有类别
magpie.train('../data/hep-categories', labels, test_ratio=0.2, epochs=20) #训练,20%数据作为测试数据,5轮
#保存训练后的模型文件
magpie.save_word2vec_model('../workspace/embeddings', overwrite=True)
magpie.save_scaler('../workspace/scaler', overwrite=True)
magpie.save_model('../workspace/model.h5') | 33.894737 | 92 | 0.746894 | import os
import sys
sys.path.append(os.path.realpath(os.getcwd()))
sys.path.append("..")
from magpie import Magpie
magpie = Magpie()
magpie.train_word2vec('../data/hep-categories', vec_dim=3)
magpie.fit_scaler('../data/hep-categories')
magpie.init_word_vectors('../data/hep-categories', vec_dim=3)
labels = ['军事','旅游','政治']
magpie.train('../data/hep-categories', labels, test_ratio=0.2, epochs=20)
magpie.save_word2vec_model('../workspace/embeddings', overwrite=True)
magpie.save_scaler('../workspace/scaler', overwrite=True)
magpie.save_model('../workspace/model.h5') | true | true |
1c369e5832adc50f438c555f56dfcb9a9431f342 | 5,501 | py | Python | solvers/generation_solver/img_interface.py | Anthony102899/Lego-ImageGenerator | 52b19c8bb20f77a3394675e7c037c943a50c1e15 | [
"Unlicense"
] | 1 | 2022-03-20T10:23:38.000Z | 2022-03-20T10:23:38.000Z | solvers/generation_solver/img_interface.py | Anthony102899/Lego-ImageGenerator | 52b19c8bb20f77a3394675e7c037c943a50c1e15 | [
"Unlicense"
] | null | null | null | solvers/generation_solver/img_interface.py | Anthony102899/Lego-ImageGenerator | 52b19c8bb20f77a3394675e7c037c943a50c1e15 | [
"Unlicense"
] | null | null | null | import os
from tkinter import *
import tkinter.filedialog as tkfd
from PIL import Image
import numpy as np
import solvers.generation_solver.image_seperation as IS
def layer_interface(img_num):
    """Collect, via one dialog window per image, an image file and its layer.

    Pops up `img_num` Toplevel windows in sequence. Each lets the user pick a
    .png through a file dialog, shows a preview scaled towards roughly 250px
    wide, and asks for a layer number.

    Args:
        img_num: number of images to collect.

    Returns:
        (layer_names, layer_nums): lists of the chosen file basenames and the
        layer values entered, both as strings in selection order.
    """
    layer_names = []
    layer_nums = []
    for k in range(img_num):
        master = Toplevel()
        master.title(f"Image number {k+1}")
        master.geometry("+300+200")

        # input image and layer
        img_label = Label(master, text="Image").grid(row=0)
        layer_label = Label(master, text="Layer").grid(row=1)
        entry_img = Entry(master, width=30)
        entry_layer = Entry(master, width=30)
        entry_img.grid(row=0, column=1)
        entry_layer.grid(row=1, column=1)

        # The last window says "Done", the rest "Next"; both just end mainloop.
        if k == img_num - 1:
            Button(master, text='Done', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
        else:
            Button(master, text='Next', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)

        img_path = "inputs/images/"
        img_path = os.path.join(os.path.dirname(__file__), img_path)
        path = tkfd.askopenfilename(initialdir = img_path, title = "Select file", filetypes = (("png files","*.png"),("all files","*.*")))
        entry_img.insert('0', os.path.basename(path))

        # Build the preview. PhotoImage.subsample()/zoom() need integer
        # factors >= 1, so clamp the rounded ratios: the previous code could
        # pass 0 for strongly elongated images and crash.
        img = PhotoImage(file=path)
        width, height = img.width(), img.height()
        if width > 250:
            scale_w = max(1, int(round(width / 250, 0)))
            scale_h = max(1, int(round(height / 250, 0)))
            img = img.subsample(scale_w, scale_h)
        if width < 250:
            scale_w = max(1, int(round(250 / width, 0)))
            scale_h = max(1, int(round(250 / height, 0)))
            img = img.zoom(scale_w, scale_h)
        Label(master, image=img).grid(row=2, column=1)
        mainloop()

        layer_names.append(entry_img.get())
        layer_nums.append(entry_layer.get())
    return layer_names, layer_nums
def show_interface():
    """Show the main Tk configuration window and return the user's choices.

    Opens file pickers for the connectivity graph and the input image, then a
    form for background colour (RGB), rotation, scale and x/y translation.
    Returns a tuple:
    (graph_name, img_num, layer_names, layer_nums, rgb, degree, scale, x, y)
    where ``rgb`` is an empty list when no background colour was entered,
    otherwise a numpy int array of shape (3,).
    """
    root = Tk()
    root.geometry("+300+300")
    Label(root, text="Graph", font=("", 14, "bold", "underline"), fg='#696969').grid(row=0, sticky='w')
    entry_graph = Entry(root, width=15)
    entry_graph.grid(row=0, column=1)
    graph_path = "connectivity/"
    graph_path = os.path.join(os.path.dirname(__file__), graph_path)
    path = tkfd.askopenfilename(initialdir = graph_path, title = "Select file", filetypes = (("pkl files", "*.pkl"), ("all files","*.*")))
    entry_graph.insert('0', os.path.basename(path))
    # input No. image and button
    Label(root, text="Input image", font=("", 14, "bold", "underline"), fg='#696969').grid(row=1, sticky='w')
    entry_file = Entry(root, width=15)
    entry_file.grid(row=1, column=1)
    entry_path = "inputs/images/"
    entry_path = os.path.join(os.path.dirname(__file__), entry_path)
    input_path = tkfd.askopenfilename(initialdir=entry_path, title="Select input image", filetypes=(("png files", "*.png"), ("jpg files", "*.jpg")))
    entry_file.insert('0', os.path.basename(input_path))
    Button(root, text='Next', command=root.quit).grid(row=1, column=2, sticky='e', pady=4)
    # input background color
    Label(root, text="").grid(row=2, column=1)
    Label(root, text="Background color", font=("", 14, "bold", "underline"), fg='#696969').grid(row=3, sticky='w')
    Label(root, text="R", fg='#4f4f4f').grid(row=4, column=0)
    Label(root, text="G", fg='#4f4f4f').grid(row=4, column=1)
    Label(root, text="B", fg='#4f4f4f').grid(row=4, column=2)
    entry_r = Entry(root, width=15)
    entry_g = Entry(root, width=15)
    entry_b = Entry(root, width=15)
    entry_r.grid(row=5, column=0)
    entry_g.grid(row=5, column=1)
    entry_b.grid(row=5, column=2)
    # input rotation and scaling
    Label(root, text="").grid(row=6, column=1)
    Label(root, text="Rotation degree", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, sticky='w')
    entry_degree = Entry(root, width=15, textvariable=StringVar(root, value='0'))
    entry_degree.grid(row=7, column=1)
    Label(root, text="Scale", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, column=2)
    entry_scale = Entry(root, width=15, textvariable=StringVar(root, value='1'))
    entry_scale.grid(row=7, column=3)
    # input translation
    Label(root, text="").grid(row=8, column=1)
    Label(root, text="x translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, sticky='w')
    entry_x = Entry(root, width=15, textvariable=StringVar(root, value='0'))
    entry_x.grid(row=9, column=1)
    Label(root, text="y translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, column=2)
    entry_y = Entry(root, width=15, textvariable=StringVar(root, value='0'))
    entry_y.grid(row=9, column=3)
    Label(root, text="").grid(row=9, column=1)
    mainloop()
    img_path = input_path
    print(img_path)
    # Split the image into per-colour layers; img_num is how many were produced.
    img_num = IS.seperate_color(img_path, "./cache/")
    r, g, b = entry_r.get(), entry_g.get(), entry_b.get()
    # Empty entries are treated as 0 (int); filled entries stay strings here.
    if len(r) == 0:
        r = 0
    if len(g) == 0:
        g = 0
    if len(b) == 0:
        b = 0
    # NOTE(review): this compares against int 0, so typing "0","0","0" in the
    # entries yields rgb=np.array((0,0,0)) while leaving them blank yields
    # rgb=[]. Looks intentional-ish but worth confirming with the caller.
    if r == 0 and g == 0 and b == 0:
        rgb = []
    else:
        rgb = np.array((int(r), int(g), int(b)))
    layer_names, layer_nums = layer_interface(img_num)
    return entry_graph.get(), img_num, layer_names, layer_nums, rgb, int(entry_degree.get()), float(entry_scale.get()), int(entry_x.get()), int(entry_y.get())
if __name__ == '__main__':
print(show_interface()) | 42.315385 | 158 | 0.616797 | import os
from tkinter import *
import tkinter.filedialog as tkfd
from PIL import Image
import numpy as np
import solvers.generation_solver.image_seperation as IS
def layer_interface(img_num):
layer_names = []
layer_nums = []
for k in range(img_num):
master = Toplevel()
master.title(f"Image number {k+1}")
master.geometry("+300+200")
img_label = Label(master, text="Image").grid(row=0)
layer_label = Label(master, text="Layer").grid(row=1)
entry_img = Entry(master, width=30)
entry_layer = Entry(master, width=30)
entry_img.grid(row=0, column=1)
entry_layer.grid(row=1, column=1)
if k == img_num - 1:
Button(master, text='Done', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
else:
Button(master, text='Next', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
img_path = "inputs/images/"
img_path = os.path.join(os.path.dirname(__file__), img_path)
path = tkfd.askopenfilename(initialdir = img_path, title = "Select file", filetypes = (("png files","*.png"),("all files","*.*")))
entry_img.insert('0', os.path.basename(path))
image = Image.open(path)
img = PhotoImage(file=path)
width, height = img.width(), img.height()
if width > 250:
scale_w = int(round(width / 250, 0))
scale_h = int(round(height / 250, 0))
img = img.subsample(scale_w, scale_h)
if width < 250:
scale_w = int(round(250 / width, 0))
scale_h = int(round(250 / height, 0))
img = img.zoom(scale_w, scale_h)
Label(master, image=img).grid(row=2, column=1)
mainloop()
img_name = entry_img.get()
img_layer = entry_layer.get()
layer_names.append(img_name)
layer_nums.append(img_layer)
return layer_names, layer_nums
def show_interface():
root = Tk()
root.geometry("+300+300")
Label(root, text="Graph", font=("", 14, "bold", "underline"), fg='#696969').grid(row=0, sticky='w')
entry_graph = Entry(root, width=15)
entry_graph.grid(row=0, column=1)
graph_path = "connectivity/"
graph_path = os.path.join(os.path.dirname(__file__), graph_path)
path = tkfd.askopenfilename(initialdir = graph_path, title = "Select file", filetypes = (("pkl files", "*.pkl"), ("all files","*.*")))
entry_graph.insert('0', os.path.basename(path))
Label(root, text="Input image", font=("", 14, "bold", "underline"), fg='#696969').grid(row=1, sticky='w')
entry_file = Entry(root, width=15)
entry_file.grid(row=1, column=1)
entry_path = "inputs/images/"
entry_path = os.path.join(os.path.dirname(__file__), entry_path)
input_path = tkfd.askopenfilename(initialdir=entry_path, title="Select input image", filetypes=(("png files", "*.png"), ("jpg files", "*.jpg")))
entry_file.insert('0', os.path.basename(input_path))
Button(root, text='Next', command=root.quit).grid(row=1, column=2, sticky='e', pady=4)
Label(root, text="").grid(row=2, column=1)
Label(root, text="Background color", font=("", 14, "bold", "underline"), fg='#696969').grid(row=3, sticky='w')
Label(root, text="R", fg='#4f4f4f').grid(row=4, column=0)
Label(root, text="G", fg='#4f4f4f').grid(row=4, column=1)
Label(root, text="B", fg='#4f4f4f').grid(row=4, column=2)
entry_r = Entry(root, width=15)
entry_g = Entry(root, width=15)
entry_b = Entry(root, width=15)
entry_r.grid(row=5, column=0)
entry_g.grid(row=5, column=1)
entry_b.grid(row=5, column=2)
Label(root, text="").grid(row=6, column=1)
Label(root, text="Rotation degree", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, sticky='w')
entry_degree = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_degree.grid(row=7, column=1)
Label(root, text="Scale", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, column=2)
entry_scale = Entry(root, width=15, textvariable=StringVar(root, value='1'))
entry_scale.grid(row=7, column=3)
Label(root, text="").grid(row=8, column=1)
Label(root, text="x translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, sticky='w')
entry_x = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_x.grid(row=9, column=1)
Label(root, text="y translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, column=2)
entry_y = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_y.grid(row=9, column=3)
Label(root, text="").grid(row=9, column=1)
mainloop()
img_path = input_path
print(img_path)
img_num = IS.seperate_color(img_path, "./cache/")
r, g, b = entry_r.get(), entry_g.get(), entry_b.get()
if len(r) == 0:
r = 0
if len(g) == 0:
g = 0
if len(b) == 0:
b = 0
if r == 0 and g == 0 and b == 0:
rgb = []
else:
rgb = np.array((int(r), int(g), int(b)))
layer_names, layer_nums = layer_interface(img_num)
return entry_graph.get(), img_num, layer_names, layer_nums, rgb, int(entry_degree.get()), float(entry_scale.get()), int(entry_x.get()), int(entry_y.get())
if __name__ == '__main__':
print(show_interface()) | true | true |
1c369e6b83847416415f729c831355feb66a9dc4 | 809 | py | Python | tests/test_cli.py | gfogwill/dmps | d92826dbd37a363b01ea7a287edd56ae88326364 | [
"MIT"
] | null | null | null | tests/test_cli.py | gfogwill/dmps | d92826dbd37a363b01ea7a287edd56ae88326364 | [
"MIT"
] | null | null | null | tests/test_cli.py | gfogwill/dmps | d92826dbd37a363b01ea7a287edd56ae88326364 | [
"MIT"
] | null | null | null | import unittest
from click.testing import CliRunner
from src import __version__ as version
from src.cli import cli
class TestCLI(unittest.TestCase):
def test_cli(self):
"""Run `dmps` without arguments."""
result = CliRunner().invoke(cli, [])
assert result.exit_code == 0
assert "dmps" in result.output
def test_print_version(self):
"""Chech that `dmps --version` and `dmps -V` output contain
the current package version."""
result = CliRunner().invoke(cli, ["--version"])
assert result.exit_code == 0
assert version in result.output
result_abr = CliRunner().invoke(cli, ["-V"])
assert result_abr.exit_code == 0
assert version in result_abr.output
if __name__ == '__main__':
unittest.main()
| 24.515152 | 67 | 0.641533 | import unittest
from click.testing import CliRunner
from src import __version__ as version
from src.cli import cli
class TestCLI(unittest.TestCase):
def test_cli(self):
result = CliRunner().invoke(cli, [])
assert result.exit_code == 0
assert "dmps" in result.output
def test_print_version(self):
result = CliRunner().invoke(cli, ["--version"])
assert result.exit_code == 0
assert version in result.output
result_abr = CliRunner().invoke(cli, ["-V"])
assert result_abr.exit_code == 0
assert version in result_abr.output
if __name__ == '__main__':
unittest.main()
| true | true |
1c36a014dfc6dfdd8d2a673bfeb4c459ddd8bbc3 | 3,897 | py | Python | sensebert.py | AI21Labs/sense-bert | 32773c4da8ba23674978170598498fe0239ddb1a | [
"Apache-2.0"
] | 30 | 2020-07-13T17:29:48.000Z | 2022-03-29T12:20:55.000Z | sensebert.py | AI21Labs/sense-bert | 32773c4da8ba23674978170598498fe0239ddb1a | [
"Apache-2.0"
] | 8 | 2020-09-25T04:27:20.000Z | 2022-02-10T02:00:41.000Z | sensebert.py | AI21Labs/sense-bert | 32773c4da8ba23674978170598498fe0239ddb1a | [
"Apache-2.0"
] | 8 | 2020-07-30T10:51:19.000Z | 2022-02-01T19:35:10.000Z | import os
from collections import namedtuple
import tensorflow as tf
from tokenization import FullTokenizer
_SenseBertGraph = namedtuple(
'SenseBertGraph',
('input_ids', 'input_mask', 'contextualized_embeddings', 'mlm_logits', 'supersense_losits')
)
_MODEL_PATHS = {
'sensebert-base-uncased': 'gs://ai21-public-models/sensebert-base-uncased',
'sensebert-large-uncased': 'gs://ai21-public-models/sensebert-large-uncased'
}
_CONTEXTUALIZED_EMBEDDINGS_TENSOR_NAME = "bert/encoder/Reshape_13:0"
def _get_model_path(name_or_path, is_tokenizer=False):
    """Resolve a model/tokenizer identifier to its storage location.

    Known names are mapped through ``_MODEL_PATHS``; anything else is assumed
    to already be a filesystem path or URL. Either way the choice is announced
    on stdout.
    """
    kind = 'tokenizer' if is_tokenizer else 'model'
    if name_or_path in _MODEL_PATHS:
        print(f"Loading the known {kind} '{name_or_path}'")
        return _MODEL_PATHS[name_or_path]
    print(f"This is not a known {kind}. "
          f"Assuming {name_or_path} is a path or a url...")
    return name_or_path
def load_tokenizer(name_or_path):
    """Build a FullTokenizer from the vocab files stored alongside the model.

    Expects ``vocab.txt`` and ``supersense_vocab.txt`` to live directly under
    the resolved model directory (local path or GCS bucket).
    """
    model_path = _get_model_path(name_or_path, is_tokenizer=True)
    vocab_file = os.path.join(model_path, "vocab.txt")
    supersense_vocab_file = os.path.join(model_path, "supersense_vocab.txt")
    return FullTokenizer(vocab_file=vocab_file, senses_file=supersense_vocab_file)
def _load_model(name_or_path, session=None):
    """Load a SenseBERT SavedModel into a TF1 session and wire up its tensors.

    Resolves the model path, loads the serving signature, and returns a
    _SenseBertGraph namedtuple whose fields are the graph tensors needed for
    inference. Uses the default session when none is given.
    """
    if session is None:
        session = tf.get_default_session()
    model = tf.saved_model.load(export_dir=_get_model_path(name_or_path), sess=session, tags=[tf.saved_model.SERVING])
    serve_def = model.signature_def[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    # Map both the signature's inputs and outputs from tensor-info to the
    # actual tensors in the loaded graph (one dict comprehension per side).
    inputs, outputs = ({key: session.graph.get_tensor_by_name(info.name) for key, info in puts.items()}
                       for puts in (serve_def.inputs, serve_def.outputs))
    # The contextualized-embeddings tensor is not in the serving signature, so
    # it is fetched by its hard-coded graph name instead.
    # Note: "supersense_losits" (sic) matches the namedtuple field name above.
    return _SenseBertGraph(
        input_ids=inputs['input_ids'],
        input_mask=inputs['input_mask'],
        contextualized_embeddings=session.graph.get_tensor_by_name(_CONTEXTUALIZED_EMBEDDINGS_TENSOR_NAME),
        supersense_losits=outputs['ss'],
        mlm_logits=outputs['masked_lm']
    )
class SenseBert:
    """High-level wrapper bundling a SenseBERT graph, its tokenizer and a session."""
    def __init__(self, name_or_path, max_seq_length=512, session=None):
        # max_seq_length bounds tokenized inputs (enforced by an assert in tokenize).
        self.max_seq_length = max_seq_length
        self.session = session if session else tf.get_default_session()
        self.model = _load_model(name_or_path, session=self.session)
        self.tokenizer = load_tokenizer(name_or_path)
    def tokenize(self, inputs):
        """
        Gets a string or a list of strings, and returns a tuple (input_ids, input_mask) to use as inputs for SenseBERT.
        Both share the same shape: [batch_size, sequence_length] where sequence_length is the maximal sequence length.
        """
        if isinstance(inputs, str):
            inputs = [inputs]
        # Tokenize every input, wrapping each in start/end symbols.
        all_token_ids = []
        for inp in inputs:
            tokens = [self.tokenizer.start_sym] + self.tokenizer.tokenize(inp)[0] + [self.tokenizer.end_sym]
            assert len(tokens) <= self.max_seq_length
            all_token_ids.append(self.tokenizer.convert_tokens_to_ids(tokens))
        # Pad every sequence to the longest one in this batch; the mask marks
        # real tokens with 1 and padding with 0.
        max_len = max([len(token_ids) for token_ids in all_token_ids])
        input_ids, input_mask = [], []
        # convert_tokens_to_ids returns a one-element list here, so list
        # repetition (pad_sym_id * to_pad) yields to_pad pad ids.
        pad_sym_id = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_sym])
        for token_ids in all_token_ids:
            to_pad = max_len - len(token_ids)
            input_ids.append(token_ids + pad_sym_id * to_pad)
            input_mask.append([1] * len(token_ids) + [0] * to_pad)
        return input_ids, input_mask
    def run(self, input_ids, input_mask):
        # Returns (contextualized_embeddings, mlm_logits, supersense_logits)
        # for the given padded batch.
        return self.session.run(
            [self.model.contextualized_embeddings, self.model.mlm_logits, self.model.supersense_losits],
            feed_dict={self.model.input_ids: input_ids, self.model.input_mask: input_mask}
        )
| 41.457447 | 119 | 0.702335 | import os
from collections import namedtuple
import tensorflow as tf
from tokenization import FullTokenizer
_SenseBertGraph = namedtuple(
'SenseBertGraph',
('input_ids', 'input_mask', 'contextualized_embeddings', 'mlm_logits', 'supersense_losits')
)
_MODEL_PATHS = {
'sensebert-base-uncased': 'gs://ai21-public-models/sensebert-base-uncased',
'sensebert-large-uncased': 'gs://ai21-public-models/sensebert-large-uncased'
}
_CONTEXTUALIZED_EMBEDDINGS_TENSOR_NAME = "bert/encoder/Reshape_13:0"
def _get_model_path(name_or_path, is_tokenizer=False):
if name_or_path in _MODEL_PATHS:
print(f"Loading the known {'tokenizer' if is_tokenizer else 'model'} '{name_or_path}'")
model_path = _MODEL_PATHS[name_or_path]
else:
print(f"This is not a known {'tokenizer' if is_tokenizer else 'model'}. "
f"Assuming {name_or_path} is a path or a url...")
model_path = name_or_path
return model_path
def load_tokenizer(name_or_path):
model_path = _get_model_path(name_or_path, is_tokenizer=True)
vocab_file = os.path.join(model_path, "vocab.txt")
supersense_vocab_file = os.path.join(model_path, "supersense_vocab.txt")
return FullTokenizer(vocab_file=vocab_file, senses_file=supersense_vocab_file)
def _load_model(name_or_path, session=None):
if session is None:
session = tf.get_default_session()
model = tf.saved_model.load(export_dir=_get_model_path(name_or_path), sess=session, tags=[tf.saved_model.SERVING])
serve_def = model.signature_def[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
inputs, outputs = ({key: session.graph.get_tensor_by_name(info.name) for key, info in puts.items()}
for puts in (serve_def.inputs, serve_def.outputs))
return _SenseBertGraph(
input_ids=inputs['input_ids'],
input_mask=inputs['input_mask'],
contextualized_embeddings=session.graph.get_tensor_by_name(_CONTEXTUALIZED_EMBEDDINGS_TENSOR_NAME),
supersense_losits=outputs['ss'],
mlm_logits=outputs['masked_lm']
)
class SenseBert:
def __init__(self, name_or_path, max_seq_length=512, session=None):
self.max_seq_length = max_seq_length
self.session = session if session else tf.get_default_session()
self.model = _load_model(name_or_path, session=self.session)
self.tokenizer = load_tokenizer(name_or_path)
def tokenize(self, inputs):
if isinstance(inputs, str):
inputs = [inputs]
all_token_ids = []
for inp in inputs:
tokens = [self.tokenizer.start_sym] + self.tokenizer.tokenize(inp)[0] + [self.tokenizer.end_sym]
assert len(tokens) <= self.max_seq_length
all_token_ids.append(self.tokenizer.convert_tokens_to_ids(tokens))
max_len = max([len(token_ids) for token_ids in all_token_ids])
input_ids, input_mask = [], []
pad_sym_id = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_sym])
for token_ids in all_token_ids:
to_pad = max_len - len(token_ids)
input_ids.append(token_ids + pad_sym_id * to_pad)
input_mask.append([1] * len(token_ids) + [0] * to_pad)
return input_ids, input_mask
def run(self, input_ids, input_mask):
return self.session.run(
[self.model.contextualized_embeddings, self.model.mlm_logits, self.model.supersense_losits],
feed_dict={self.model.input_ids: input_ids, self.model.input_mask: input_mask}
)
| true | true |
1c36a09dcf51bbbc1af03c87491000f450456c13 | 1,365 | py | Python | rptk/format/jsonf.py | wolcomm/rptk | fe6c1b597741ff14e4c89519458bb0950f0aa955 | [
"Apache-2.0"
] | 15 | 2017-11-30T01:28:11.000Z | 2021-08-12T09:17:36.000Z | rptk/format/jsonf.py | wolcomm/rptk | fe6c1b597741ff14e4c89519458bb0950f0aa955 | [
"Apache-2.0"
] | 71 | 2018-06-22T09:54:50.000Z | 2020-10-21T07:10:54.000Z | rptk/format/jsonf.py | wolcomm/rptk | fe6c1b597741ff14e4c89519458bb0950f0aa955 | [
"Apache-2.0"
] | 2 | 2019-08-31T20:45:19.000Z | 2019-10-02T18:26:58.000Z | # Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.format.jsonf module."""
from __future__ import print_function
from __future__ import unicode_literals
import json
from rptk.format import BaseFormat
class JsonFormat(BaseFormat):
    """Renders result object as a JSON document."""
    description = "JSON object"
    content_type = "application/json"
    def format(self, result=None):
        """Render output as JSON.

        Delegates validation to BaseFormat.format, then serialises ``result``
        with 4-space indentation. Serialisation errors are logged and re-raised.
        """
        self.log_method_enter(method=self.current_method)
        # Bug fix: super(self.__class__, ...) recurses infinitely as soon as
        # this class is subclassed; name the class explicitly instead.
        super(JsonFormat, self).format(result=result)
        self.log.debug(msg="creating json output")
        try:
            output = json.dumps(result, indent=4)
        except Exception as e:
            self.log.error(msg="{}".format(e))
            raise e
        self.log_method_exit(method=self.current_method)
        return output
| 34.125 | 79 | 0.711355 |
from __future__ import print_function
from __future__ import unicode_literals
import json
from rptk.format import BaseFormat
class JsonFormat(BaseFormat):
description = "JSON object"
content_type = "application/json"
def format(self, result=None):
self.log_method_enter(method=self.current_method)
super(self.__class__, self).format(result=result)
self.log.debug(msg="creating json output")
try:
output = json.dumps(result, indent=4)
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self.log_method_exit(method=self.current_method)
return output
| true | true |
1c36a126d10813046b4b085fc65220a3114442db | 4,202 | py | Python | server/api/covid_data_handler.py | will-riddy/Covid-Dashboard | f54b1e39435de59366163e5ffe394b47c86fd67a | [
"MIT"
] | null | null | null | server/api/covid_data_handler.py | will-riddy/Covid-Dashboard | f54b1e39435de59366163e5ffe394b47c86fd67a | [
"MIT"
] | null | null | null | server/api/covid_data_handler.py | will-riddy/Covid-Dashboard | f54b1e39435de59366163e5ffe394b47c86fd67a | [
"MIT"
] | null | null | null | '''Peforms all data handling for the covid data'''
import csv, os
from uk_covid19 import Cov19API
from requests.exceptions import ConnectionError
LOCATION = os.environ['LOCATION']
LOCATION_TYPE = os.environ['LOCATION_TYPE']
NATION = os.environ['NATION']
def parse_csv_data(csv_file: str) -> list:
    """Read a CSV file and return all its rows as lists of strings.

    The header row is the first element of the returned list, followed by the
    data rows in file order (same layout the original append/insert dance
    produced, without abusing a list comprehension for side effects).

    Args:
        csv_file: path to the CSV file to read.

    Returns:
        list[list[str]]: header row first, then every data row.
    """
    with open(csv_file, 'r') as handle:
        return list(csv.reader(handle))
def process_covid_data(covid_csv_data: list) -> tuple[int, int, int]:
    """Process data from the covid CSV template.

    Args:
        covid_csv_data: rows as returned by parse_csv_data — header first,
            newest data row first after that.

    Returns:
        (cum_cases, curr_hospital_cases, cum_deaths) where:
        - cum_cases sums newCasesBySpecimenDate over data rows 3-9 (the
          first two rows are skipped as incomplete — 7 days total);
        - curr_hospital_cases is the newest hospitalCases value (0 if blank);
        - cum_deaths is the newest non-blank cumulative-deaths value (0 if
          none present).
    """
    header = covid_csv_data[0]
    hosp_cases_i = header.index('hospitalCases')
    deaths_i = header.index('cumDailyNsoDeathsByDeathDate')
    cum_cases_i = header.index('newCasesBySpecimenDate')
    rows = covid_csv_data[1:]
    # First non-blank cumulative deaths value, scanning newest-first.
    cum_deaths = next(
        (int(row[deaths_i]) for row in rows if row[deaths_i] != ''), 0
    )
    # Seven-day case total, skipping the two most recent (incomplete) rows.
    cum_cases = sum(int(row[cum_cases_i]) for row in rows[2:9])
    newest_hosp = rows[0][hosp_cases_i]
    curr_hospital_cases = int(newest_hosp) if newest_hosp != '' else 0
    return cum_cases, curr_hospital_cases, cum_deaths
def covid_API_request(location : str = LOCATION, location_type : str = LOCATION_TYPE) -> dict:
    '''Fetch covid data from the uk-covid19 API, keyed by ISO date string.

    Defaults come from the LOCATION / LOCATION_TYPE environment variables read
    at import time. Each value in the returned dict is the full record for
    that date (areaName, hospitalCases, deaths, new cases, ...).
    '''
    # API filters; NOTE(review): the name `filter` shadows the builtin —
    # left as-is here, worth renaming in a behavioural change.
    filter = [
        f'areaType={location_type}',
        f'areaName={location}'
    ]
    # Response structure: which fields to request, keyed by output name.
    cases_and_deaths = {
        "areaName": "areaName",
        "areaCode": "areaCode",
        "areaType": "areaType",
        "date": "date",
        "cumDailyNsoDeathsByDeathDate": "cumDailyNsoDeathsByDeathDate",
        "hospitalCases": "hospitalCases",
        "newCasesBySpecimenDate": "newCasesBySpecimenDate"
    }
    try:
        api = Cov19API(filters=filter, structure=cases_and_deaths)
        data = api.get_json()
    except ConnectionError:
        # NOTE(review): re-raises a *new* bare ConnectionError, dropping the
        # original message/context — consider `raise` alone instead.
        raise ConnectionError
    # Re-key the record list by date (newest first, as returned by the API).
    data_dic = {}
    for line in data['data']:
        data_dic[line['date']] = line
    return data_dic
def process_covid_dic(covid_dic_data: dict) -> tuple[float, float, float]:
    """Process covid data keyed by date (newest first, as covid_API_request returns).

    Args:
        covid_dic_data: mapping of date string -> record dict with keys
            'hospitalCases', 'newCasesBySpecimenDate',
            'cumDailyNsoDeathsByDeathDate'.

    Returns:
        (seven_day_rate, hospital_cases, deaths_total) where seven_day_rate is
        the mean of new cases over entries 2-8 (the newest entry is skipped as
        incomplete), hospital_cases is the newest non-None hospital count, and
        deaths_total the newest non-None cumulative deaths. The latter two are
        None when no value is present.
    """
    seven_day_cases = 0
    day_rate_count = 0
    hospital_cases = None
    deaths_total = None
    for i, date_key in enumerate(covid_dic_data):
        entry = covid_dic_data[date_key]
        # Keep the first (newest) non-None value for each of these two fields.
        if hospital_cases is None and entry['hospitalCases'] is not None:
            hospital_cases = entry['hospitalCases']
        if deaths_total is None and entry['cumDailyNsoDeathsByDeathDate'] is not None:
            deaths_total = entry['cumDailyNsoDeathsByDeathDate']
        # Skip the first day (incomplete data); sum the following seven days.
        if 0 < i < 8:
            seven_day_cases += entry['newCasesBySpecimenDate']
            day_rate_count += 1
    # Robustness fix: with fewer than two entries the original divided by
    # zero; report a 0.0 rate instead.
    seven_day_rate = round(seven_day_cases / day_rate_count, 1) if day_rate_count else 0.0
    return seven_day_rate, hospital_cases, deaths_total
# Fetches fresh data and publishes it via the module-level `covid_data_all`.
def schedule_covid_updates(time : float = None, nation : str = NATION) -> None:
    '''Refresh the module-level covid data tuple.

    Fetches local (env-configured) and national data, processes both, and
    stores (local_7day_rate, national_7day_rate, hospital_cases, deaths) in
    the global `covid_data_all`. The `time` parameter is accepted for
    scheduler compatibility but is not used inside this function.
    '''
    global covid_data_all
    seven_day, _, _ = process_covid_dic(covid_API_request())
    seven_day_nation, hospital, deaths = process_covid_dic(covid_API_request(location=nation, location_type='nation'))
    covid_data_all = (seven_day, seven_day_nation, hospital, deaths)
covid_data_all = None
if '__main__' == __name__:
covid_API_request() | 32.323077 | 119 | 0.652546 |
import csv, os
from uk_covid19 import Cov19API
from requests.exceptions import ConnectionError
LOCATION = os.environ['LOCATION']
LOCATION_TYPE = os.environ['LOCATION_TYPE']
NATION = os.environ['NATION']
def parse_csv_data(csv_file : str) -> list:
csv_list = []
with open(csv_file, 'r') as data:
data = csv.reader(data)
header = next(data)
[csv_list.append(row) for row in data]
csv_list.insert(0, header)
return csv_list
def process_covid_data(covid_csv_data : list) -> tuple[int, int, int]:
hosp_cases_i = covid_csv_data[0].index('hospitalCases')
deaths_i = covid_csv_data[0].index('cumDailyNsoDeathsByDeathDate')
cum_cases_i = covid_csv_data[0].index('newCasesBySpecimenDate')
hosp_cases_list = []
deaths_list = []
cum_cases_list = []
for i, row in enumerate(covid_csv_data[1:]):
hosp_cases_list.append(row[hosp_cases_i])
deaths_list.append(row[deaths_i])
cum_cases_list.append(row[cum_cases_i])
cum_deaths = 0
for death in deaths_list:
if death != '':
cum_deaths = int(death)
break
cum_cases = 0
for case in cum_cases_list[2:9]:
cum_cases += int(case)
if hosp_cases_list[0] != '':
curr_hospital_cases = int(hosp_cases_list[0])
else:
curr_hospital_cases = 0
return cum_cases, curr_hospital_cases, cum_deaths
def covid_API_request(location : str = LOCATION, location_type : str = LOCATION_TYPE) -> dict:
filter = [
f'areaType={location_type}',
f'areaName={location}'
]
cases_and_deaths = {
"areaName": "areaName",
"areaCode": "areaCode",
"areaType": "areaType",
"date": "date",
"cumDailyNsoDeathsByDeathDate": "cumDailyNsoDeathsByDeathDate",
"hospitalCases": "hospitalCases",
"newCasesBySpecimenDate": "newCasesBySpecimenDate"
}
try:
api = Cov19API(filters=filter, structure=cases_and_deaths)
data = api.get_json()
except ConnectionError:
raise ConnectionError
data_dic = {}
for line in data['data']:
data_dic[line['date']] = line
return data_dic
def process_covid_dic(covid_dic_data : dict) -> tuple[float, float, float]:
seven_day_cases = 0
not_found_deaths = True
deaths_total = None
hospital_cases = None
not_found_hospital = True
day_rate_count = 0
for i, data in enumerate(covid_dic_data):
if covid_dic_data[data]['hospitalCases'] is not None and not_found_hospital:
hospital_cases = covid_dic_data[data]['hospitalCases']
not_found_hospital = False
if i > 0 and i < 8:
seven_day_cases += covid_dic_data[data]['newCasesBySpecimenDate']
day_rate_count += 1
if covid_dic_data[data]['cumDailyNsoDeathsByDeathDate'] is not None and not_found_deaths:
deaths_total = covid_dic_data[data]['cumDailyNsoDeathsByDeathDate']
not_found_deaths = False
seven_day_rate = round(seven_day_cases / day_rate_count, 1)
return seven_day_rate, hospital_cases, deaths_total
def schedule_covid_updates(time : float = None, nation : str = NATION) -> None:
global covid_data_all
seven_day, _, _ = process_covid_dic(covid_API_request())
seven_day_nation, hospital, deaths = process_covid_dic(covid_API_request(location=nation, location_type='nation'))
covid_data_all = (seven_day, seven_day_nation, hospital, deaths)
covid_data_all = None
if '__main__' == __name__:
covid_API_request() | true | true |
1c36a1fefd2e1a9dfc70b3a72627449bab62991e | 5,970 | py | Python | pyepal/models/nt.py | kjappelbaum/pyepal | fde3172e2d0e945f4e63a0289f049285730b5f27 | [
"Apache-2.0"
] | 22 | 2020-11-06T00:00:06.000Z | 2022-02-17T15:42:19.000Z | pyepal/models/nt.py | kjappelbaum/pyepal | fde3172e2d0e945f4e63a0289f049285730b5f27 | [
"Apache-2.0"
] | 93 | 2020-11-03T20:19:46.000Z | 2022-03-04T17:23:29.000Z | pyepal/models/nt.py | kjappelbaum/pypal | 006be9440c3fcff223b1b2a3f98222d732c60a48 | [
"Apache-2.0"
] | 1 | 2021-06-01T03:37:39.000Z | 2021-06-01T03:37:39.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 PyePAL authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to build neutral tangents models for PALNT
Depending on the dataset there might be some issues with these models,
some tricks are listed in https://github.com/google/neural-tangents/issues/76
1. Use Erf as activation
2. Initialize the weights with larger standard deviation
3. Standardize the data
The first two points are done by default in the `build_dense_network` function
Note that following the law of total variance the prior, intialized via
W_std and b_std give an upper bound on the std of the posterior
"""
from dataclasses import dataclass
from typing import Callable, Sequence, Union
@dataclass
class NTModel:
    """Bundle of the callables that make up a neural-tangents model."""
    # Initialization functions construct parameters for neural networks
    # given a random key and an input shape.
    init_fn: Callable
    # Apply functions do computations with finite-width neural networks.
    apply_fn: Callable
    # Analytic kernel function as returned by `neural_tangents.stax.serial`.
    kernel_fn: Callable
    # Posterior predictive function; None until set later (presumably by
    # PALNT after conditioning on data — confirm with the caller).
    predict_fn: Union[Callable, None] = None
    scaler: Union[Callable, None] = None  # Used to store Standard Scaler objects
    params: Union[list, None] = None  # Used to store parameters for the ensemble models
@dataclass
class JaxOptimizer:
    """Triple of callables as returned by the `jax.experimental.optimizers` API."""
    opt_init: Callable  # builds optimizer state from initial parameters
    opt_update: Callable  # performs one update step (step, grads, state) -> state
    get_params: Callable  # extracts the current parameters from optimizer state
__all__ = ["NTModel", "build_dense_network", "JaxOptimizer"]
def build_dense_network(
    hidden_layers: Sequence[int],
    activations: Union[Sequence, str] = "erf",
    w_std: float = 2.5,
    b_std=1,
) -> NTModel:
    """Utility function to build a simple feedforward network with the
    neural tangents library.
    Args:
        hidden_layers (Sequence[int]): Iterable with the number of neurons.
            For example, [512, 512]
        activations (Union[Sequence, str], optional):
            Iterable with neural_tangents.stax activations or "relu" or "erf".
            Defaults to "erf".
        w_std (float): Standard deviation of the weight distribution.
        b_std (float): Standard deviation of the bias distribution.
    Returns:
        NTModel: jitted init, apply and
            kernel functions, predict_function (None)
    Raises:
        ValueError: if `activations` is a string other than "relu"/"erf".
    """
    from jax.config import config  # pylint:disable=import-outside-toplevel
    config.update("jax_enable_x64", True)
    from jax import jit  # pylint:disable=import-outside-toplevel
    from neural_tangents import stax  # pylint:disable=import-outside-toplevel
    assert len(hidden_layers) >= 1, "You must provide at least one hidden layer"
    if activations is None:
        activations = [stax.Relu() for _ in hidden_layers]
    elif isinstance(activations, str):
        if activations.lower() == "relu":
            activations = [stax.Relu() for _ in hidden_layers]
        elif activations.lower() == "erf":
            activations = [stax.Erf() for _ in hidden_layers]
        else:
            # Bug fix: previously an unknown string fell through unchanged and
            # was later zipped character-by-character with hidden_layers.
            raise ValueError(
                "Unknown activation string; use 'relu', 'erf', "
                "or provide `neural_tangents.stax` activations"
            )
    else:
        for activation in activations:
            assert callable(
                activation
            ), "You need to provide `neural_tangents.stax` activations"
        assert len(activations) == len(
            hidden_layers
        ), "The number of hidden layers should match the number of nonlinearities"
    # Interleave Dense layers with their activations, ending in a 1-unit head.
    stack = []
    for hidden_layer, activation in zip(hidden_layers, activations):
        stack.append(stax.Dense(hidden_layer, W_std=w_std, b_std=b_std))
        stack.append(activation)
    stack.append(stax.Dense(1, W_std=w_std, b_std=b_std))
    init_fn, apply_fn, kernel_fn = stax.serial(*stack)
    return NTModel(init_fn, jit(apply_fn), jit(kernel_fn, static_argnums=(2,)), None)
def get_optimizer(
    learning_rate: float = 1e-4, optimizer="sdg", optimizer_kwargs: dict = None
) -> JaxOptimizer:
    """Return a `JaxOptimizer` dataclass wrapping a JAX optimizer.

    Args:
        learning_rate (float, optional): Step size. Defaults to 1e-4.
        optimizer (str, optional): Optimizer type ("adam", "adamax",
            "adagrad", "rmsprop"); anything else falls back to plain SGD.
        optimizer_kwargs (dict, optional): Extra keyword arguments forwarded
            to the optimizer constructor. Defaults to None.

    Returns:
        JaxOptimizer
    """
    from jax.config import config  # pylint:disable=import-outside-toplevel
    config.update("jax_enable_x64", True)
    from jax import jit  # pylint:disable=import-outside-toplevel
    from jax.experimental import optimizers  # pylint:disable=import-outside-toplevel
    if optimizer_kwargs is None:
        optimizer_kwargs = {}
    # Dispatch table replaces the original if/elif ladder; unknown names fall
    # back to SGD exactly as before.
    builders = {
        "adam": optimizers.adam,
        "adagrad": optimizers.adagrad,
        "adamax": optimizers.adamax,
        "rmsprop": optimizers.rmsprop,
    }
    builder = builders.get(optimizer.lower(), optimizers.sgd)
    opt_init, opt_update, get_params = builder(learning_rate, **optimizer_kwargs)
    return JaxOptimizer(opt_init, jit(opt_update), get_params)
| 35.535714 | 88 | 0.691625 |
from dataclasses import dataclass
from typing import Callable, Sequence, Union
@dataclass
class NTModel:
    """Bundle of the callables produced by `neural_tangents.stax.serial`
    (see `build_dense_network`), plus optional fitted state."""
    init_fn: Callable    # parameter initializer from stax.serial
    apply_fn: Callable   # forward pass; jitted in build_dense_network
    kernel_fn: Callable  # NTK/NNGP kernel; jitted with static_argnums=(2,)
    # The fields below are left None by build_dense_network; presumably they
    # are populated after training/inference setup -- confirm with callers.
    predict_fn: Union[Callable, None] = None
    scaler: Union[Callable, None] = None
    params: Union[list, None] = None
@dataclass
class JaxOptimizer:
    """Triple returned by the `jax.experimental.optimizers` factories
    (see `get_optimizer`); `opt_update` is jitted there."""
    opt_init: Callable    # params -> optimizer state
    opt_update: Callable  # (step, grads, state) -> new state
    get_params: Callable  # optimizer state -> params
__all__ = ["NTModel", "build_dense_network", "JaxOptimizer"]
def build_dense_network(
    hidden_layers: Sequence[int],
    activations: Union[Sequence, str] = "erf",
    w_std: float = 2.5,
    b_std=1,
) -> NTModel:
    """Build a fully-connected `neural_tangents` network with a scalar output.

    Args:
        hidden_layers: width of each hidden Dense layer (at least one).
        activations: "relu", "erf", None (defaults to Relu), or a sequence of
            `neural_tangents.stax` activation layers, one per hidden layer.
        w_std: weight initialization standard deviation for every Dense layer.
        b_std: bias initialization standard deviation for every Dense layer.

    Returns:
        NTModel with jitted apply/kernel functions and `predict_fn=None`.
    """
    from jax.config import config
    # Enable float64 before touching the rest of JAX.
    config.update("jax_enable_x64", True)
    from jax import jit
    from neural_tangents import stax
    assert len(hidden_layers) >= 1, "You must provide at least one hidden layer"
    if activations is None:
        activations = [stax.Relu() for _ in hidden_layers]
    elif isinstance(activations, str):
        # NOTE(review): a string other than "relu"/"erf" falls through
        # unchanged, so zip() below would pair layer widths with single
        # characters -- confirm whether this should raise instead.
        if activations.lower() == "relu":
            activations = [stax.Relu() for _ in hidden_layers]
        elif activations.lower() == "erf":
            activations = [stax.Erf() for _ in hidden_layers]
    else:
        for activation in activations:
            assert callable(
                activation
            ), "You need to provide `neural_tangents.stax` activations"
        assert len(activations) == len(
            hidden_layers
        ), "The number of hidden layers should match the number of nonlinearities"
    # Interleave Dense layers with their activations, then a scalar head.
    stack = []
    for hidden_layer, activation in zip(hidden_layers, activations):
        stack.append(stax.Dense(hidden_layer, W_std=w_std, b_std=b_std))
        stack.append(activation)
    stack.append(stax.Dense(1, W_std=w_std, b_std=b_std))
    init_fn, apply_fn, kernel_fn = stax.serial(*stack)
    return NTModel(init_fn, jit(apply_fn), jit(kernel_fn, static_argnums=(2,)), None)
def get_optimizer(
    learning_rate: float = 1e-4, optimizer="sdg", optimizer_kwargs: dict = None
) -> JaxOptimizer:
    """Return a `JaxOptimizer` dataclass for a JAX optimizer.

    Args:
        learning_rate: Step size. Defaults to 1e-4.
        optimizer: Optimizer name ("adam", "adagrad", "adamax", "rmsprop");
            any other value -- including the misspelled default "sdg" --
            falls through to plain SGD in the final else branch.
        optimizer_kwargs: Extra keyword arguments forwarded to the factory.

    Returns:
        JaxOptimizer with the update step jitted.
    """
    from jax.config import config
    # Enable float64 before touching the rest of JAX.
    config.update("jax_enable_x64", True)
    from jax import jit
    from jax.experimental import optimizers
    if optimizer_kwargs is None:
        optimizer_kwargs = {}
    optimizer = optimizer.lower()
    if optimizer == "adam":
        opt_init, opt_update, get_params = optimizers.adam(
            learning_rate, **optimizer_kwargs
        )
    elif optimizer == "adagrad":
        opt_init, opt_update, get_params = optimizers.adagrad(
            learning_rate, **optimizer_kwargs
        )
    elif optimizer == "adamax":
        opt_init, opt_update, get_params = optimizers.adamax(
            learning_rate, **optimizer_kwargs
        )
    elif optimizer == "rmsprop":
        opt_init, opt_update, get_params = optimizers.rmsprop(
            learning_rate, **optimizer_kwargs
        )
    else:
        # Fallback: anything unrecognized (incl. the "sdg" default) -> SGD.
        opt_init, opt_update, get_params = optimizers.sgd(
            learning_rate, **optimizer_kwargs
        )
    opt_update = jit(opt_update)
    return JaxOptimizer(opt_init, opt_update, get_params)
| true | true |
1c36a49acd778e78ca83a11411df2ce5a0a7fbb2 | 137 | py | Python | dev/local/data/all.py | nareshr8/fastai_docs | c34bf4e0fe296cb4ff8410dea3895c0ad2f6fe93 | [
"Apache-2.0"
] | null | null | null | dev/local/data/all.py | nareshr8/fastai_docs | c34bf4e0fe296cb4ff8410dea3895c0ad2f6fe93 | [
"Apache-2.0"
] | null | null | null | dev/local/data/all.py | nareshr8/fastai_docs | c34bf4e0fe296cb4ff8410dea3895c0ad2f6fe93 | [
"Apache-2.0"
] | null | null | null | from .block import *
from .core import *
from .external import *
from .pipeline import *
from .source import *
from .transform import *
| 17.125 | 24 | 0.729927 | from .block import *
from .core import *
from .external import *
from .pipeline import *
from .source import *
from .transform import *
| true | true |
1c36a514cf27aa40d3e040e745c93484f44d8225 | 1,520 | py | Python | Examples/sin_approximation.py | martinferianc/WineClassification-EIE3 | f84e3a57dff336f03aa8358102977f0e936be06c | [
"MIT"
] | 5 | 2018-07-12T11:58:35.000Z | 2019-08-20T11:18:00.000Z | Examples/sin_approximation.py | fexter-svk/WineClassification-EIE3 | f84e3a57dff336f03aa8358102977f0e936be06c | [
"MIT"
] | null | null | null | Examples/sin_approximation.py | fexter-svk/WineClassification-EIE3 | f84e3a57dff336f03aa8358102977f0e936be06c | [
"MIT"
] | null | null | null | import numpy as np
import random
import matplotlib.pyplot as plt
def gradient_descent(X, Y, lr, max_iter=1000, sc=0.001):
    """Fit linear weights minimizing ||XW - Y||^2 by batch gradient descent.

    Args:
        X: (N, D) design matrix.
        Y: (N,) target vector.
        lr: learning rate (step size).
        max_iter: maximum number of iterations.
        sc: stopping criterion -- stop once the absolute change in the
            squared-error cost between consecutive steps is <= sc.

    Returns:
        W: (D,) fitted weight vector.
    """
    W = np.ones(X.shape[1])
    scale = 2.0 / len(X)   # constant gradient factor, hoisted out of the loop
    previous_loss = 0.0
    current_loss = -1.0    # sentinel so the first convergence check passes
    i = 0
    # BUG FIX: the tolerance was hard-coded to 0.001, silently ignoring the
    # `sc` parameter callers pass in.
    while i < max_iter and abs(previous_loss - current_loss) > sc:
        loss = np.dot(X, W) - Y
        current_loss = np.sum(loss ** 2)
        print("Iteration %d | Cost: %f" % (i, current_loss))
        gradient = scale * np.dot(X.transpose(), loss)
        previous_loss = current_loss
        W -= lr * gradient
        # Re-evaluate the cost after the update so the loop condition
        # compares pre- and post-step costs.
        current_loss = np.sum((np.dot(X, W) - Y) ** 2)
        i += 1
    return W
def linear_regression(X, Y, reg=None):
    """Closed-form (ridge) least squares: W = (X^T X + reg*I)^-1 X^T Y.

    Args:
        X: (N, D) design matrix.
        Y: (N,) target vector.
        reg: L2 regularization strength; None is treated as 0 (plain OLS).

    Returns:
        W: (D,) weight vector.
    """
    if reg is None:
        reg = 0
    # Solve the normal equations directly: np.linalg.solve is more
    # numerically stable (and cheaper) than forming the explicit inverse.
    A = np.matmul(X.transpose(), X) + reg * np.identity(X.shape[1])
    b = np.matmul(X.transpose(), Y)
    return np.linalg.solve(A, b)
def generate_data(N, power, var=0.1):
    """Sample N noisy points of sin(2*pi*x), x ~ U(0, 1), with polynomial features.

    Returns:
        X: (N, power+1) matrix; row i is [x_i**0, x_i**1, ..., x_i**power].
        Y: (N,) noisy targets sin(2*pi*x_i) + Normal(0, var).
    """
    features = np.zeros(shape=(N, power + 1))
    targets = np.zeros(N)
    for row in range(N):
        # Draw the abscissa first, then the noise, so the RNG stream order
        # is fixed: one random.uniform followed by one np.random.normal.
        sample = random.uniform(0, 1)
        noisy_value = np.sin(sample * 2 * np.pi) + np.random.normal(0, var)
        features[row] = np.array(
            [sample ** exponent for exponent in range(power + 1)])
        targets[row] = noisy_value
    return features, targets
# Fit a degree-120 polynomial to noisy sin(2*pi*x) samples and plot the fit.
train = 100
X,Y = generate_data(1000, 120, 0.01)
#W = gradient_descent(X,Y,max_iter = 5000,lr=0.09,sc = 0.0001)
# Ridge regression on the first `train` samples only.
W = linear_regression(X[:train],Y[:train],0.001)
print(W)
# NOTE(review): range(len(X[train:])) yields indices 0..899, so x_test takes
# the FIRST 900 rows (overlapping the training slice) rather than X[train:]
# -- confirm whether X[train:] was intended.
x_test = [X[i] for i in range(len(X[train:]))]
y_test = []
for x in x_test:
    y_test.append(np.dot(W,x))
# Column 1 of each feature row is the raw x value (x**1).
x_test = [x[1] for x in x_test]
plt.figure()
X = [x[1] for x in X]
plt.scatter(X,Y)
plt.scatter(x_test,y_test)
plt.show()
| 25.762712 | 115 | 0.594737 | import numpy as np
import random
import matplotlib.pyplot as plt
def gradient_descent(X,Y,lr, max_iter=1000, sc=0.001):
    """Batch gradient descent on the squared error ||XW - Y||^2.

    NOTE(review): the `sc` parameter is ignored -- the convergence tolerance
    is hard-coded to 0.001 in the while condition.
    """
    W = np.ones(X.shape[1])
    i = 0
    previous_loss = 0
    current_loss = -1  # sentinel so the first convergence check passes
    while i<max_iter and abs(previous_loss-current_loss)>0.001:
        h = np.dot(X,W)
        loss = h-Y
        current_loss = np.sum(loss**2)
        print("Iteration %d | Cost: %f" % (i, current_loss))
        gradient = (2/len(X))*np.dot(X.transpose(), loss)
        previous_loss = current_loss
        W-=lr*gradient
        # Cost re-evaluated after the step so the loop condition compares
        # pre- and post-update costs.
        h = np.dot(X,W)
        current_loss = np.sum((h - Y)**2)
        i+=1
    return W
def linear_regression(X,Y,reg=None):
    """Closed-form ridge regression: W = (X^T X + reg*I)^-1 X^T Y.

    `reg=None` is treated as 0 (ordinary least squares).
    """
    if reg is None:
        reg = 0
    W = np.matmul(np.linalg.inv(np.matmul(X.transpose(),X)+reg*np.identity(X.shape[1])),np.matmul(X.transpose(),Y))
    return W
def generate_data(N, power, var=0.1):
    """Sample N noisy points of sin(2*pi*x), x ~ U(0,1).

    Returns X of shape (N, power+1) where row i is [x_i**0 .. x_i**power],
    and Y of shape (N,) with targets sin(2*pi*x_i) + Normal(0, var).
    """
    X = np.zeros(shape=(N,power+1))
    Y = np.zeros(N)
    for i in range(N):
        x = random.uniform(0,1)
        y = np.sin(x*2*np.pi) + np.random.normal(0,var)
        X[i] = np.array([x**j for j in range(power+1)])
        Y[i]=y
    return X,Y
# Fit a degree-120 polynomial to noisy sin samples and plot the result.
train = 100
X,Y = generate_data(1000, 120, 0.01)
W = linear_regression(X[:train],Y[:train],0.001)
print(W)
# NOTE(review): indices 0..len(X)-train-1 overlap the training slice;
# confirm whether X[train:] was intended.
x_test = [X[i] for i in range(len(X[train:]))]
y_test = []
for x in x_test:
    y_test.append(np.dot(W,x))
# Column 1 of each feature row holds the raw x value.
x_test = [x[1] for x in x_test]
plt.figure()
X = [x[1] for x in X]
plt.scatter(X,Y)
plt.scatter(x_test,y_test)
plt.show()
| true | true |
1c36a53e68b3b6ebb0e99c1bfcedccd2007599d3 | 995 | py | Python | tests/test_utils.py | pasztorpisti/django-universal-view-decorator | 95824b5d1ecd405b00d7845fde6e707a40e10752 | [
"MIT"
] | 1 | 2016-10-16T00:04:04.000Z | 2016-10-16T00:04:04.000Z | tests/test_utils.py | pasztorpisti/django-universal-view-decorator | 95824b5d1ecd405b00d7845fde6e707a40e10752 | [
"MIT"
] | 3 | 2019-10-02T15:10:15.000Z | 2020-06-05T16:40:01.000Z | tests/test_utils.py | pasztorpisti/django-universal-view-decorator | 95824b5d1ecd405b00d7845fde6e707a40e10752 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django_universal_view_decorator.utils import class_property
class TestClassProperty(TestCase):
    """Tests for `class_property`: the value must be reachable from both the
    class and an instance, and via the explicit `.getter()` registration."""
    class MyClass(object):
        @class_property
        def my_class_property(cls):
            return cls, 'class_property'
    def test_with_class(self):
        # Access through the class itself.
        self.assertEqual(self.MyClass.my_class_property, (self.MyClass, 'class_property'))
    def test_with_instance(self):
        # Access through an instance still receives the class, not the instance.
        self.assertEqual(self.MyClass().my_class_property, (self.MyClass, 'class_property'))
    def test_using_getter(self):
        class MyClass(object):
            my_class_property = class_property()
            @my_class_property.getter
            def could_be_my_class_property_but_it_isnt_because_of_testing(cls):
                return cls, 'class_property'
        # Both the property attribute and the decorated function name resolve.
        self.assertEqual(MyClass.my_class_property, (MyClass, 'class_property'))
        self.assertEqual(MyClass.could_be_my_class_property_but_it_isnt_because_of_testing, (MyClass, 'class_property'))
| 36.851852 | 120 | 0.724623 | from django.test import TestCase
from django_universal_view_decorator.utils import class_property
class TestClassProperty(TestCase):
class MyClass(object):
@class_property
def my_class_property(cls):
return cls, 'class_property'
def test_with_class(self):
self.assertEqual(self.MyClass.my_class_property, (self.MyClass, 'class_property'))
def test_with_instance(self):
self.assertEqual(self.MyClass().my_class_property, (self.MyClass, 'class_property'))
def test_using_getter(self):
class MyClass(object):
my_class_property = class_property()
@my_class_property.getter
def could_be_my_class_property_but_it_isnt_because_of_testing(cls):
return cls, 'class_property'
self.assertEqual(MyClass.my_class_property, (MyClass, 'class_property'))
self.assertEqual(MyClass.could_be_my_class_property_but_it_isnt_because_of_testing, (MyClass, 'class_property'))
| true | true |
1c36a798314b7c91602a2684e93d6013332e384a | 30,398 | py | Python | galaxy/main/models.py | tima/galaxy | b371b973e0e9150f3e8b9b08068828b092982f62 | [
"Apache-2.0"
] | null | null | null | galaxy/main/models.py | tima/galaxy | b371b973e0e9150f3e8b9b08068828b092982f62 | [
"Apache-2.0"
] | null | null | null | galaxy/main/models.py | tima/galaxy | b371b973e0e9150f3e8b9b08068828b092982f62 | [
"Apache-2.0"
] | null | null | null | # (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import logging
import operator
import six
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.forms.models import model_to_dict
from django.contrib.postgres import fields as psql_fields
from django.contrib.postgres import search as psql_search
from django.contrib.postgres import indexes as psql_indexes
from django.utils import timezone
from galaxy import constants
from galaxy.main import fields
from galaxy.main.mixins import DirtyMixin
logger = logging.getLogger(__name__)
__all__ = [
'PrimordialModel', 'Platform', 'CloudPlatform', 'Category', 'Tag',
'Content', 'ImportTask', 'ImportTaskMessage', 'RepositoryVersion',
'UserAlias', 'NotificationSecret', 'Notification', 'Repository',
'Subscription', 'Stargazer', 'Namespace', 'Provider', 'ProviderNamespace',
'ContentBlock', 'ContentType'
]
# TODO(cutwater): Split models.py into multiple modules
# (e.g. models/base.py, models/content.py, etc.)
@six.python_2_unicode_compatible
class BaseModel(models.Model, DirtyMixin):
    """Common model for objects not needing name, description,
    active attributes."""
    class Meta:
        abstract = True
    # Audit timestamps managed automatically by Django.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    def __str__(self):
        # Prefer the object's name when the concrete model defines one,
        # otherwise fall back to the verbose model name.
        if hasattr(self, 'name'):
            return '{}-{}'.format(self.name, self.id)
        else:
            return '{}-{}'.format(self._meta.verbose_name, self.id)
class PrimordialModel(BaseModel):
    """Base model for CommonModel and CommonModelNameNotUnique."""
    class Meta:
        abstract = True
    # TruncatingCharField silently truncates instead of erroring on overflow.
    description = fields.TruncatingCharField(
        max_length=255, blank=True, default='')
    active = models.BooleanField(default=True, db_index=True)
class CommonModel(PrimordialModel):
    """A base model where the name is unique."""
    class Meta:
        abstract = True
    # Unique, indexed name; compare CommonModelNameNotUnique below.
    name = models.CharField(max_length=512, unique=True, db_index=True)
class CommonModelNameNotUnique(PrimordialModel):
    """A base model where the name is not unique."""
    class Meta:
        abstract = True
    # Same field as CommonModel.name but without the uniqueness constraint.
    name = models.CharField(max_length=512, unique=False, db_index=True)
# Actual models
# -----------------------------------------------------------------------------
@six.python_2_unicode_compatible
class Category(CommonModel):
    """
    A class representing the valid categories (formerly tags)
    that can be assigned to a role.
    """
    class Meta:
        ordering = ['name']
        verbose_name_plural = "Categories"
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('api:category_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class Tag(CommonModel):
    """A class representing the tags that have been assigned to roles."""
    class Meta:
        ordering = ['name']
        verbose_name_plural = 'Tags'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('api:tag_detail', args=(self.pk,))
    def get_num_roles(self):
        # Count only live, validated roles carrying this tag.
        return self.roles.filter(active=True, is_valid=True).count()
@six.python_2_unicode_compatible
class Platform(CommonModelNameNotUnique):
    """A class representing the valid platforms a role supports."""
    class Meta:
        # One row per (name, release) combination, e.g. "Ubuntu"-"xenial".
        ordering = ['name', 'release']
    release = models.CharField(
        max_length=50,
        verbose_name="Distribution Release Version",
    )
    # Space-separated alternative search terms; split on ' ' by
    # Content.get_unique_platform_search_terms().
    alias = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Search terms"
    )
    def __str__(self):
        return "{}-{}".format(self.name, self.release)
    def get_absolute_url(self):
        return reverse('api:platform_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class CloudPlatform(CommonModel):
    """A model representing the valid cloud platforms for role."""
    class Meta:
        ordering = ['name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('api:cloud_platform_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class UserAlias(models.Model):
    """
    A class representing a mapping between users and aliases to allow
    for user renaming without breaking deps.
    """
    class Meta:
        verbose_name_plural = "UserAliases"
    # The real user this alias points at; a user may have several aliases.
    alias_of = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='aliases',
    )
    alias_name = models.CharField(
        # must be in-sync with galaxy/accounts/models.py:CustomUser
        max_length=30,
        unique=True,
    )
    def __str__(self):
        return '{} (alias of {})'.format(
            self.alias_name, self.alias_of.username)
class Video(PrimordialModel):
    """A video (URL) attached to a role/content item."""
    class Meta:
        # NOTE(review): verbose_name is set to the plural form "videos";
        # confirm whether "video" was intended.
        verbose_name = "videos"
    url = models.CharField(
        max_length=256,
        blank=False,
        null=False
    )
    url.help_text = ""
    # Owning content item; deleting the content cascades to its videos.
    role = models.ForeignKey(
        'Content',
        related_name='videos',
        on_delete=models.CASCADE,
        null=True
    )
    role.help_text = ""
@six.python_2_unicode_compatible
class ContentType(BaseModel):
    """A model that represents content type (e.g. role, module, etc.)."""
    # Name is restricted to the constants.ContentType enumeration.
    name = models.CharField(max_length=512, unique=True, db_index=True,
                            choices=constants.ContentType.choices())
    description = fields.TruncatingCharField(
        max_length=255, blank=True, default='')
    @classmethod
    def get(cls, content_type):
        """Look up a row by name; accepts either a string or a
        constants.ContentType member (its .value is used)."""
        if isinstance(content_type, constants.ContentType):
            content_type = content_type.value
        return cls.objects.get(name=content_type)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('api:content_type_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class Content(CommonModelNameNotUnique):
    """A class representing a user role.

    A Content row belongs to a Repository and a Namespace and is typed by
    ContentType; search is backed by the GIN-indexed `search_vector` field.
    """
    class Meta:
        unique_together = [
            ('namespace', 'repository', 'name', 'content_type')
        ]
        ordering = ['namespace', 'repository', 'name', 'content_type']
        indexes = [
            psql_indexes.GinIndex(fields=['search_vector'])
        ]
    # Foreign keys
    # -------------------------------------------------------------------------
    dependencies = models.ManyToManyField(
        'Content',
        related_name='+',
        blank=True,
        editable=False,
    )
    platforms = models.ManyToManyField(
        'Platform',
        related_name='roles',
        verbose_name="Supported Platforms",
        blank=True,
        editable=False,
    )
    platforms.help_text = ""
    cloud_platforms = models.ManyToManyField(
        'CloudPlatform',
        related_name='roles',
        verbose_name="Cloud Platforms",
        blank=True,
        editable=False,
    )
    tags = models.ManyToManyField(
        'Tag',
        related_name='roles',
        verbose_name='Tags',
        blank=True,
        editable=False,
    )
    tags.help_text = ""
    categories = models.ManyToManyField(
        'Category',
        related_name='categories',
        verbose_name="Categories",
        blank=True,
        editable=False,
    )
    categories.help_text = ""
    repository = models.ForeignKey(
        'Repository',
        related_name='content_objects',
        editable=False,
    )
    content_type = models.ForeignKey(
        'ContentType',
        related_name='content_objects',
        editable=False,
        on_delete=models.PROTECT,
    )
    namespace = models.ForeignKey(
        'Namespace',
        related_name='content_objects',
    )
    readme = models.ForeignKey(
        'Readme',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    # Regular fields
    # -------------------------------------------------------------------------
    # TODO(cutwater): Constants left for compatibility reasons. Should be
    # removed in future.
    ANSIBLE = constants.RoleType.ANSIBLE.value
    CONTAINER = constants.RoleType.CONTAINER.value
    CONTAINER_APP = constants.RoleType.CONTAINER_APP.value
    DEMO = constants.RoleType.DEMO.value
    role_type = models.CharField(
        max_length=3,
        choices=constants.RoleType.choices(),
        null=True,
        blank=False,
        default=None,
        editable=False,
    )
    original_name = models.CharField(
        max_length=256,
        null=False
    )
    metadata = psql_fields.JSONField(
        null=False,
        default={}
    )
    github_default_branch = models.CharField(
        max_length=256,
        default='master',
        verbose_name="Default Branch"
    )
    container_yml = models.TextField(
        blank=True,
        null=True,
        verbose_name='container.yml'
    )
    min_ansible_version = models.CharField(
        max_length=10,
        blank=True,
        null=True,
        verbose_name="Min Ansible Version",
    )
    min_ansible_container_version = models.CharField(
        max_length=10,
        blank=True,
        null=True,
        verbose_name="Min Ansible Container Version",
    )
    license = models.CharField(
        max_length=50,
        blank=True,
        verbose_name="License (optional)",
    )
    company = models.CharField(
        max_length=50,
        blank=True,
        null=True,
        verbose_name="Company Name (optional)",
    )
    is_valid = models.BooleanField(
        default=False,
        editable=False,
    )
    featured = models.BooleanField(
        default=False,
        editable=False,
    )
    imported = models.DateTimeField(
        null=True,
        verbose_name="Last Import"
    )
    search_vector = psql_search.SearchVectorField()
    # Other functions and properties
    # -------------------------------------------------------------------------
    def __str__(self):
        return "{}.{}".format(self.namespace.name, self.name)
    # The properties below delegate to the owning Repository.
    @property
    def github_user(self):
        return self.repository.github_user
    @property
    def github_repo(self):
        return self.repository.github_repo
    @property
    def travis_status_url(self):
        return self.repository.travis_status_url
    @property
    def travis_build_url(self):
        return self.repository.travis_build_url
    @property
    def download_count(self):
        return self.repository.download_count
    def get_absolute_url(self):
        return reverse('api:content_detail', args=(self.pk,))
    def get_last_import(self):
        # Best effort: returns {} when the repository has no import tasks.
        try:
            return model_to_dict(self.repository.import_tasks.latest(),
                                 fields=('id', 'state'))
        except Exception:
            return dict()
    def get_unique_platforms(self):
        return [platform.name for platform in
                self.platforms.filter(active=True)
                .order_by('name').distinct('name')]
    def get_cloud_platforms(self):
        return [cp.name for cp in self.cloud_platforms.filter(active=True)]
    def get_unique_platform_versions(self):
        return [platform.release for platform in
                self.platforms.filter(active=True)
                .order_by('release').distinct('release')]
    def get_unique_platform_search_terms(self):
        # Fetch the unique set of aliases
        terms = []
        for platform in (
                self.platforms.filter(active=True)
                .exclude(alias__isnull=True).exclude(alias__exact='').all()):
            terms += platform.alias.split(' ')
        return set(terms)
    def get_username(self):
        return self.namespace
    # TODO(cutwater): Active field is not used for tags anymore.
    # get_tags() function should be replaced with tags property usage and
    # removed as well as an `active` field from Tag model.
    def get_tags(self):
        return [tag.name for tag in self.tags.filter(active=True)]
    # FIXME(cutwater): Refactor me
    def clean(self):
        # Normalizes imported metadata: truncates over-long fields and fills
        # in default minimum versions per role type.
        if self.company and len(self.company) > 50:
            # add_message(import_task, u"WARNING",
            # u"galaxy_info.company exceeds max length of 50 in meta data")
            self.company = self.company[:50]
        if not self.description:
            # add_message(import_task, u"ERROR",
            # u"missing description. Add a description to GitHub
            # repo or meta data.")
            pass
        elif len(self.description) > 255:
            # add_message(import_task, u"WARNING",
            # u"galaxy_info.description exceeds max length
            # of 255 in meta data")
            self.description = self.description[:255]
        if not self.license:
            # add_message(import_task, u"ERROR",
            # u"galaxy_info.license missing value in meta data")
            pass
        elif len(self.license) > 50:
            # add_message(import_task, u"WARNING",
            # u"galaxy_info.license exceeds max length of 50 in meta data")
            self.license = self.license[:50]
        # NOTE(review): role_type stores a 3-char string but is compared to
        # RoleType enum members here -- relies on RoleType being str-valued;
        # confirm against galaxy.constants.
        if (not self.min_ansible_version
                and self.role_type in (constants.RoleType.CONTAINER,
                                       constants.RoleType.ANSIBLE)):
            self.min_ansible_version = u'2.4'
        if (not self.min_ansible_container_version
                and self.role_type == constants.RoleType.CONTAINER_APP):
            self.min_ansible_container_version = u'0.9.0'
class Namespace(CommonModel):
    """
    Represents the aggregation of multiple namespaces across providers.
    """
    class Meta:
        ordering = ('name',)
    # Users allowed to manage content under this namespace.
    owners = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='namespaces',
        editable=True,
    )
    avatar_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Avatar URL"
    )
    location = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Location"
    )
    company = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Company Name"
    )
    email = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Email Address"
    )
    html_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Web Site URL"
    )
    is_vendor = models.BooleanField(default=False)
    def get_absolute_url(self):
        return reverse('api:namespace_detail', args=(self.pk,))
    @property
    def content_counts(self):
        # Per-content-type counts for everything under this namespace.
        return Content.objects \
            .filter(namespace=self.pk) \
            .values('content_type__name') \
            .annotate(count=models.Count('content_type__name')) \
            .order_by('content_type__name')
class Provider(CommonModel):
    """
    Valid SCM providers (e.g., GitHub, GitLab, etc.)
    """
    # Format-string template; filled with username/repository/ref in
    # Repository.get_download_url().
    download_url = models.CharField(max_length=256, null=True)
    class Meta:
        ordering = ('name',)
    def get_absolute_url(self):
        return reverse('api:active_provider_detail', args=(self.pk,))
class ProviderNamespace(PrimordialModel):
    """
    A one-to-one mapping to namespaces within each provider.
    """
    class Meta:
        ordering = ('provider', 'name')
        unique_together = [
            ('provider', 'name'),
            ('namespace', 'provider', 'name'),
        ]
    # Namespace name as known to the provider (e.g. a GitHub org/user login).
    name = models.CharField(
        max_length=256,
        verbose_name="Name",
        editable=True,
        null=False
    )
    # Galaxy namespace this provider namespace is linked to (nullable until
    # claimed).
    namespace = models.ForeignKey(
        'Namespace',
        related_name='provider_namespaces',
        editable=False,
        null=True,
        on_delete=models.CASCADE,
        verbose_name='Namespace'
    )
    provider = models.ForeignKey(
        'Provider',
        related_name='provider_namespaces',
        editable=True,
        null=True,
        on_delete=models.CASCADE,
        verbose_name='Provider'
    )
    display_name = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=False,
        verbose_name="Display Name"
    )
    avatar_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Avatar URL"
    )
    location = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Location"
    )
    company = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Company Name"
    )
    email = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Email Address"
    )
    html_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Web Site URL"
    )
    followers = models.IntegerField(
        null=True,
        editable=True,
        verbose_name="Followers"
    )
    def get_absolute_url(self):
        return reverse('api:provider_namespace_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class RepositoryVersion(BaseModel):
    """A tagged version (release) of a repository."""
    class Meta:
        unique_together = ('repository', 'version')
    repository = models.ForeignKey('Repository', related_name='versions')
    version = fields.VersionField(null=True)  # parsed version, None if tag doesn't parse
    tag = models.CharField(max_length=64)     # raw SCM tag name
    commit_sha = models.CharField(max_length=40, null=True)
    commit_date = models.DateTimeField(null=True)
    def __str__(self):
        # BUG FIX: previously referenced a non-existent `self.content`
        # attribute, so printing any RepositoryVersion raised AttributeError.
        # Keep the "<namespace>.<name>-<version>" shape using the repository.
        return "{}.{}-{}".format(
            self.repository.provider_namespace.name,
            self.repository.name,
            self.version)
@six.python_2_unicode_compatible
class ImportTaskMessage(PrimordialModel):
    """A single log line produced while running an ImportTask."""
    TYPE_INFO = constants.ImportTaskMessageType.INFO.value
    TYPE_WARNING = constants.ImportTaskMessageType.WARNING.value
    TYPE_SUCCESS = constants.ImportTaskMessageType.SUCCESS.value
    # FIXME(cutwater): ERROR and FAILED types seem to be redundant
    TYPE_FAILED = constants.ImportTaskMessageType.FAILED.value
    TYPE_ERROR = constants.ImportTaskMessageType.ERROR.value
    task = models.ForeignKey(
        'ImportTask',
        related_name='messages',
    )
    message_type = models.CharField(
        max_length=10,
        choices=constants.ImportTaskMessageType.choices(),
    )
    message_text = models.CharField(
        max_length=256,
    )
    def __str__(self):
        return "{}-{}-{}".format(
            self.task.id, self.message_type, self.message_text)
@six.python_2_unicode_compatible
class ImportTask(PrimordialModel):
    """A (Celery-backed) task importing a repository into Galaxy.

    Lifecycle: created PENDING -> start() -> RUNNING ->
    finish_success()/finish_failed().
    """
    class Meta:
        ordering = ('-id',)
        get_latest_by = 'created'
    # TODO(cutwater): Constants left for backward compatibility, to be removed
    STATE_PENDING = constants.ImportTaskState.PENDING.value
    STATE_RUNNING = constants.ImportTaskState.RUNNING.value
    STATE_FAILED = constants.ImportTaskState.FAILED.value
    STATE_SUCCESS = constants.ImportTaskState.SUCCESS.value
    repository = models.ForeignKey(
        'Repository',
        related_name='import_tasks',
    )
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='import_tasks',
        db_index=True,
    )
    import_branch = models.CharField(
        max_length=256,
        null=True,
        blank=False,
    )
    celery_task_id = models.CharField(
        max_length=100,
        blank=True,
        null=True,
    )
    state = models.CharField(
        max_length=20,
        default=STATE_PENDING,
        choices=constants.ImportTaskState.choices()
    )
    started = models.DateTimeField(
        auto_now_add=False,
        null=True,
        blank=True,
    )
    finished = models.DateTimeField(
        auto_now_add=False,
        null=True,
        blank=True,
    )
    # GitHub repo attributes at time of import
    commit = models.CharField(
        max_length=256,
        blank=True
    )
    commit_message = models.CharField(
        max_length=256,
        blank=True
    )
    commit_url = models.CharField(
        max_length=256,
        blank=True
    )
    travis_status_url = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name="Travis Build Status"
    )
    travis_build_url = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name="Travis Build URL"
    )
    def __str__(self):
        return '{}-{}'.format(self.id, self.state)
    def start(self):
        # Transition PENDING -> RUNNING and record the start time.
        self.state = ImportTask.STATE_RUNNING
        self.started = timezone.now()
        self.save()
    def finish_success(self, message=None):
        # Transition to SUCCESS, optionally logging a final message.
        self.state = ImportTask.STATE_SUCCESS
        self.finished = timezone.now()
        if message:
            self.messages.create(message_type=ImportTaskMessage.TYPE_SUCCESS,
                                 message_text=message)
        self.save()
    def finish_failed(self, reason=None):
        # Transition to FAILED, optionally logging the failure reason.
        self.state = ImportTask.STATE_FAILED
        self.finished = timezone.now()
        if reason:
            # FIXME(cutwater): Remove truncating reason to 256 chars.
            # Use TruncatingCharField or TextField for message field
            self.messages.create(message_type=ImportTaskMessage.TYPE_FAILED,
                                 message_text=str(reason)[:256])
        self.save()
@six.python_2_unicode_compatible
class NotificationSecret(PrimordialModel):
    """Shared secret used to authenticate inbound notifications
    (e.g. CI webhooks) for a specific user repository."""
    class Meta:
        ordering = ('source', 'github_user', 'github_repo')
        unique_together = ('source', 'github_user', 'github_repo')
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='notification_secrets',
        db_index=True,
    )
    source = models.CharField(
        max_length=20,
        verbose_name="Source"
    )
    github_user = models.CharField(
        max_length=256,
        verbose_name="Github Username",
    )
    github_repo = models.CharField(
        max_length=256,
        verbose_name="Github Repository",
    )
    secret = models.CharField(
        max_length=256,
        verbose_name="Secret",
        db_index=True
    )
    def __str__(self):
        return "{}-{}".format(self.owner.username, self.source)
    def repo_full_name(self):
        # "user/repo" form, matching GitHub's full_name convention.
        return "{}/{}".format(self.github_user, self.github_repo)
class Notification(PrimordialModel):
    """An inbound notification (e.g. a CI/webhook event) recorded against a
    repository, linked to the import task it triggered."""
    class Meta:
        ordering = ('-id',)
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='notifications',
        db_index=True,
        editable=False
    )
    source = models.CharField(
        max_length=20,
        verbose_name="Source",
        editable=False
    )
    github_branch = models.CharField(
        max_length=256,
        verbose_name="GitHub Branch",
        blank=True,
        editable=False
    )
    travis_build_url = models.CharField(
        max_length=256,
        blank=True
    )
    travis_status = models.CharField(
        max_length=256,
        blank=True
    )
    # Commit metadata captured from the notification payload.
    commit = models.CharField(
        max_length=256,
        blank=True
    )
    committed_at = models.DateTimeField(
        auto_now=False,
        null=True
    )
    commit_message = models.CharField(
        max_length=256,
        blank=True
    )
    repository = models.ForeignKey(
        'Repository',
        related_name='notifications',
        editable=False,
    )
    import_task = models.ForeignKey(
        ImportTask,
        related_name='notifications',
        verbose_name='Tasks',
        editable=False
    )
class Repository(BaseModel):
    """An SCM repository tracked by Galaxy, scoped to a ProviderNamespace."""
    class Meta:
        unique_together = [
            ('provider_namespace', 'name'),
            ('provider_namespace', 'original_name'),
        ]
        ordering = ('provider_namespace', 'name')
    # Foreign keys
    owners = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='repositories'
    )
    provider_namespace = models.ForeignKey(
        ProviderNamespace,
        related_name='repositories',
    )
    readme = models.ForeignKey(
        'Readme',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    # Fields
    name = models.CharField(max_length=256)
    # Name as it appears at the provider (may differ from `name`).
    original_name = models.CharField(max_length=256, null=False)
    format = models.CharField(max_length=16, null=True,
                              choices=constants.RepositoryFormat.choices())
    description = fields.TruncatingCharField(
        max_length=255, blank=True, default='')
    import_branch = models.CharField(max_length=256, null=True)
    is_enabled = models.BooleanField(default=False)
    # Repository attributes
    commit = models.CharField(max_length=256, blank=True, default='')
    commit_message = fields.TruncatingCharField(
        max_length=256, blank=True, default='')
    commit_url = models.CharField(max_length=256, blank=True, default='')
    commit_created = models.DateTimeField(
        null=True, verbose_name="Last Commit DateTime")
    stargazers_count = models.IntegerField(default=0)
    watchers_count = models.IntegerField(default=0)
    forks_count = models.IntegerField(default=0)
    open_issues_count = models.IntegerField(default=0)
    travis_status_url = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name="Travis Build Status"
    )
    travis_build_url = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name="Travis Build URL"
    )
    issue_tracker_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Issue Tracker URL",
    )
    download_count = models.IntegerField(
        default=0
    )
    @property
    def clone_url(self):
        # NOTE: hard-coded to github.com regardless of provider.
        return "https://github.com/{user}/{repo}.git".format(
            user=self.provider_namespace.name,
            repo=self.original_name
        )
    @property
    def github_user(self):
        return self.provider_namespace.name
    @property
    def github_repo(self):
        return self.original_name
    @property
    def content_counts(self):
        # Per-content-type counts for everything in this repository.
        return Content.objects \
            .filter(repository=self.pk) \
            .values('content_type__name') \
            .annotate(count=models.Count('content_type__name')) \
            .order_by('content_type__name')
    def get_absolute_url(self):
        return reverse('api:repository_detail', args=(self.pk,))
    def get_download_url(self, ref=None):
        """Build a download URL from the provider's template.

        When `ref` is omitted, falls back to the latest version tag, and
        failing that to the configured import branch.
        """
        download_url = self.provider_namespace.provider.download_url
        if ref is None:
            last_version = self.last_version()
            if last_version:
                ref = last_version.tag
            else:
                ref = self.import_branch
        return download_url.format(
            username=self.provider_namespace.name,
            repository=self.original_name,
            ref=ref,
        )
    def all_versions(self):
        # Versions with a parseable version number, newest first.
        return sorted(self.versions.filter(version__isnull=False).all(),
                      key=operator.attrgetter('version'),
                      reverse=True)
    def last_version(self):
        versions = self.all_versions()
        if versions:
            return versions[0]
        return None
class Subscription(PrimordialModel):
    """A user's subscription to a GitHub repository."""
    class Meta:
        unique_together = ('owner', 'github_user', 'github_repo')
        ordering = ('owner', 'github_user', 'github_repo')
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='subscriptions',
    )
    # TODO(cutwater): Replace with reference to a Repository model
    github_user = models.CharField(
        max_length=256,
        verbose_name="Github Username",
    )
    github_repo = models.CharField(
        max_length=256,
        verbose_name="Github Repository",
    )
class Stargazer(BaseModel):
    """Records that a user has starred a repository (one star per pair)."""
    class Meta:
        unique_together = ('owner', 'repository')
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='starred',
    )
    repository = models.ForeignKey(
        Repository,
        related_name='stars'
    )
class RefreshRoleCount(PrimordialModel):
    """Bookkeeping for a bulk role-count refresh run."""
    # Current state of the refresh task (free-form string).
    state = models.CharField(
        max_length=20
    )
    # Tallies of repositories processed in each outcome bucket.
    passed = models.IntegerField(
        default=0,
        null=True
    )
    failed = models.IntegerField(
        default=0,
        null=True
    )
    deleted = models.IntegerField(
        default=0,
        null=True
    )
    updated = models.IntegerField(
        default=0,
        null=True
    )
@six.python_2_unicode_compatible
class ContentBlock(BaseModel):
    """A named chunk of editable site content (e.g. landing-page text)."""
    name = models.SlugField(unique=True)
    content = models.TextField('content', blank=True)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Content blocks are addressed by slug, not primary key.
        return reverse('api:content_block_detail', args=(self.name,))
class Readme(BaseModel):
    """Raw + rendered README for a repository, deduplicated by hash."""
    class Meta:
        unique_together = ('repository', 'raw_hash')
    repository = models.ForeignKey(
        Repository,
        null=False,
        on_delete=models.CASCADE,
        related_name='+',
    )
    raw = models.TextField(null=False, blank=False)
    # Hash of ``raw``; part of the uniqueness constraint above.
    raw_hash = models.CharField(
        max_length=128, null=False, blank=False)
    mimetype = models.CharField(max_length=32, blank=False)
    html = models.TextField(null=False, blank=False)
    def safe_delete(self):
        """Delete this readme only when nothing references it.

        Returns True when the row was deleted, False when it is still
        referenced by a Repository or Content object.
        """
        ref_count = (
            Repository.objects.filter(readme=self).count()
            + Content.objects.filter(readme=self).count()
        )
        if ref_count:
            return False
        self.delete()
        return True
| 27.509502 | 79 | 0.620797 |
import logging
import operator
import six
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.forms.models import model_to_dict
from django.contrib.postgres import fields as psql_fields
from django.contrib.postgres import search as psql_search
from django.contrib.postgres import indexes as psql_indexes
from django.utils import timezone
from galaxy import constants
from galaxy.main import fields
from galaxy.main.mixins import DirtyMixin
logger = logging.getLogger(__name__)
# Public API of this module.
# NOTE(review): several models defined below (Video, RefreshRoleCount,
# Readme) are not listed here -- confirm the omission is intentional
# before relying on ``from ... import *``.
__all__ = [
    'PrimordialModel', 'Platform', 'CloudPlatform', 'Category', 'Tag',
    'Content', 'ImportTask', 'ImportTaskMessage', 'RepositoryVersion',
    'UserAlias', 'NotificationSecret', 'Notification', 'Repository',
    'Subscription', 'Stargazer', 'Namespace', 'Provider', 'ProviderNamespace',
    'ContentBlock', 'ContentType'
]
@six.python_2_unicode_compatible
class BaseModel(models.Model, DirtyMixin):
    """Abstract base adding created/modified timestamps to all models."""
    class Meta:
        abstract = True
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    def __str__(self):
        # Prefer the instance name when the concrete model defines one.
        if hasattr(self, 'name'):
            return '{}-{}'.format(self.name, self.id)
        else:
            return '{}-{}'.format(self._meta.verbose_name, self.id)
class PrimordialModel(BaseModel):
    """Abstract base adding a description and an ``active`` flag."""
    class Meta:
        abstract = True
    description = fields.TruncatingCharField(
        max_length=255, blank=True, default='')
    active = models.BooleanField(default=True, db_index=True)
class CommonModel(PrimordialModel):
    """Abstract base for models with a globally unique ``name``."""
    class Meta:
        abstract = True
    name = models.CharField(max_length=512, unique=True, db_index=True)
class CommonModelNameNotUnique(PrimordialModel):
    """Abstract base for named models whose name need not be unique."""
    class Meta:
        abstract = True
    name = models.CharField(max_length=512, unique=False, db_index=True)
@six.python_2_unicode_compatible
class Category(CommonModel):
    """A role category (legacy taxonomy)."""
    class Meta:
        ordering = ['name']
        verbose_name_plural = "Categories"
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('api:category_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class Tag(CommonModel):
    """A free-form tag applied to roles."""
    class Meta:
        ordering = ['name']
        verbose_name_plural = 'Tags'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('api:tag_detail', args=(self.pk,))
    def get_num_roles(self):
        """Count of active, valid roles carrying this tag."""
        return self.roles.filter(active=True, is_valid=True).count()
@six.python_2_unicode_compatible
class Platform(CommonModelNameNotUnique):
    """An OS distribution + release a role claims to support."""
    class Meta:
        ordering = ['name', 'release']
    release = models.CharField(
        max_length=50,
        verbose_name="Distribution Release Version",
    )
    # Space-separated alternative names used when searching platforms.
    alias = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Search terms"
    )
    def __str__(self):
        return "{}-{}".format(self.name, self.release)
    def get_absolute_url(self):
        return reverse('api:platform_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class CloudPlatform(CommonModel):
    """A cloud provider a role claims to support."""
    class Meta:
        ordering = ['name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('api:cloud_platform_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class UserAlias(models.Model):
    """An alternative username pointing at a real user account."""
    class Meta:
        verbose_name_plural = "UserAliases"
    alias_of = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='aliases',
    )
    alias_name = models.CharField(
        max_length=30,
        unique=True,
    )
    def __str__(self):
        return '{} (alias of {})'.format(
            self.alias_name, self.alias_of.username)
class Video(PrimordialModel):
    """A video URL attached to a piece of content (role)."""
    class Meta:
        verbose_name = "videos"
    url = models.CharField(
        max_length=256,
        blank=False,
        null=False
    )
    # help_text deliberately cleared so the admin shows no hint.
    url.help_text = ""
    role = models.ForeignKey(
        'Content',
        related_name='videos',
        on_delete=models.CASCADE,
        null=True
    )
    role.help_text = ""
@six.python_2_unicode_compatible
class ContentType(BaseModel):
    """Lookup table of supported content types (role, module, etc.)."""
    name = models.CharField(max_length=512, unique=True, db_index=True,
                            choices=constants.ContentType.choices())
    description = fields.TruncatingCharField(
        max_length=255, blank=True, default='')
    @classmethod
    def get(cls, content_type):
        """Fetch the row for a content type name or enum member."""
        if isinstance(content_type, constants.ContentType):
            content_type = content_type.value
        return cls.objects.get(name=content_type)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('api:content_type_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class Content(CommonModelNameNotUnique):
    """A piece of importable content (role, module, ...) in a repository."""
    class Meta:
        unique_together = [
            ('namespace', 'repository', 'name', 'content_type')
        ]
        ordering = ['namespace', 'repository', 'name', 'content_type']
        indexes = [
            psql_indexes.GinIndex(fields=['search_vector'])
        ]
    # ------------------------------------------------------------------
    # Relations
    # ------------------------------------------------------------------
    dependencies = models.ManyToManyField(
        'Content',
        related_name='+',
        blank=True,
        editable=False,
    )
    platforms = models.ManyToManyField(
        'Platform',
        related_name='roles',
        verbose_name="Supported Platforms",
        blank=True,
        editable=False,
    )
    platforms.help_text = ""
    cloud_platforms = models.ManyToManyField(
        'CloudPlatform',
        related_name='roles',
        verbose_name="Cloud Platforms",
        blank=True,
        editable=False,
    )
    tags = models.ManyToManyField(
        'Tag',
        related_name='roles',
        verbose_name='Tags',
        blank=True,
        editable=False,
    )
    tags.help_text = ""
    categories = models.ManyToManyField(
        'Category',
        related_name='categories',
        verbose_name="Categories",
        blank=True,
        editable=False,
    )
    categories.help_text = ""
    repository = models.ForeignKey(
        'Repository',
        related_name='content_objects',
        editable=False,
    )
    content_type = models.ForeignKey(
        'ContentType',
        related_name='content_objects',
        editable=False,
        on_delete=models.PROTECT,
    )
    namespace = models.ForeignKey(
        'Namespace',
        related_name='content_objects',
    )
    readme = models.ForeignKey(
        'Readme',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    # Convenience aliases for the role-type enum values.
    ANSIBLE = constants.RoleType.ANSIBLE.value
    CONTAINER = constants.RoleType.CONTAINER.value
    CONTAINER_APP = constants.RoleType.CONTAINER_APP.value
    DEMO = constants.RoleType.DEMO.value
    # ------------------------------------------------------------------
    # Regular fields
    # ------------------------------------------------------------------
    role_type = models.CharField(
        max_length=3,
        choices=constants.RoleType.choices(),
        null=True,
        blank=False,
        default=None,
        editable=False,
    )
    original_name = models.CharField(
        max_length=256,
        null=False
    )
    # NOTE(review): mutable default ``{}`` is shared across instances in
    # Django; confirm JSONField copies it (newer Django requires
    # ``default=dict``).
    metadata = psql_fields.JSONField(
        null=False,
        default={}
    )
    github_default_branch = models.CharField(
        max_length=256,
        default='master',
        verbose_name="Default Branch"
    )
    container_yml = models.TextField(
        blank=True,
        null=True,
        verbose_name='container.yml'
    )
    min_ansible_version = models.CharField(
        max_length=10,
        blank=True,
        null=True,
        verbose_name="Min Ansible Version",
    )
    min_ansible_container_version = models.CharField(
        max_length=10,
        blank=True,
        null=True,
        verbose_name="Min Ansible Container Version",
    )
    license = models.CharField(
        max_length=50,
        blank=True,
        verbose_name="License (optional)",
    )
    company = models.CharField(
        max_length=50,
        blank=True,
        null=True,
        verbose_name="Company Name (optional)",
    )
    is_valid = models.BooleanField(
        default=False,
        editable=False,
    )
    featured = models.BooleanField(
        default=False,
        editable=False,
    )
    imported = models.DateTimeField(
        null=True,
        verbose_name="Last Import"
    )
    # Full-text search vector, indexed via the GIN index in Meta.
    search_vector = psql_search.SearchVectorField()
    def __str__(self):
        return "{}.{}".format(self.namespace.name, self.name)
    # Legacy pass-through accessors delegating to the owning repository.
    @property
    def github_user(self):
        return self.repository.github_user
    @property
    def github_repo(self):
        return self.repository.github_repo
    @property
    def travis_status_url(self):
        return self.repository.travis_status_url
    @property
    def travis_build_url(self):
        return self.repository.travis_build_url
    @property
    def download_count(self):
        return self.repository.download_count
    def get_absolute_url(self):
        return reverse('api:content_detail', args=(self.pk,))
    def get_last_import(self):
        """id/state of the latest import task, or {} when none exists."""
        try:
            return model_to_dict(self.repository.import_tasks.latest(),
                                 fields=('id', 'state'))
        except Exception:
            return dict()
    def get_unique_platforms(self):
        """Distinct names of active supported platforms."""
        return [platform.name for platform in
                self.platforms.filter(active=True)
                .order_by('name').distinct('name')]
    def get_cloud_platforms(self):
        """Names of active supported cloud platforms."""
        return [cp.name for cp in self.cloud_platforms.filter(active=True)]
    def get_unique_platform_versions(self):
        """Distinct release strings of active supported platforms."""
        return [platform.release for platform in
                self.platforms.filter(active=True)
                .order_by('release').distinct('release')]
    def get_unique_platform_search_terms(self):
        """Set of alias words collected from active platforms."""
        terms = []
        for platform in (
                self.platforms.filter(active=True)
                .exclude(alias__isnull=True).exclude(alias__exact='').all()):
            terms += platform.alias.split(' ')
        return set(terms)
    def get_username(self):
        # Returns the Namespace object, not a string, despite the name.
        return self.namespace
    def get_tags(self):
        return [tag.name for tag in self.tags.filter(active=True)]
    def clean(self):
        """Normalize field lengths and fill default minimum versions."""
        if self.company and len(self.company) > 50:
            self.company = self.company[:50]
        if not self.description:
            # No description supplied; nothing to truncate.
            pass
        elif len(self.description) > 255:
            # Truncate to the 255-character column limit.
            self.description = self.description[:255]
        if not self.license:
            pass
        elif len(self.license) > 50:
            self.license = self.license[:50]
        if (not self.min_ansible_version
                and self.role_type in (constants.RoleType.CONTAINER,
                                       constants.RoleType.ANSIBLE)):
            self.min_ansible_version = u'2.4'
        if (not self.min_ansible_container_version
                and self.role_type == constants.RoleType.CONTAINER_APP):
            self.min_ansible_container_version = u'0.9.0'
class Namespace(CommonModel):
    """A Galaxy namespace owning content, mapped to provider namespaces."""
    class Meta:
        ordering = ('name',)
    owners = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='namespaces',
        editable=True,
    )
    avatar_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Avatar URL"
    )
    location = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Location"
    )
    company = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Company Name"
    )
    email = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Email Address"
    )
    html_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Web Site URL"
    )
    is_vendor = models.BooleanField(default=False)
    def get_absolute_url(self):
        return reverse('api:namespace_detail', args=(self.pk,))
    @property
    def content_counts(self):
        """Queryset of per-content-type counts within this namespace."""
        return Content.objects \
            .filter(namespace=self.pk) \
            .values('content_type__name') \
            .annotate(count=models.Count('content_type__name')) \
            .order_by('content_type__name')
class Provider(CommonModel):
    """A source-control provider (e.g. GitHub)."""
    # URL template with {username}/{repository}/{ref} placeholders.
    download_url = models.CharField(max_length=256, null=True)
    class Meta:
        ordering = ('name',)
    def get_absolute_url(self):
        return reverse('api:active_provider_detail', args=(self.pk,))
class ProviderNamespace(PrimordialModel):
    """A provider-side account/organization linked to a Galaxy namespace."""
    class Meta:
        ordering = ('provider', 'name')
        unique_together = [
            ('provider', 'name'),
            ('namespace', 'provider', 'name'),
        ]
    name = models.CharField(
        max_length=256,
        verbose_name="Name",
        editable=True,
        null=False
    )
    namespace = models.ForeignKey(
        'Namespace',
        related_name='provider_namespaces',
        editable=False,
        null=True,
        on_delete=models.CASCADE,
        verbose_name='Namespace'
    )
    provider = models.ForeignKey(
        'Provider',
        related_name='provider_namespaces',
        editable=True,
        null=True,
        on_delete=models.CASCADE,
        verbose_name='Provider'
    )
    # Profile details mirrored from the provider account.
    display_name = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=False,
        verbose_name="Display Name"
    )
    avatar_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Avatar URL"
    )
    location = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Location"
    )
    company = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Company Name"
    )
    email = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Email Address"
    )
    html_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        editable=True,
        verbose_name="Web Site URL"
    )
    followers = models.IntegerField(
        null=True,
        editable=True,
        verbose_name="Followers"
    )
    def get_absolute_url(self):
        return reverse('api:provider_namespace_detail', args=(self.pk,))
@six.python_2_unicode_compatible
class RepositoryVersion(BaseModel):
    """A tagged/released version of a Repository."""
    class Meta:
        unique_together = ('repository', 'version')
    repository = models.ForeignKey('Repository', related_name='versions')
    version = fields.VersionField(null=True)
    tag = models.CharField(max_length=64)
    commit_sha = models.CharField(max_length=40, null=True)
    commit_date = models.DateTimeField(null=True)
    def __str__(self):
        # BUG FIX: this model has no ``content`` attribute, so the original
        # ``self.content.namespace`` / ``self.content.name`` raised
        # AttributeError whenever the object was rendered.  Use the
        # ``repository`` relation -- the field actually defined above --
        # preserving the "<namespace>.<name>-<version>" shape.
        return "{}.{}-{}".format(
            self.repository.github_user, self.repository.name, self.version)
@six.python_2_unicode_compatible
class ImportTaskMessage(PrimordialModel):
    """A single log line produced while running an ImportTask."""
    # Convenience aliases for the message-type enum values.
    TYPE_INFO = constants.ImportTaskMessageType.INFO.value
    TYPE_WARNING = constants.ImportTaskMessageType.WARNING.value
    TYPE_SUCCESS = constants.ImportTaskMessageType.SUCCESS.value
    TYPE_FAILED = constants.ImportTaskMessageType.FAILED.value
    TYPE_ERROR = constants.ImportTaskMessageType.ERROR.value
    task = models.ForeignKey(
        'ImportTask',
        related_name='messages',
    )
    message_type = models.CharField(
        max_length=10,
        choices=constants.ImportTaskMessageType.choices(),
    )
    message_text = models.CharField(
        max_length=256,
    )
    def __str__(self):
        return "{}-{}-{}".format(
            self.task.id, self.message_type, self.message_text)
@six.python_2_unicode_compatible
class ImportTask(PrimordialModel):
    """Tracks one (possibly queued) import run for a repository."""
    class Meta:
        ordering = ('-id',)
        get_latest_by = 'created'
    # Convenience aliases for the task-state enum values.
    STATE_PENDING = constants.ImportTaskState.PENDING.value
    STATE_RUNNING = constants.ImportTaskState.RUNNING.value
    STATE_FAILED = constants.ImportTaskState.FAILED.value
    STATE_SUCCESS = constants.ImportTaskState.SUCCESS.value
    repository = models.ForeignKey(
        'Repository',
        related_name='import_tasks',
    )
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='import_tasks',
        db_index=True,
    )
    import_branch = models.CharField(
        max_length=256,
        null=True,
        blank=False,
    )
    celery_task_id = models.CharField(
        max_length=100,
        blank=True,
        null=True,
    )
    state = models.CharField(
        max_length=20,
        default=STATE_PENDING,
        choices=constants.ImportTaskState.choices()
    )
    started = models.DateTimeField(
        auto_now_add=False,
        null=True,
        blank=True,
    )
    finished = models.DateTimeField(
        auto_now_add=False,
        null=True,
        blank=True,
    )
    # Commit / CI details captured at import time.
    commit = models.CharField(
        max_length=256,
        blank=True
    )
    commit_message = models.CharField(
        max_length=256,
        blank=True
    )
    commit_url = models.CharField(
        max_length=256,
        blank=True
    )
    travis_status_url = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name="Travis Build Status"
    )
    travis_build_url = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name="Travis Build URL"
    )
    def __str__(self):
        return '{}-{}'.format(self.id, self.state)
    def start(self):
        """Mark the task as running and record the start time."""
        self.state = ImportTask.STATE_RUNNING
        self.started = timezone.now()
        self.save()
    def finish_success(self, message=None):
        """Mark the task as succeeded, optionally logging a message."""
        self.state = ImportTask.STATE_SUCCESS
        self.finished = timezone.now()
        if message:
            self.messages.create(message_type=ImportTaskMessage.TYPE_SUCCESS,
                                 message_text=message)
        self.save()
    def finish_failed(self, reason=None):
        """Mark the task as failed, logging ``reason`` truncated to 256."""
        self.state = ImportTask.STATE_FAILED
        self.finished = timezone.now()
        if reason:
            self.messages.create(message_type=ImportTaskMessage.TYPE_FAILED,
                                 message_text=str(reason)[:256])
        self.save()
@six.python_2_unicode_compatible
class NotificationSecret(PrimordialModel):
    """Shared secret used to authenticate webhook notifications."""
    class Meta:
        ordering = ('source', 'github_user', 'github_repo')
        unique_together = ('source', 'github_user', 'github_repo')
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='notification_secrets',
        db_index=True,
    )
    # Which integration sent the notification (e.g. travis, github).
    source = models.CharField(
        max_length=20,
        verbose_name="Source"
    )
    github_user = models.CharField(
        max_length=256,
        verbose_name="Github Username",
    )
    github_repo = models.CharField(
        max_length=256,
        verbose_name="Github Repository",
    )
    secret = models.CharField(
        max_length=256,
        verbose_name="Secret",
        db_index=True
    )
    def __str__(self):
        return "{}-{}".format(self.owner.username, self.source)
    def repo_full_name(self):
        """'user/repo' form of the associated repository."""
        return "{}/{}".format(self.github_user, self.github_repo)
class Notification(PrimordialModel):
    """A webhook notification (e.g. a Travis build event) we received."""
    class Meta:
        ordering = ('-id',)
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='notifications',
        db_index=True,
        editable=False
    )
    source = models.CharField(
        max_length=20,
        verbose_name="Source",
        editable=False
    )
    github_branch = models.CharField(
        max_length=256,
        verbose_name="GitHub Branch",
        blank=True,
        editable=False
    )
    travis_build_url = models.CharField(
        max_length=256,
        blank=True
    )
    travis_status = models.CharField(
        max_length=256,
        blank=True
    )
    commit = models.CharField(
        max_length=256,
        blank=True
    )
    committed_at = models.DateTimeField(
        auto_now=False,
        null=True
    )
    commit_message = models.CharField(
        max_length=256,
        blank=True
    )
    repository = models.ForeignKey(
        'Repository',
        related_name='notifications',
        editable=False,
    )
    # Import task triggered by this notification.
    import_task = models.ForeignKey(
        ImportTask,
        related_name='notifications',
        verbose_name='Tasks',
        editable=False
    )
class Repository(BaseModel):
    """A source repository (per provider namespace) holding content."""
    class Meta:
        unique_together = [
            ('provider_namespace', 'name'),
            ('provider_namespace', 'original_name'),
        ]
        ordering = ('provider_namespace', 'name')
    owners = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='repositories'
    )
    provider_namespace = models.ForeignKey(
        ProviderNamespace,
        related_name='repositories',
    )
    readme = models.ForeignKey(
        'Readme',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    name = models.CharField(max_length=256)
    # Name as known by the provider (may differ after renames).
    original_name = models.CharField(max_length=256, null=False)
    format = models.CharField(max_length=16, null=True,
                              choices=constants.RepositoryFormat.choices())
    description = fields.TruncatingCharField(
        max_length=255, blank=True, default='')
    import_branch = models.CharField(max_length=256, null=True)
    is_enabled = models.BooleanField(default=False)
    # Most recent commit details, mirrored from the provider.
    commit = models.CharField(max_length=256, blank=True, default='')
    commit_message = fields.TruncatingCharField(
        max_length=256, blank=True, default='')
    commit_url = models.CharField(max_length=256, blank=True, default='')
    commit_created = models.DateTimeField(
        null=True, verbose_name="Last Commit DateTime")
    stargazers_count = models.IntegerField(default=0)
    watchers_count = models.IntegerField(default=0)
    forks_count = models.IntegerField(default=0)
    open_issues_count = models.IntegerField(default=0)
    travis_status_url = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name="Travis Build Status"
    )
    travis_build_url = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name="Travis Build URL"
    )
    issue_tracker_url = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        verbose_name="Issue Tracker URL",
    )
    download_count = models.IntegerField(
        default=0
    )
    @property
    def clone_url(self):
        # NOTE(review): hard-codes github.com despite the provider
        # abstraction -- confirm non-GitHub providers are not expected.
        return "https://github.com/{user}/{repo}.git".format(
            user=self.provider_namespace.name,
            repo=self.original_name
        )
    @property
    def github_user(self):
        """Provider account name (legacy accessor)."""
        return self.provider_namespace.name
    @property
    def github_repo(self):
        """Repository name at the provider (legacy accessor)."""
        return self.original_name
    @property
    def content_counts(self):
        """Queryset of per-content-type counts for this repository."""
        return Content.objects \
            .filter(repository=self.pk) \
            .values('content_type__name') \
            .annotate(count=models.Count('content_type__name')) \
            .order_by('content_type__name')
    def get_absolute_url(self):
        return reverse('api:repository_detail', args=(self.pk,))
    def get_download_url(self, ref=None):
        """Provider download URL for ``ref`` (defaults to latest tag,
        then to the import branch)."""
        download_url = self.provider_namespace.provider.download_url
        if ref is None:
            last_version = self.last_version()
            if last_version:
                ref = last_version.tag
            else:
                ref = self.import_branch
        return download_url.format(
            username=self.provider_namespace.name,
            repository=self.original_name,
            ref=ref,
        )
    def all_versions(self):
        """All tagged versions, newest first."""
        return sorted(self.versions.filter(version__isnull=False).all(),
                      key=operator.attrgetter('version'),
                      reverse=True)
    def last_version(self):
        """Highest known version, or None when untagged."""
        versions = self.all_versions()
        if versions:
            return versions[0]
        return None
class Subscription(PrimordialModel):
    """A user's subscription to notifications for a GitHub repository."""
    class Meta:
        unique_together = ('owner', 'github_user', 'github_repo')
        ordering = ('owner', 'github_user', 'github_repo')
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='subscriptions',
    )
    github_user = models.CharField(
        max_length=256,
        verbose_name="Github Username",
    )
    github_repo = models.CharField(
        max_length=256,
        verbose_name="Github Repository",
    )
class Stargazer(BaseModel):
    """Records that a user has starred a repository (one star per pair)."""
    class Meta:
        unique_together = ('owner', 'repository')
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='starred',
    )
    repository = models.ForeignKey(
        Repository,
        related_name='stars'
    )
class RefreshRoleCount(PrimordialModel):
    """Bookkeeping for a bulk role-count refresh run."""
    state = models.CharField(
        max_length=20
    )
    # Tallies of repositories processed in each outcome bucket.
    passed = models.IntegerField(
        default=0,
        null=True
    )
    failed = models.IntegerField(
        default=0,
        null=True
    )
    deleted = models.IntegerField(
        default=0,
        null=True
    )
    updated = models.IntegerField(
        default=0,
        null=True
    )
@six.python_2_unicode_compatible
class ContentBlock(BaseModel):
    """A named chunk of editable site content (e.g. landing-page text)."""
    name = models.SlugField(unique=True)
    content = models.TextField('content', blank=True)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Content blocks are addressed by slug, not primary key.
        return reverse('api:content_block_detail', args=(self.name,))
class Readme(BaseModel):
    """Raw + rendered README for a repository, deduplicated by hash."""
    class Meta:
        unique_together = ('repository', 'raw_hash')
    repository = models.ForeignKey(
        Repository,
        null=False,
        on_delete=models.CASCADE,
        related_name='+',
    )
    raw = models.TextField(null=False, blank=False)
    # Hash of ``raw``; part of the uniqueness constraint above.
    raw_hash = models.CharField(
        max_length=128, null=False, blank=False)
    mimetype = models.CharField(max_length=32, blank=False)
    html = models.TextField(null=False, blank=False)
    def safe_delete(self):
        """Delete this readme only when nothing references it; returns
        True when deleted, False when still referenced."""
        ref_count = (
            Repository.objects.filter(readme=self).count()
            + Content.objects.filter(readme=self).count()
        )
        if ref_count:
            return False
        self.delete()
        return True
| true | true |
1c36a7cc6a3e744ace5fc5a8f768ead126374065 | 973 | py | Python | codeforces/math数学/1300/515B传递快乐.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | codeforces/math数学/1300/515B传递快乐.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | codeforces/math数学/1300/515B传递快乐.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#https://codeforces.com/problemset/problem/515/B
#n boys and m girls; on day i, boy (i % n) meets girl (i % m); if either of
#the pair is happy, the other becomes happy too.
#Question: will everybody eventually be happy?
#Brute force works, but there is a more elegant mathematical solution:
#Let g = greatest common divisor of n and m.
#If the i-th person is happy, then every person whose number x satisfies
#x % g == i % g will become happy some day because of this person.
#NOTE: the original author suspected sample test case 2 was wrong -- verify.
def allHappy(ubl, ugl, n, m):
    """Return True if happiness eventually spreads to all n boys and m girls.

    ubl/ugl are the (possibly empty) lists of indices of initially unhappy
    boys/girls.  On day i, boy i % n meets girl i % m, so happiness spreads
    within residue classes modulo g = gcd(n, m): everyone becomes happy iff
    each residue class mod g contains at least one initially-happy person.
    """
    # If one side is entirely happy, every pairing eventually infects the
    # other side, so everyone becomes happy.
    if len(ubl) == 0 or len(ugl) == 0:
        return True
    from math import gcd  # stdlib gcd instead of a hand-rolled recursive lambda
    g = gcd(n, m)
    unhappy_boys = set(ubl)    # O(1) membership instead of O(len(ubl)) per test
    unhappy_girls = set(ugl)
    happy_boys = [False] * g   # happy_boys[r]: some happy boy has index == r (mod g)
    happy_girls = [False] * g
    for i in range(n):
        if i not in unhappy_boys:
            happy_boys[i % g] = True
    for i in range(m):
        if i not in unhappy_girls:
            happy_girls[i % g] = True
    # Every residue class must contain at least one happy person.
    return all(happy_boys[r] or happy_girls[r] for r in range(g))
# Input format: line 1 is "n m"; lines 2 and 3 each start with a count
# followed by the indices of unhappy boys / unhappy girls.
n,m = list(map(int,input().split()))
ubl = list(map(int,input().split())) #unhappy boys (first value is the count)
ugl = list(map(int,input().split())) #unhappy girls (first value is the count)
print('Yes' if allHappy(ubl[1:],ugl[1:],n,m) else 'No')
| 30.40625 | 123 | 0.618705 |
def allHappy(ubl,ugl,n,m):
    # Happiness spreads within residue classes modulo gcd(n, m): on day i,
    # boy i % n meets girl i % m and happiness is contagious.
    # ubl/ugl: indices of initially unhappy boys/girls.
    # If one side is entirely happy, everyone eventually becomes happy.
    if len(ubl)==0 or len(ugl)==0: return True
    gcd = lambda a,b: b if a%b==0 else gcd(b,a%b)
    g = gcd(n,m)
    hbl = [False]*g  # hbl[r]: some initially-happy boy has index == r (mod g)
    hgl = [False]*g
    for i in range(n):
        if i not in ubl:
            hbl[i%g] = True
    for i in range(m):
        if i not in ugl:
            hgl[i%g] = True
    # All happy iff every residue class contains at least one happy person.
    return not (False in [(hbl[i] or hgl[i]) for i in range(g)])
# Input: "n m", then unhappy-boy indices, then unhappy-girl indices
# (each of the last two lines starts with a count, hence the [1:] slices).
n,m = list(map(int,input().split()))
ubl = list(map(int,input().split()))
ugl = list(map(int,input().split()))
print('Yes' if allHappy(ubl[1:],ugl[1:],n,m) else 'No')
| true | true |
1c36aa262f4b224667ce0400b29a76ac90384539 | 3,572 | py | Python | homeassistant/components/switch/rpi_gpio.py | evancohen/home-assistant | dafc0ced6b07025c03417d8e7a2c0133b4c622fc | [
"MIT"
] | 14 | 2015-11-10T07:57:43.000Z | 2021-08-29T13:45:26.000Z | homeassistant/components/switch/rpi_gpio.py | evancohen/home-assistant | dafc0ced6b07025c03417d8e7a2c0133b4c622fc | [
"MIT"
] | null | null | null | homeassistant/components/switch/rpi_gpio.py | evancohen/home-assistant | dafc0ced6b07025c03417d8e7a2c0133b4c622fc | [
"MIT"
] | 8 | 2015-11-14T16:40:41.000Z | 2020-02-17T19:48:08.000Z | """
homeassistant.components.switch.rpi_gpio
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Allows to control the GPIO pins of a Raspberry Pi.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.rpi_gpio/
"""
import logging
try:
import RPi.GPIO as GPIO
except ImportError:
GPIO = None
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import (DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
# By default a HIGH GPIO output means "on"; set invert_logic to flip that.
DEFAULT_INVERT_LOGIC = False
# pip requirement Home Assistant installs for this platform.
REQUIREMENTS = ['RPi.GPIO==0.5.11']
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the Raspberry PI GPIO ports. """
    if GPIO is None:
        _LOGGER.error('RPi.GPIO not available. rpi_gpio ports ignored.')
        return
    # pylint: disable=no-member
    GPIO.setmode(GPIO.BCM)
    invert_logic = config.get('invert_logic', DEFAULT_INVERT_LOGIC)
    ports = config.get('ports')
    add_devices([RPiGPIOSwitch(port_name, port_num, invert_logic)
                 for port_num, port_name in ports.items()])

    def cleanup_gpio(event):
        """ Release all GPIO resources on Home Assistant shutdown. """
        # pylint: disable=no-member
        GPIO.cleanup()

    def prepare_gpio(event):
        """ Register GPIO cleanup once Home Assistant has started. """
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_gpio)
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, prepare_gpio)
class RPiGPIOSwitch(ToggleEntity):
    """ Represents a port that can be toggled using Raspberry Pi GPIO. """

    def __init__(self, name, gpio, invert_logic):
        self._name = name or DEVICE_DEFAULT_NAME
        self._gpio = gpio
        # The pin level that means "on"; inverted when invert_logic is set.
        self._active_state = not invert_logic
        self._state = not self._active_state
        # pylint: disable=no-member
        GPIO.setup(gpio, GPIO.OUT)

    @property
    def name(self):
        """ The name of the port. """
        return self._name

    @property
    def should_poll(self):
        """ No polling needed. """
        return False

    @property
    def is_on(self):
        """ True if device is on. """
        return self._state

    def turn_on(self, **kwargs):
        """ Turn the device on. """
        if self._switch(self._active_state):
            self._state = True
        self.update_ha_state()

    def turn_off(self, **kwargs):
        """ Turn the device off. """
        if self._switch(not self._active_state):
            self._state = False
        self.update_ha_state()

    def _switch(self, new_state):
        """ Change the output value to Raspberry Pi GPIO port.

        Returns True on success, False when writing the port failed.
        """
        _LOGGER.info('Setting GPIO %s to %s', self._gpio, new_state)
        try:
            # pylint: disable=no-member
            GPIO.output(self._gpio, 1 if new_state else 0)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; catch only real errors.
            _LOGGER.error('GPIO "%s" output failed', self._gpio)
            return False
        return True

    # pylint: disable=no-self-use
    @property
    def device_state_attributes(self):
        """ Returns device specific state attributes. """
        return None

    @property
    def state_attributes(self):
        """ Returns optional state attributes. """
        data = {}
        device_attr = self.device_state_attributes
        if device_attr is not None:
            data.update(device_attr)
        return data
| 30.529915 | 74 | 0.632139 | import logging
try:
import RPi.GPIO as GPIO
except ImportError:
GPIO = None
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import (DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
# By default a HIGH GPIO output means "on"; set invert_logic to flip that.
DEFAULT_INVERT_LOGIC = False
# pip requirement Home Assistant installs for this platform.
REQUIREMENTS = ['RPi.GPIO==0.5.11']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Raspberry Pi GPIO switch ports from configuration."""
    if GPIO is None:
        _LOGGER.error('RPi.GPIO not available. rpi_gpio ports ignored.')
        return
    GPIO.setmode(GPIO.BCM)
    switches = []
    invert_logic = config.get('invert_logic', DEFAULT_INVERT_LOGIC)
    ports = config.get('ports')
    for port_num, port_name in ports.items():
        switches.append(RPiGPIOSwitch(port_name, port_num, invert_logic))
    add_devices(switches)
    def cleanup_gpio(event):
        """Release all GPIO resources on Home Assistant shutdown."""
        GPIO.cleanup()
    def prepare_gpio(event):
        """Register GPIO cleanup once Home Assistant has started."""
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_gpio)
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, prepare_gpio)
class RPiGPIOSwitch(ToggleEntity):
    """A switch entity driving a single Raspberry Pi GPIO output port."""
    def __init__(self, name, gpio, invert_logic):
        self._name = name or DEVICE_DEFAULT_NAME
        self._gpio = gpio
        # Pin level meaning "on"; inverted when invert_logic is set.
        self._active_state = not invert_logic
        self._state = not self._active_state
        GPIO.setup(gpio, GPIO.OUT)
    @property
    def name(self):
        """Name of the port."""
        return self._name
    @property
    def should_poll(self):
        """No polling needed; state changes are pushed."""
        return False
    @property
    def is_on(self):
        """True if the device is on."""
        return self._state
    def turn_on(self, **kwargs):
        """Turn the device on."""
        if self._switch(self._active_state):
            self._state = True
        self.update_ha_state()
    def turn_off(self, **kwargs):
        """Turn the device off."""
        if self._switch(not self._active_state):
            self._state = False
        self.update_ha_state()
    def _switch(self, new_state):
        """Write the output level; True on success, False on failure.

        NOTE(review): the bare ``except:`` also catches SystemExit and
        KeyboardInterrupt -- consider ``except Exception:``.
        """
        _LOGGER.info('Setting GPIO %s to %s', self._gpio, new_state)
        try:
            GPIO.output(self._gpio, 1 if new_state else 0)
        except:
            _LOGGER.error('GPIO "%s" output failed', self._gpio)
            return False
        return True
    @property
    def device_state_attributes(self):
        """Device-specific state attributes (none for this platform)."""
        return None
    @property
    def state_attributes(self):
        """Optional state attributes merged from device attributes."""
        data = {}
        device_attr = self.device_state_attributes
        if device_attr is not None:
            data.update(device_attr)
        return data
| true | true |
1c36aaf57102eb4bc951dd2537e2e3a9e177c6de | 3,417 | py | Python | redash/query_runner/phoenix.py | frextrite/redash | 74beed80d20d858b51b5560e7984b20d5d2c874e | [
"BSD-2-Clause"
] | 3 | 2018-06-01T00:05:18.000Z | 2019-03-07T14:03:10.000Z | redash/query_runner/phoenix.py | frextrite/redash | 74beed80d20d858b51b5560e7984b20d5d2c874e | [
"BSD-2-Clause"
] | 10 | 2017-10-17T09:17:53.000Z | 2019-12-05T07:13:41.000Z | redash/query_runner/phoenix.py | tradingfoe/redash-clone | 94065b8dce0e27f6f40a7adc2b99e078b03115b3 | [
"BSD-2-Clause"
] | 5 | 2018-06-02T11:12:44.000Z | 2020-05-13T18:34:33.000Z | from redash.query_runner import *
from redash.utils import json_dumps, json_loads
import logging
logger = logging.getLogger(__name__)
# phoenixdb is an optional dependency: when it is missing the runner
# reports itself as disabled instead of failing at import time.
try:
    import phoenixdb
    from phoenixdb.errors import *
    enabled = True
except ImportError:
    enabled = False
# Maps Apache Phoenix column type names to Redash column types.
TYPES_MAPPING = {
    'VARCHAR': TYPE_STRING,
    'CHAR': TYPE_STRING,
    'BINARY': TYPE_STRING,
    'VARBINARY': TYPE_STRING,
    'BOOLEAN': TYPE_BOOLEAN,
    'TIME': TYPE_DATETIME,
    'DATE': TYPE_DATETIME,
    'TIMESTAMP': TYPE_DATETIME,
    'UNSIGNED_TIME': TYPE_DATETIME,
    'UNSIGNED_DATE': TYPE_DATETIME,
    'UNSIGNED_TIMESTAMP': TYPE_DATETIME,
    'INTEGER': TYPE_INTEGER,
    'UNSIGNED_INT': TYPE_INTEGER,
    'BIGINT': TYPE_INTEGER,
    'UNSIGNED_LONG': TYPE_INTEGER,
    'TINYINT': TYPE_INTEGER,
    'UNSIGNED_TINYINT': TYPE_INTEGER,
    'SMALLINT': TYPE_INTEGER,
    'UNSIGNED_SMALLINT': TYPE_INTEGER,
    'FLOAT': TYPE_FLOAT,
    'UNSIGNED_FLOAT': TYPE_FLOAT,
    'DOUBLE': TYPE_FLOAT,
    'UNSIGNED_DOUBLE': TYPE_FLOAT,
    'DECIMAL': TYPE_FLOAT
}
class Phoenix(BaseQueryRunner):
    """Redash query runner for Apache Phoenix via the phoenixdb driver."""
    noop_query = 'select 1'
    @classmethod
    def configuration_schema(cls):
        """JSON schema for the data-source configuration (PQS url)."""
        return {
            'type': 'object',
            'properties': {
                'url': {
                    'type': 'string'
                }
            },
            'required': ['url']
        }
    @classmethod
    def enabled(cls):
        # True only when the optional phoenixdb package imported above.
        return enabled
    @classmethod
    def type(cls):
        return "phoenix"
    def get_schema(self, get_stats=False):
        """Return the table/column schema read from SYSTEM.CATALOG."""
        schema = {}
        # NOTE(review): SQL AND binds tighter than OR, so this reads as
        # "schema IS NULL OR (schema != 'SYSTEM' AND column NOT NULL)";
        # confirm that is the intended filter.
        query = """
        SELECT TABLE_SCHEM, TABLE_NAME, COLUMN_NAME
        FROM SYSTEM.CATALOG
        WHERE TABLE_SCHEM IS NULL OR TABLE_SCHEM != 'SYSTEM' AND COLUMN_NAME IS NOT NULL
        """
        results, error = self.run_query(query, None)
        if error is not None:
            raise Exception("Failed getting schema.")
        results = json_loads(results)
        for row in results['rows']:
            table_name = '{}.{}'.format(row['TABLE_SCHEM'], row['TABLE_NAME'])
            if table_name not in schema:
                schema[table_name] = {'name': table_name, 'columns': []}
            schema[table_name]['columns'].append(row['COLUMN_NAME'])
        return schema.values()
    def run_query(self, query, user):
        """Execute ``query``; return (json_data, error) per runner contract."""
        connection = phoenixdb.connect(
            url=self.configuration.get('url', ''),
            autocommit=True)
        cursor = connection.cursor()
        try:
            cursor.execute(query)
            # cursor.description: (name, type_code, ...) per DB-API column.
            column_tuples = [(i[0], TYPES_MAPPING.get(i[1], None)) for i in cursor.description]
            columns = self.fetch_columns(column_tuples)
            rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
            data = {'columns': columns, 'rows': rows}
            json_data = json_dumps(data)
            error = None
            cursor.close()
        except Error as e:
            json_data = None
            # NOTE(review): ``e.message`` is Python 2-only; verify before
            # running under Python 3.
            error = 'code: {}, sql state:{}, message: {}'.format(e.code, e.sqlstate, e.message)
        except (KeyboardInterrupt, InterruptException) as e:
            error = "Query cancelled by user."
            json_data = None
        except Exception as ex:
            json_data = None
            # NOTE(review): ``unicode`` exists only on Python 2.
            error = unicode(ex)
        finally:
            # ``connection`` is always bound here; the guard is redundant
            # but harmless.
            if connection:
                connection.close()
        return json_data, error
# Make this runner discoverable by Redash.
register(Phoenix)
| 27.556452 | 106 | 0.586479 | from redash.query_runner import *
from redash.utils import json_dumps, json_loads
import logging
logger = logging.getLogger(__name__)
try:
import phoenixdb
from phoenixdb.errors import *
enabled = True
except ImportError:
enabled = False
TYPES_MAPPING = {
'VARCHAR': TYPE_STRING,
'CHAR': TYPE_STRING,
'BINARY': TYPE_STRING,
'VARBINARY': TYPE_STRING,
'BOOLEAN': TYPE_BOOLEAN,
'TIME': TYPE_DATETIME,
'DATE': TYPE_DATETIME,
'TIMESTAMP': TYPE_DATETIME,
'UNSIGNED_TIME': TYPE_DATETIME,
'UNSIGNED_DATE': TYPE_DATETIME,
'UNSIGNED_TIMESTAMP': TYPE_DATETIME,
'INTEGER': TYPE_INTEGER,
'UNSIGNED_INT': TYPE_INTEGER,
'BIGINT': TYPE_INTEGER,
'UNSIGNED_LONG': TYPE_INTEGER,
'TINYINT': TYPE_INTEGER,
'UNSIGNED_TINYINT': TYPE_INTEGER,
'SMALLINT': TYPE_INTEGER,
'UNSIGNED_SMALLINT': TYPE_INTEGER,
'FLOAT': TYPE_FLOAT,
'UNSIGNED_FLOAT': TYPE_FLOAT,
'DOUBLE': TYPE_FLOAT,
'UNSIGNED_DOUBLE': TYPE_FLOAT,
'DECIMAL': TYPE_FLOAT
}
class Phoenix(BaseQueryRunner):
noop_query = 'select 1'
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string'
}
},
'required': ['url']
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def type(cls):
return "phoenix"
def get_schema(self, get_stats=False):
schema = {}
query = """
SELECT TABLE_SCHEM, TABLE_NAME, COLUMN_NAME
FROM SYSTEM.CATALOG
WHERE TABLE_SCHEM IS NULL OR TABLE_SCHEM != 'SYSTEM' AND COLUMN_NAME IS NOT NULL
"""
results, error = self.run_query(query, None)
if error is not None:
raise Exception("Failed getting schema.")
results = json_loads(results)
for row in results['rows']:
table_name = '{}.{}'.format(row['TABLE_SCHEM'], row['TABLE_NAME'])
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
schema[table_name]['columns'].append(row['COLUMN_NAME'])
return schema.values()
def run_query(self, query, user):
connection = phoenixdb.connect(
url=self.configuration.get('url', ''),
autocommit=True)
cursor = connection.cursor()
try:
cursor.execute(query)
column_tuples = [(i[0], TYPES_MAPPING.get(i[1], None)) for i in cursor.description]
columns = self.fetch_columns(column_tuples)
rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
data = {'columns': columns, 'rows': rows}
json_data = json_dumps(data)
error = None
cursor.close()
except Error as e:
json_data = None
error = 'code: {}, sql state:{}, message: {}'.format(e.code, e.sqlstate, e.message)
except (KeyboardInterrupt, InterruptException) as e:
error = "Query cancelled by user."
json_data = None
except Exception as ex:
json_data = None
error = unicode(ex)
finally:
if connection:
connection.close()
return json_data, error
register(Phoenix)
| true | true |
1c36ab81488993d0dfe0b53b52871458f047b937 | 952 | py | Python | Fusion_mechanism_analysis/nanopre_reads_analysis/scripts/get_huiwen.1.py | YinYuan-001/muntjac_code | a376cc95ad548903f047a0e169c06eed1e757c9b | [
"MIT"
] | 2 | 2021-12-02T07:01:21.000Z | 2022-03-04T08:27:05.000Z | Fusion_mechanism_analysis/nanopre_reads_analysis/scripts/get_huiwen.1.py | YinYuan-001/muntjac_code | a376cc95ad548903f047a0e169c06eed1e757c9b | [
"MIT"
] | 2 | 2021-12-08T03:11:30.000Z | 2022-03-22T07:47:20.000Z | Fusion_mechanism_analysis/nanopre_reads_analysis/scripts/get_huiwen.1.py | YinYuan-001/muntjac_code | a376cc95ad548903f047a0e169c06eed1e757c9b | [
"MIT"
] | 5 | 2021-09-10T08:00:11.000Z | 2021-12-08T11:40:58.000Z | #!/bin/env python
import click
@click.command()
@click.option('--filehuiwen')
@click.option('--filesize')
@click.option('--fileout')
def main(filehuiwen,filesize,fileout):
file_out=open(fileout,'w')
Size={}
with open(filesize,'r') as file_size:
for line in file_size:
Line=line.strip().split()
id=Line[0]
size=int(Line[1])
if id in Size:
Size[id]=max(Size[id],size)
else:
Size[id]=size
with open(filehuiwen,'r') as file_huiwen:
for line in file_huiwen:
Line=line.strip().split()
id=Line[0]
start=Line[1]
end=int(Line[2])
if start == "1" :
size=Size[id]
if end != size:
file_out.write(line)
else:
file_out.write(line)
if __name__=="__main__":
main()
| 26.444444 | 45 | 0.483193 |
import click
@click.command()
@click.option('--filehuiwen')
@click.option('--filesize')
@click.option('--fileout')
def main(filehuiwen,filesize,fileout):
file_out=open(fileout,'w')
Size={}
with open(filesize,'r') as file_size:
for line in file_size:
Line=line.strip().split()
id=Line[0]
size=int(Line[1])
if id in Size:
Size[id]=max(Size[id],size)
else:
Size[id]=size
with open(filehuiwen,'r') as file_huiwen:
for line in file_huiwen:
Line=line.strip().split()
id=Line[0]
start=Line[1]
end=int(Line[2])
if start == "1" :
size=Size[id]
if end != size:
file_out.write(line)
else:
file_out.write(line)
if __name__=="__main__":
main()
| true | true |
1c36aba3ab9d1f8671ab3bc8bd4d782854a69e0c | 5,620 | py | Python | variational_approximation.py | ChriPiv/stinespring-algo-paper | d61cf46c302c511286280e5de2b22d01284a4379 | [
"Apache-2.0"
] | null | null | null | variational_approximation.py | ChriPiv/stinespring-algo-paper | d61cf46c302c511286280e5de2b22d01284a4379 | [
"Apache-2.0"
] | null | null | null | variational_approximation.py | ChriPiv/stinespring-algo-paper | d61cf46c302c511286280e5de2b22d01284a4379 | [
"Apache-2.0"
] | 1 | 2021-11-29T10:02:13.000Z | 2021-11-29T10:02:13.000Z | # (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import autograd
import autograd.numpy as np
from scipy.optimize import minimize
from qiskit import *
from qiskit.quantum_info import *
from qiskit.aqua.components.variational_forms import *
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.utils import insert_noise
from json_tools import *
from channels import *
import autograd.numpy as np
REDUCED_UNITARY = True
# circuit construction routines
def rx(param):
return np.array(
[[np.cos(param*0.5), -1j * np.sin(param*0.5)],
[-1j*np.sin(param*0.5), np.cos(param*0.5)]])
def ry(param):
return (1.+0j)*np.array(
[[np.cos(param*0.5), -np.sin(param*0.5)],
[np.sin(param*0.5), np.cos(param*0.5)]])
def rz(param):
return np.array(
[[1., 0.],
[0, np.exp(1j*param)]])
def TP(matrices):
if len(matrices)==1: return matrices[0]
retval = np.kron(matrices[1], matrices[0])
for i in range(2, len(matrices)):
retval = np.kron(matrices[i], retval)
return retval
P0 = np.array([[1,0],[0,0]])
P1 = np.array([[0,0],[0,1]])
Id = np.array([[1,0],[0,1]])
X = np.array([[0,1],[1,0]])
CX = TP([P0,Id]) + TP([P1,X])
CXI = TP([P0,Id,Id]) + TP([P1,X,Id])
CIX = TP([P0,Id,Id]) + TP([P1,Id,X])
ICX = TP([Id,P0,Id]) + TP([Id,P1,X])
def get_varform_unitary(params, n_qubits, depth, full_connectivity=True):
num_parameters = (depth + 1) * 2 * n_qubits
unitary = np.eye(2**n_qubits)
def ryrz_row(param_idx):
val1 = TP([ ry(params[param_idx+2*i]) for i in range(n_qubits) ])
val2 = TP([ rz(params[param_idx+2*i+1]) for i in range(n_qubits) ])
return val2 @ val1
def entanglement_row():
if full_connectivity:
if n_qubits == 1: return np.identity(2)
elif n_qubits == 2: return CX
elif n_qubits == 3: return ICX @ CIX @ CXI
else:
if n_qubits == 1: return np.identity(2)
elif n_qubits == 2: return CX
elif n_qubits == 3: return ICX @ CXI
raise
param_idx = 0
unitary = ryrz_row(param_idx) @ unitary
param_idx += 2*n_qubits
for i in range(depth):
unitary = entanglement_row() @ unitary
unitary = ryrz_row(param_idx) @ unitary
param_idx += 2*n_qubits
assert param_idx == num_parameters
return unitary
def get_varform_circuit(params, n_qubits, depth, full_connectivity=True):
entanglement = 'full' if full_connectivity else 'linear'
varform = RYRZ(n_qubits, depth=depth, entanglement_gate='cx', entanglement=entanglement)
circ = varform.construct_circuit(params)
# remove barriers
for i in reversed(range(len(circ.data))):
if type(circ.data[i][0]) == qiskit.circuit.barrier.Barrier:
del circ.data[i]
return circ
# optimization routines
def error_l2(u1, u2, n_qubits):
def norm(x): return np.mean( np.square(np.real(x)) + np.square(np.imag(x)) )
if n_qubits == 2:
return norm(u1[:,0] - u2[:,0]) + \
norm(u1[:,2] - u2[:,2])
elif n_qubits == 3:
return norm(u1[:,0] - u2[:,0]) + \
norm(u1[:,2] - u2[:,2]) + \
norm(u1[:,4] - u2[:,4]) + \
norm(u1[:,6] - u2[:,6])
else: raise
def error_mean(u1, u2, n_qubits):
if not REDUCED_UNITARY: return np.mean(np.abs(u1-u2))
if n_qubits == 2:
return np.mean(np.abs(u1[:,0] - u2[:,0])) + \
np.mean(np.abs(u1[:,2] - u2[:,2]))
elif n_qubits == 3:
return np.mean(np.abs(u1[:,0] - u2[:,0])) + \
np.mean(np.abs(u1[:,2] - u2[:,2])) + \
np.mean(np.abs(u1[:,4] - u2[:,4])) + \
np.mean(np.abs(u1[:,6] - u2[:,6]))
else: raise
def get_approx_circuit(unitary, n_qubits, depth, full_connectivity=True):
def loss(x):
ux = get_varform_unitary(x, n_qubits, depth, full_connectivity)
err = error_l2(ux, unitary, n_qubits)
return err
loss_grad = autograd.grad(loss)
loss_hess = autograd.hessian(loss)
num_parameters = (depth + 1) * 2 * n_qubits
bestval = 1e10
for _ in range(5):
x0 = np.random.uniform(low=0., high=2.*np.pi, size=(num_parameters))
res = minimize(loss, x0, jac=loss_grad, options={"maxiter":int(1e8)}, method='BFGS')
if res.fun < bestval:
bestval = res.fun
best = res.x
u_final = get_varform_unitary(best, n_qubits, depth, full_connectivity)
return u_final, best
if __name__ == "__main__":
full_connectivity = True
n_qubits = 3
depth = 10
num_parameters = (depth + 1) * 2 * n_qubits
x0 = np.random.uniform(low=0., high=2.*np.pi, size=(num_parameters))
U = get_varform_unitary(x0, n_qubits, depth, full_connectivity)
circ = get_varform_circuit(x0, n_qubits, depth, full_connectivity)
U_ref = Operator(circ).data
assert np.linalg.norm(U-U_ref) < 1e-10
full_connectivity = False
U = get_varform_unitary(x0, n_qubits, depth, full_connectivity)
circ = get_varform_circuit(x0, n_qubits, depth, full_connectivity)
U_ref = Operator(circ).data
assert np.linalg.norm(U-U_ref) < 1e-10
print("done.")
| 34.691358 | 92 | 0.614591 |
import autograd
import autograd.numpy as np
from scipy.optimize import minimize
from qiskit import *
from qiskit.quantum_info import *
from qiskit.aqua.components.variational_forms import *
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.utils import insert_noise
from json_tools import *
from channels import *
import autograd.numpy as np
REDUCED_UNITARY = True
def rx(param):
return np.array(
[[np.cos(param*0.5), -1j * np.sin(param*0.5)],
[-1j*np.sin(param*0.5), np.cos(param*0.5)]])
def ry(param):
return (1.+0j)*np.array(
[[np.cos(param*0.5), -np.sin(param*0.5)],
[np.sin(param*0.5), np.cos(param*0.5)]])
def rz(param):
return np.array(
[[1., 0.],
[0, np.exp(1j*param)]])
def TP(matrices):
if len(matrices)==1: return matrices[0]
retval = np.kron(matrices[1], matrices[0])
for i in range(2, len(matrices)):
retval = np.kron(matrices[i], retval)
return retval
P0 = np.array([[1,0],[0,0]])
P1 = np.array([[0,0],[0,1]])
Id = np.array([[1,0],[0,1]])
X = np.array([[0,1],[1,0]])
CX = TP([P0,Id]) + TP([P1,X])
CXI = TP([P0,Id,Id]) + TP([P1,X,Id])
CIX = TP([P0,Id,Id]) + TP([P1,Id,X])
ICX = TP([Id,P0,Id]) + TP([Id,P1,X])
def get_varform_unitary(params, n_qubits, depth, full_connectivity=True):
num_parameters = (depth + 1) * 2 * n_qubits
unitary = np.eye(2**n_qubits)
def ryrz_row(param_idx):
val1 = TP([ ry(params[param_idx+2*i]) for i in range(n_qubits) ])
val2 = TP([ rz(params[param_idx+2*i+1]) for i in range(n_qubits) ])
return val2 @ val1
def entanglement_row():
if full_connectivity:
if n_qubits == 1: return np.identity(2)
elif n_qubits == 2: return CX
elif n_qubits == 3: return ICX @ CIX @ CXI
else:
if n_qubits == 1: return np.identity(2)
elif n_qubits == 2: return CX
elif n_qubits == 3: return ICX @ CXI
raise
param_idx = 0
unitary = ryrz_row(param_idx) @ unitary
param_idx += 2*n_qubits
for i in range(depth):
unitary = entanglement_row() @ unitary
unitary = ryrz_row(param_idx) @ unitary
param_idx += 2*n_qubits
assert param_idx == num_parameters
return unitary
def get_varform_circuit(params, n_qubits, depth, full_connectivity=True):
entanglement = 'full' if full_connectivity else 'linear'
varform = RYRZ(n_qubits, depth=depth, entanglement_gate='cx', entanglement=entanglement)
circ = varform.construct_circuit(params)
for i in reversed(range(len(circ.data))):
if type(circ.data[i][0]) == qiskit.circuit.barrier.Barrier:
del circ.data[i]
return circ
def error_l2(u1, u2, n_qubits):
def norm(x): return np.mean( np.square(np.real(x)) + np.square(np.imag(x)) )
if n_qubits == 2:
return norm(u1[:,0] - u2[:,0]) + \
norm(u1[:,2] - u2[:,2])
elif n_qubits == 3:
return norm(u1[:,0] - u2[:,0]) + \
norm(u1[:,2] - u2[:,2]) + \
norm(u1[:,4] - u2[:,4]) + \
norm(u1[:,6] - u2[:,6])
else: raise
def error_mean(u1, u2, n_qubits):
if not REDUCED_UNITARY: return np.mean(np.abs(u1-u2))
if n_qubits == 2:
return np.mean(np.abs(u1[:,0] - u2[:,0])) + \
np.mean(np.abs(u1[:,2] - u2[:,2]))
elif n_qubits == 3:
return np.mean(np.abs(u1[:,0] - u2[:,0])) + \
np.mean(np.abs(u1[:,2] - u2[:,2])) + \
np.mean(np.abs(u1[:,4] - u2[:,4])) + \
np.mean(np.abs(u1[:,6] - u2[:,6]))
else: raise
def get_approx_circuit(unitary, n_qubits, depth, full_connectivity=True):
def loss(x):
ux = get_varform_unitary(x, n_qubits, depth, full_connectivity)
err = error_l2(ux, unitary, n_qubits)
return err
loss_grad = autograd.grad(loss)
loss_hess = autograd.hessian(loss)
num_parameters = (depth + 1) * 2 * n_qubits
bestval = 1e10
for _ in range(5):
x0 = np.random.uniform(low=0., high=2.*np.pi, size=(num_parameters))
res = minimize(loss, x0, jac=loss_grad, options={"maxiter":int(1e8)}, method='BFGS')
if res.fun < bestval:
bestval = res.fun
best = res.x
u_final = get_varform_unitary(best, n_qubits, depth, full_connectivity)
return u_final, best
if __name__ == "__main__":
full_connectivity = True
n_qubits = 3
depth = 10
num_parameters = (depth + 1) * 2 * n_qubits
x0 = np.random.uniform(low=0., high=2.*np.pi, size=(num_parameters))
U = get_varform_unitary(x0, n_qubits, depth, full_connectivity)
circ = get_varform_circuit(x0, n_qubits, depth, full_connectivity)
U_ref = Operator(circ).data
assert np.linalg.norm(U-U_ref) < 1e-10
full_connectivity = False
U = get_varform_unitary(x0, n_qubits, depth, full_connectivity)
circ = get_varform_circuit(x0, n_qubits, depth, full_connectivity)
U_ref = Operator(circ).data
assert np.linalg.norm(U-U_ref) < 1e-10
print("done.")
| true | true |
1c36afca34a6ce3ee5ed1be12d1cc4d387ca18cc | 1,883 | py | Python | sunimage2stl/makeMovie.py | nathanielcrosby/sunimage2stl | 31858c46f48eb33afc3a4415292ce14b36b1e712 | [
"MIT"
] | null | null | null | sunimage2stl/makeMovie.py | nathanielcrosby/sunimage2stl | 31858c46f48eb33afc3a4415292ce14b36b1e712 | [
"MIT"
] | null | null | null | sunimage2stl/makeMovie.py | nathanielcrosby/sunimage2stl | 31858c46f48eb33afc3a4415292ce14b36b1e712 | [
"MIT"
] | 1 | 2019-09-20T16:18:21.000Z | 2019-09-20T16:18:21.000Z | import matplotlib.pyplot as plt
import os
import imageio
def make_movie(files, output, fps=10, **kwargs):
'''
uses the imageio library to take the jpegs created before and save them to a string
of images that is spaced at a certain interval (duration)
Parameters:
files : the array of all the images
output : name of the file outputted
fps : frames per second of the movie
'''
duration = 1 / fps
images = []
for filename in files:
images.append(imageio.imread(filename))
#saves array of images as a gif with a set time between each still
imageio.mimsave(output, images, duration=duration)
def rotanimate(ax, output, azim, elev, fps=10, width=10, height=10, prefix='tmprot_', **kwargs):
'''
Produces an animation (.gif) from a 3D plot on a 3D ax
Makes jpeg pictures of the given 3d ax, with different angles.
Args:
ax (3D axis): the ax containing the plot of interest
output (string): the name of the file created
azim (list): the list of rotational angles (in degree) under which to show the plot.
elev (list): the list of elevational angle at which to show the plot
- width : in inches
- heigth: in inches
- fps : frames per second
prefix (str): prefix for the files created.
Inputs the images, output, and fps to the make_movie function
'''
files = []
ax.figure.set_size_inches(width, height)
#loops through and creates stills that are added to files array
for i,angle in enumerate(azim):
#.vew_init sets the view angle based on the list values
ax.view_init(elev = elev[i], azim=angle)
fname = '%s%03d.jpeg'%(prefix,i)
ax.figure.savefig(fname)
files.append(fname)
make_movie(files, output, fps=fps, **kwargs)
for f in files:
os.remove(f) | 32.465517 | 96 | 0.654275 | import matplotlib.pyplot as plt
import os
import imageio
def make_movie(files, output, fps=10, **kwargs):
duration = 1 / fps
images = []
for filename in files:
images.append(imageio.imread(filename))
imageio.mimsave(output, images, duration=duration)
def rotanimate(ax, output, azim, elev, fps=10, width=10, height=10, prefix='tmprot_', **kwargs):
files = []
ax.figure.set_size_inches(width, height)
for i,angle in enumerate(azim):
ax.view_init(elev = elev[i], azim=angle)
fname = '%s%03d.jpeg'%(prefix,i)
ax.figure.savefig(fname)
files.append(fname)
make_movie(files, output, fps=fps, **kwargs)
for f in files:
os.remove(f) | true | true |
1c36b03186f332d5d13ef11c095479254a65b2eb | 4,367 | py | Python | contrib/devtools/logprint-scanner.py | leviathan-help/LVT-MasternodeCoin | 201329113c9e7ab79c193abe262bdfba6b540885 | [
"MIT"
] | 1 | 2019-11-13T09:17:16.000Z | 2019-11-13T09:17:16.000Z | contrib/devtools/logprint-scanner.py | leviathan-help/LVT-MasternodeCoin | 201329113c9e7ab79c193abe262bdfba6b540885 | [
"MIT"
] | null | null | null | contrib/devtools/logprint-scanner.py | leviathan-help/LVT-MasternodeCoin | 201329113c9e7ab79c193abe262bdfba6b540885 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The LEVIATHAN developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os, sys
from subprocess import check_output
def countRelevantCommas(line):
openParensPosStack = []
openParensPos = 0
charCounter = 0
numRelevantCommas = 0
firstOpenParensIndex = line.find("(")
for char in line:
if char == '(':
openParensPosStack.append(charCounter)
if char == ')':
openParensPosStack.pop()
if char == "," and openParensPosStack[-1] == firstOpenParensIndex:
numRelevantCommas += 1
charCounter += 1
return numRelevantCommas
if __name__ == "__main__":
out = check_output("git rev-parse --show-toplevel", shell=True, universal_newlines=True)
srcDir = out.rstrip() + "/src/"
filelist = [os.path.join(dp, f) for dp, dn, filenames in os.walk(srcDir) for f in filenames if os.path.splitext(f)[1] == '.cpp' or os.path.splitext(f)[1] == '.h' ]
incorrectInstanceCounter = 0
for file in filelist:
f = open(file,"r", encoding="utf-8")
data = f.read()
rows = data.split("\n")
count = 0
full_data = []
lineCounter = 1
tempLine = ""
tempCount = 0
for row in rows:
# Collapse multiple lines into one
tempLine += row
# Line contains LogPrint or LogPrintf
if tempLine.find("LogPrint") != -1:
if tempLine.count("(") == tempLine.count(")"):
havePercents = tempLine.count('%') > 0
if havePercents:
# This line of code has a format specifier that requires checking number of associated arguments
# Determine the number of arguments provided, see if that matches the number of format specifiers
# Count the number of commas after the format specifier string. Check to see if it matches the number of format specifiers.
# Assumes quotes are not escaped in the specifier string and there are no percent signs when specifying the debug level.
# First, determine the position of the comma after the format specifier section, named commaAfterEndSpecifierStringIndex
firstSpecifierIndex = tempLine.find('%')
startSpecifierStringIndex = tempLine.rfind('"',firstSpecifierIndex)
endSpecifierStringIndex = tempLine.find('"',firstSpecifierIndex)
commaAfterEndSpecifierStringIndex = tempLine.find(',',endSpecifierStringIndex)
# Count the number of commas after the specifier string
line = "(" + tempLine[commaAfterEndSpecifierStringIndex:-1]
numCommas = countRelevantCommas(line)
# Determine number of extra percents after specifier string
numExtraPercents = tempLine.count('%', commaAfterEndSpecifierStringIndex)
# Subtract extra from total count. This is the number of expected specifiers
# ignore %%
numPercents = tempLine.count('%') - numExtraPercents - 2*tempLine.count('%%')
if numPercents != numCommas:
print("Incorrect number of arguments for LogPrint(f) statement found.")
print(str(file) + ":" + str(lineCounter - tempCount))
print("Line = " + tempLine)
print("numRelevantCommas = " + str(numCommas) + ", numRelevantPercents = " + str(numPercents))
print("")
incorrectInstanceCounter += 1
# Done with this multiline, clear tempLine
tempLine = ""
tempCount = 0
else:
tempCount += 1
else:
# No LogPrint, clear tempLine
tempLine = ""
tempCount = 0
lineCounter += 1
print("# of incorrect instances: " + str(incorrectInstanceCounter))
sys.exit(incorrectInstanceCounter)
| 42.398058 | 167 | 0.568583 |
import os, sys
from subprocess import check_output
def countRelevantCommas(line):
openParensPosStack = []
openParensPos = 0
charCounter = 0
numRelevantCommas = 0
firstOpenParensIndex = line.find("(")
for char in line:
if char == '(':
openParensPosStack.append(charCounter)
if char == ')':
openParensPosStack.pop()
if char == "," and openParensPosStack[-1] == firstOpenParensIndex:
numRelevantCommas += 1
charCounter += 1
return numRelevantCommas
if __name__ == "__main__":
out = check_output("git rev-parse --show-toplevel", shell=True, universal_newlines=True)
srcDir = out.rstrip() + "/src/"
filelist = [os.path.join(dp, f) for dp, dn, filenames in os.walk(srcDir) for f in filenames if os.path.splitext(f)[1] == '.cpp' or os.path.splitext(f)[1] == '.h' ]
incorrectInstanceCounter = 0
for file in filelist:
f = open(file,"r", encoding="utf-8")
data = f.read()
rows = data.split("\n")
count = 0
full_data = []
lineCounter = 1
tempLine = ""
tempCount = 0
for row in rows:
tempLine += row
if tempLine.find("LogPrint") != -1:
if tempLine.count("(") == tempLine.count(")"):
havePercents = tempLine.count('%') > 0
if havePercents:
firstSpecifierIndex = tempLine.find('%')
startSpecifierStringIndex = tempLine.rfind('"',firstSpecifierIndex)
endSpecifierStringIndex = tempLine.find('"',firstSpecifierIndex)
commaAfterEndSpecifierStringIndex = tempLine.find(',',endSpecifierStringIndex)
line = "(" + tempLine[commaAfterEndSpecifierStringIndex:-1]
numCommas = countRelevantCommas(line)
numExtraPercents = tempLine.count('%', commaAfterEndSpecifierStringIndex)
numPercents = tempLine.count('%') - numExtraPercents - 2*tempLine.count('%%')
if numPercents != numCommas:
print("Incorrect number of arguments for LogPrint(f) statement found.")
print(str(file) + ":" + str(lineCounter - tempCount))
print("Line = " + tempLine)
print("numRelevantCommas = " + str(numCommas) + ", numRelevantPercents = " + str(numPercents))
print("")
incorrectInstanceCounter += 1
tempLine = ""
tempCount = 0
else:
tempCount += 1
else:
tempLine = ""
tempCount = 0
lineCounter += 1
print("# of incorrect instances: " + str(incorrectInstanceCounter))
sys.exit(incorrectInstanceCounter)
| true | true |
1c36b11ef5fe55e4444f81c0e88b3bf952295475 | 16,291 | py | Python | locust/contrib/fasthttp.py | cultureamp/locust | 0cb12d88f70d4505b8cc45a70a13354e1a3c4492 | [
"MIT"
] | null | null | null | locust/contrib/fasthttp.py | cultureamp/locust | 0cb12d88f70d4505b8cc45a70a13354e1a3c4492 | [
"MIT"
] | null | null | null | locust/contrib/fasthttp.py | cultureamp/locust | 0cb12d88f70d4505b8cc45a70a13354e1a3c4492 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import re
import six
import socket
from base64 import b64encode
from six.moves.urllib.parse import urlparse, urlunparse
from ssl import SSLError
from timeit import default_timer
# Python 2/3 compatibility: import CookieJar from its version-specific
# location and shim names that only exist on one of the two versions.
if six.PY2:
    from cookielib import CookieJar
    class ConnectionRefusedError(Exception):
        # ConnectionRefusedError doesn't exist in python 2, so we'll
        # define a dummy class to avoid a NameError
        pass
else:
    from http.cookiejar import CookieJar
    # Python 3 has no `unicode` builtin; alias it to `str` so code written
    # against the Python 2 name keeps working.
    unicode = str
import gevent
from gevent.timeout import Timeout
from geventhttpclient.useragent import UserAgent, CompatRequest, CompatResponse, ConnectionError
from geventhttpclient.response import HTTPConnectionClosed
from locust import events
from locust.core import Locust
from locust.exception import LocustError, CatchResponseError, ResponseError
# Monkey patch geventhttpclient.useragent.CompatRequest so that Cookiejar works with Python >= 3.3
# More info: https://github.com/requests/requests/pull/871
CompatRequest.unverifiable = False
# Workaround for AttributeError: 'CompatRequest' object has no attribute 'type' in Cookiejar
# https://github.com/locustio/locust/issues/1138
# Might allow secure cookies over non-secure connections but that is a minor concern in a load testing tool
CompatRequest.type = "https"
# Regexp for checking if an absolute (scheme-prefixed) URL was specified; case-insensitive
absolute_http_url_regexp = re.compile(r"^https?://", re.I)
# List of exceptions that can be raised by geventhttpclient when sending an HTTP request,
# and that should result in a Locust failure (reported as a failed request, not a crash)
FAILURE_EXCEPTIONS = (ConnectionError, ConnectionRefusedError, socket.error, \
    SSLError, Timeout, HTTPConnectionClosed)
def _construct_basic_auth_str(username, password):
"""Construct Authorization header value to be used in HTTP Basic Auth"""
if isinstance(username, str):
username = username.encode('latin1')
if isinstance(password, str):
password = password.encode('latin1')
return 'Basic ' + b64encode(b':'.join((username, password))).strip().decode("ascii")
class FastHttpLocust(Locust):
    """
    Represents an HTTP "user" that is hatched to attack the system under test.

    The behaviour of this user is defined by its task_set attribute, which
    should point to a :py:class:`TaskSet <locust.core.TaskSet>` class. On
    instantiation a *client* attribute is created: an HTTP client that keeps
    a user session (cookies) between requests.
    """

    # FastHttpSession instance, created in __init__ after the host has been
    # validated. It supports cookies and therefore keeps session state
    # between HTTP requests.
    client = None

    def __init__(self):
        super(FastHttpLocust, self).__init__()
        self._validate_host()
        self.client = FastHttpSession(base_url=self.host)

    def _validate_host(self):
        # Fail fast with an explanatory error instead of producing confusing
        # request failures later on.
        if self.host is None:
            raise LocustError("You must specify the base host. Either in the host attribute in the Locust class, or on the command line using the --host option.")
        if not re.match(r"^https?://[^/]+$", self.host, re.I):
            raise LocustError("Invalid host (`%s`). The specified host string must be a base URL without a trailing slash. E.g. http://example.org" % self.host)
class FastHttpSession(object):
    """
    HTTP session backed by geventhttpclient's UserAgent.

    Keeps per-user state between requests: a CookieJar and — when the base
    URL contains "user:password@" credentials — a Basic Auth header.
    """
    # Value for the HTTP "Authorization" header, set in __init__ when the
    # base URL embeds basic-auth credentials; None when no auth is used.
    auth_header = None
def __init__(self, base_url, **kwargs):
self.base_url = base_url
self.cookiejar = CookieJar()
self.client = LocustUserAgent(
max_retries=1,
cookiejar=self.cookiejar,
insecure=True,
ssl_options={"cert_reqs": gevent.ssl.CERT_NONE},
**kwargs
)
# Check for basic authentication
parsed_url = urlparse(self.base_url)
if parsed_url.username and parsed_url.password:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%d" % parsed_url.port
# remove username and password from the base_url
self.base_url = urlunparse((parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment))
# store authentication header (we construct this by using _basic_auth_str() function from requests.auth)
self.auth_header = _construct_basic_auth_str(parsed_url.username, parsed_url.password)
def _build_url(self, path):
""" prepend url with hostname unless it's already an absolute URL """
if absolute_http_url_regexp.match(path):
return path
else:
return "%s%s" % (self.base_url, path)
def _send_request_safe_mode(self, method, url, **kwargs):
"""
Send an HTTP request, and catch any exception that might occur due to either
connection problems, or invalid HTTP status codes
"""
try:
return self.client.urlopen(url, method=method, **kwargs)
except FAILURE_EXCEPTIONS as e:
if hasattr(e, "response"):
r = e.response
else:
r = ErrorResponse()
r.error = e
return r
def request(self, method, path, name=None, data=None, catch_response=False, stream=False, \
headers=None, auth=None, **kwargs):
"""
Send and HTTP request
Returns :py:class:`locust.contrib.fasthttp.FastResponse` object.
:param method: method for the new :class:`Request` object.
:param path: Path that will be concatenated with the base host URL that has been specified.
Can also be a full URL, in which case the full URL will be requested, and the base host
is ignored.
:param name: (optional) An argument that can be specified to use as label in Locust's
statistics instead of the URL path. This can be used to group different URL's
that are requested into a single entry in Locust's statistics.
:param catch_response: (optional) Boolean argument that, if set, can be used to make a request
return a context manager to work as argument to a with statement. This will allow the
request to be marked as a fail based on the content of the response, even if the response
code is ok (2xx). The opposite also works, one can use catch_response to catch a request
and then mark it as successful even if the response code was not (i.e 500 or 404).
:param data: (optional) Dictionary or bytes to send in the body of the request.
:param headers: (optional) Dictionary of HTTP Headers to send with the request.
:param auth: (optional) Auth (username, password) tuple to enable Basic HTTP Auth.
:param stream: (optional) If set to true the response body will not be consumed immediately
and can instead be consumed by accessing the stream attribute on the Response object.
Another side effect of setting stream to True is that the time for downloading the response
content will not be accounted for in the request time that is reported by Locust.
"""
# prepend url with hostname unless it's already an absolute URL
url = self._build_url(path)
# store meta data that is used when reporting the request to locust's statistics
request_meta = {}
# set up pre_request hook for attaching meta data to the request object
request_meta["method"] = method
request_meta["start_time"] = default_timer()
request_meta["name"] = name or path
headers = headers or {}
if auth:
headers['Authorization'] = _construct_basic_auth_str(auth[0], auth[1])
elif self.auth_header:
headers['Authorization'] = self.auth_header
if not "Accept-Encoding" in headers:
headers['Accept-Encoding'] = "gzip, deflate"
# send request, and catch any exceptions
response = self._send_request_safe_mode(method, url, payload=data, headers=headers, **kwargs)
# get the length of the content, but if the argument stream is set to True, we take
# the size from the content-length header, in order to not trigger fetching of the body
if stream:
request_meta["content_size"] = int(response.headers.get("content-length") or 0)
else:
request_meta["content_size"] = len(response.content or "")
# Record the consumed time
# Note: This is intentionally placed after we record the content_size above, since
# we'll then trigger fetching of the body (unless stream=True)
request_meta["response_time"] = int((default_timer() - request_meta["start_time"]) * 1000)
if catch_response:
response.locust_request_meta = request_meta
return ResponseContextManager(response)
else:
try:
response.raise_for_status()
except FAILURE_EXCEPTIONS as e:
events.request_failure.fire(
request_type=request_meta["method"],
name=request_meta["name"],
response_time=request_meta["response_time"],
response_length=request_meta["content_size"],
exception=e,
)
else:
events.request_success.fire(
request_type=request_meta["method"],
name=request_meta["name"],
response_time=request_meta["response_time"],
response_length=request_meta["content_size"],
)
return response
def delete(self, path, **kwargs):
return self.request("DELETE", path, **kwargs)
def get(self, path, **kwargs):
"""Sends a GET request"""
return self.request("GET", path, **kwargs)
def head(self, path, **kwargs):
"""Sends a HEAD request"""
return self.request("HEAD", path, **kwargs)
def options(self, path, **kwargs):
"""Sends a OPTIONS request"""
return self.request("OPTIONS", path, **kwargs)
def patch(self, path, data=None, **kwargs):
"""Sends a POST request"""
return self.request("PATCH", path, data=data, **kwargs)
def post(self, path, data=None, **kwargs):
"""Sends a POST request"""
return self.request("POST", path, data=data, **kwargs)
def put(self, path, data=None, **kwargs):
"""Sends a PUT request"""
return self.request("PUT", path, data=data, **kwargs)
class FastResponse(CompatResponse):
    """A geventhttpclient response wrapped to look like a requests Response."""

    # Dict-like object containing the response headers
    headers = None
    # Underlying geventhttpclient response object; None on connection errors
    _response = None

    @property
    def text(self):
        """
        Returns the text content of the response as a decoded string
        (unicode on python2)
        """
        try:
            # Use the charset from the Content-Type header, falling back to utf-8
            charset = self.headers.get('content-type', '').partition("charset=")[2]
            content = unicode(self.content, charset or 'utf-8', errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # Fallback to decode without specifying encoding
            if self.content is None:
                content = None
            else:
                content = unicode(self.content, errors='replace')
        return content

    def raise_for_status(self):
        """Raise any connection errors that occurred during the request"""
        if hasattr(self, 'error') and self.error:
            raise self.error

    @property
    def status_code(self):
        """
        We override status_code in order to return 0 if no valid response was
        returned. E.g. in the case of connection errors
        """
        # Explicit branching instead of the error-prone `cond and a or b`
        # idiom; behavior unchanged: no response (or a falsy code) yields 0.
        if self._response is None:
            return 0
        return self._response.get_code() or 0

    def _content(self):
        # Without headers there was no real response, so there is no body either
        if self.headers is None:
            return None
        return super(FastResponse, self)._content()
class ErrorResponse(object):
    """Stand-in response object used when geventhttpclient raises an error
    without a real response attached (e.g. a socket-level failure).

    It exposes the same attribute surface as a real response, with empty
    defaults; the ``error`` attribute is filled in by the caller.
    """

    # Empty defaults mirroring a real response's attributes.
    headers = None
    content = None
    text = None
    error = None
    status_code = 0

    def raise_for_status(self):
        """Re-raise the exception stored in ``error``."""
        raise self.error
class LocustUserAgent(UserAgent):
    """geventhttpclient UserAgent subclass that wraps responses in FastResponse."""

    response_type = FastResponse
    # Status codes that should be accepted without the agent raising an error.
    valid_response_codes = frozenset([200, 201, 202, 203, 204, 205, 206, 207, 208, 226, 301, 302, 303, 307])

    def _urlopen(self, request):
        """Override _urlopen() in order to make it use the response_type attribute"""
        client = self.clientpool.get_client(request.url_split)
        resp = client.request(request.method, request.url_split.request_uri,
                              body=request.payload, headers=request.headers)
        return self.response_type(resp, request=request, sent_request=resp._sent_request)
class ResponseContextManager(FastResponse):
    """
    A Response class that also acts as a context manager that provides the ability to manually
    control if an HTTP request should be marked as successful or a failure in Locust's statistics

    This class is a subclass of :py:class:`FastResponse <locust.contrib.fasthttp.FastResponse>`
    with two additional methods: :py:meth:`success <locust.contrib.fasthttp.ResponseContextManager.success>`
    and :py:meth:`failure <locust.contrib.fasthttp.ResponseContextManager.failure>`.
    """
    # Set once success()/failure() has fired an event, so __exit__ does not
    # report the same request twice.
    _is_reported = False

    def __init__(self, response):
        # copy data from response to this object
        self.__dict__ = response.__dict__
        self._cached_content = response.content

    def __enter__(self):
        return self

    def __exit__(self, exc, value, traceback):
        if self._is_reported:
            # if the user has already manually marked this response as failure or success
            # we can ignore the default behaviour of letting the response code determine the outcome
            return exc is None
        if exc:
            if isinstance(value, ResponseError):
                # A ResponseError raised inside the with-block marks the request
                # as failed; returning True below suppresses the exception.
                self.failure(value)
            else:
                # Any other exception propagates to the caller unreported.
                return False
        else:
            # No exception: let the response status decide the outcome.
            try:
                self.raise_for_status()
            except FAILURE_EXCEPTIONS as e:
                self.failure(e)
            else:
                self.success()
        return True

    def success(self):
        """
        Report the response as successful

        Example::

            with self.client.get("/does/not/exist", catch_response=True) as response:
                if response.status_code == 404:
                    response.success()
        """
        events.request_success.fire(
            request_type=self.locust_request_meta["method"],
            name=self.locust_request_meta["name"],
            response_time=self.locust_request_meta["response_time"],
            response_length=self.locust_request_meta["content_size"],
        )
        self._is_reported = True

    def failure(self, exc):
        """
        Report the response as a failure.

        exc can be either a python exception, or a string in which case it will
        be wrapped inside a CatchResponseError.

        Example::

            with self.client.get("/", catch_response=True) as response:
                if response.content == "":
                    response.failure("No data")
        """
        # Allow plain strings for convenience; wrap them in an exception type
        if isinstance(exc, six.string_types):
            exc = CatchResponseError(exc)
        events.request_failure.fire(
            request_type=self.locust_request_meta["method"],
            name=self.locust_request_meta["name"],
            response_time=self.locust_request_meta["response_time"],
            response_length=self.locust_request_meta["content_size"],
            exception=exc,
        )
        self._is_reported = True
| 40.932161 | 162 | 0.636118 | from __future__ import absolute_import
import re
import six
import socket
from base64 import b64encode
from six.moves.urllib.parse import urlparse, urlunparse
from ssl import SSLError
from timeit import default_timer
if six.PY2:
from cookielib import CookieJar
class ConnectionRefusedError(Exception):
pass
else:
from http.cookiejar import CookieJar
unicode = str
import gevent
from gevent.timeout import Timeout
from geventhttpclient.useragent import UserAgent, CompatRequest, CompatResponse, ConnectionError
from geventhttpclient.response import HTTPConnectionClosed
from locust import events
from locust.core import Locust
from locust.exception import LocustError, CatchResponseError, ResponseError
CompatRequest.unverifiable = False
CompatRequest.type = "https"
absolute_http_url_regexp = re.compile(r"^https?://", re.I)
FAILURE_EXCEPTIONS = (ConnectionError, ConnectionRefusedError, socket.error, \
SSLError, Timeout, HTTPConnectionClosed)
def _construct_basic_auth_str(username, password):
if isinstance(username, str):
username = username.encode('latin1')
if isinstance(password, str):
password = password.encode('latin1')
return 'Basic ' + b64encode(b':'.join((username, password))).strip().decode("ascii")
class FastHttpLocust(Locust):
client = None
def __init__(self):
super(FastHttpLocust, self).__init__()
if self.host is None:
raise LocustError("You must specify the base host. Either in the host attribute in the Locust class, or on the command line using the --host option.")
if not re.match(r"^https?://[^/]+$", self.host, re.I):
raise LocustError("Invalid host (`%s`). The specified host string must be a base URL without a trailing slash. E.g. http://example.org" % self.host)
self.client = FastHttpSession(base_url=self.host)
class FastHttpSession(object):
auth_header = None
def __init__(self, base_url, **kwargs):
self.base_url = base_url
self.cookiejar = CookieJar()
self.client = LocustUserAgent(
max_retries=1,
cookiejar=self.cookiejar,
insecure=True,
ssl_options={"cert_reqs": gevent.ssl.CERT_NONE},
**kwargs
)
parsed_url = urlparse(self.base_url)
if parsed_url.username and parsed_url.password:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%d" % parsed_url.port
self.base_url = urlunparse((parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment))
self.auth_header = _construct_basic_auth_str(parsed_url.username, parsed_url.password)
def _build_url(self, path):
if absolute_http_url_regexp.match(path):
return path
else:
return "%s%s" % (self.base_url, path)
def _send_request_safe_mode(self, method, url, **kwargs):
try:
return self.client.urlopen(url, method=method, **kwargs)
except FAILURE_EXCEPTIONS as e:
if hasattr(e, "response"):
r = e.response
else:
r = ErrorResponse()
r.error = e
return r
def request(self, method, path, name=None, data=None, catch_response=False, stream=False, \
headers=None, auth=None, **kwargs):
url = self._build_url(path)
# store meta data that is used when reporting the request to locust's statistics
request_meta = {}
request_meta["method"] = method
request_meta["start_time"] = default_timer()
request_meta["name"] = name or path
headers = headers or {}
if auth:
headers['Authorization'] = _construct_basic_auth_str(auth[0], auth[1])
elif self.auth_header:
headers['Authorization'] = self.auth_header
if not "Accept-Encoding" in headers:
headers['Accept-Encoding'] = "gzip, deflate"
response = self._send_request_safe_mode(method, url, payload=data, headers=headers, **kwargs)
if stream:
request_meta["content_size"] = int(response.headers.get("content-length") or 0)
else:
request_meta["content_size"] = len(response.content or "")
request_meta["response_time"] = int((default_timer() - request_meta["start_time"]) * 1000)
if catch_response:
response.locust_request_meta = request_meta
return ResponseContextManager(response)
else:
try:
response.raise_for_status()
except FAILURE_EXCEPTIONS as e:
events.request_failure.fire(
request_type=request_meta["method"],
name=request_meta["name"],
response_time=request_meta["response_time"],
response_length=request_meta["content_size"],
exception=e,
)
else:
events.request_success.fire(
request_type=request_meta["method"],
name=request_meta["name"],
response_time=request_meta["response_time"],
response_length=request_meta["content_size"],
)
return response
def delete(self, path, **kwargs):
return self.request("DELETE", path, **kwargs)
def get(self, path, **kwargs):
return self.request("GET", path, **kwargs)
def head(self, path, **kwargs):
return self.request("HEAD", path, **kwargs)
def options(self, path, **kwargs):
return self.request("OPTIONS", path, **kwargs)
def patch(self, path, data=None, **kwargs):
return self.request("PATCH", path, data=data, **kwargs)
def post(self, path, data=None, **kwargs):
return self.request("POST", path, data=data, **kwargs)
def put(self, path, data=None, **kwargs):
return self.request("PUT", path, data=data, **kwargs)
class FastResponse(CompatResponse):
headers = None
_response = None
@property
def text(self):
try:
charset = self.headers.get('content-type', '').partition("charset=")[2]
content = unicode(self.content, charset or 'utf-8', errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# Fallback to decode without specifying encoding
if self.content is None:
content = None
else:
content = unicode(self.content, errors='replace')
return content
def raise_for_status(self):
if hasattr(self, 'error') and self.error:
raise self.error
@property
def status_code(self):
return self._response is not None and self._response.get_code() or 0
def _content(self):
if self.headers is None:
return None
return super(FastResponse, self)._content()
class ErrorResponse(object):
headers = None
content = None
status_code = 0
error = None
text = None
def raise_for_status(self):
raise self.error
class LocustUserAgent(UserAgent):
response_type = FastResponse
valid_response_codes = frozenset([200, 201, 202, 203, 204, 205, 206, 207, 208, 226, 301, 302, 303, 307])
def _urlopen(self, request):
client = self.clientpool.get_client(request.url_split)
resp = client.request(request.method, request.url_split.request_uri,
body=request.payload, headers=request.headers)
return self.response_type(resp, request=request, sent_request=resp._sent_request)
class ResponseContextManager(FastResponse):
_is_reported = False
def __init__(self, response):
# copy data from response to this object
self.__dict__ = response.__dict__
self._cached_content = response.content
def __enter__(self):
return self
def __exit__(self, exc, value, traceback):
if self._is_reported:
# if the user has already manually marked this response as failure or success
# we can ignore the default haviour of letting the response code determine the outcome
return exc is None
if exc:
if isinstance(value, ResponseError):
self.failure(value)
else:
return False
else:
try:
self.raise_for_status()
except FAILURE_EXCEPTIONS as e:
self.failure(e)
else:
self.success()
return True
def success(self):
events.request_success.fire(
request_type=self.locust_request_meta["method"],
name=self.locust_request_meta["name"],
response_time=self.locust_request_meta["response_time"],
response_length=self.locust_request_meta["content_size"],
)
self._is_reported = True
def failure(self, exc):
if isinstance(exc, six.string_types):
exc = CatchResponseError(exc)
events.request_failure.fire(
request_type=self.locust_request_meta["method"],
name=self.locust_request_meta["name"],
response_time=self.locust_request_meta["response_time"],
response_length=self.locust_request_meta["content_size"],
exception=exc,
)
self._is_reported = True
| true | true |
1c36b1a340344a5da1803cd31d2193cafcea85a6 | 5,197 | py | Python | docs/tutorials/1 - Beginer - Plot ALBEDOs from Smarts.py | NREL/pySMARTS | 83e702ed508eedcd8f6a6e11f2e640557f649dcd | [
"BSD-3-Clause"
] | 5 | 2021-10-04T12:41:59.000Z | 2022-01-07T04:47:14.000Z | docs/tutorials/1 - Beginer - Plot ALBEDOs from Smarts.py | NREL/py-SMARTS | 83e702ed508eedcd8f6a6e11f2e640557f649dcd | [
"BSD-3-Clause"
] | 3 | 2021-09-22T21:54:43.000Z | 2021-09-28T17:14:35.000Z | docs/tutorials/1 - Beginer - Plot ALBEDOs from Smarts.py | NREL/pySMARTS | 83e702ed508eedcd8f6a6e11f2e640557f649dcd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # 1 - Beginner - Plot Spectra and Albedos from SMARTS
#
# ##### Generate & Plot Spectra and Albedos from SMARTS
# ######      * 1. DNI and DHI for a particular time and location
# ######      * 2. Ground Albedo for various materials at AM 1.5
# ######      * 3. Ground Albedo for complete AOD and PWD Weather Data
#
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import pvlib
import datetime
import pprint
import os
# In[2]:
plt.rcParams['timezone'] = 'Etc/GMT+7'
font = {'family' : 'DejaVu Sans',
'weight' : 'normal',
'size' : 18}
plt.rc('font', **font)
plt.rcParams['figure.figsize'] = (12, 5)
# In[3]:
import pySMARTS
# In[4]:
pySMARTS.__version__
# #### Real Input data from SRRL for OCTOBER 21st, 12:45 PM
# # 1. Plot a DNI and DHI for a particular time and location
#
# In[22]:
IOUT = '2 3' # DNI and DHI
# In[21]:
YEAR = '2021'
MONTH = '06'
DAY = '21'
HOUR = '12'
LATIT = '33'
LONGIT = '110'
ALTIT = '0.9' # km above sea level
ZONE = '-7' # Timezone
# In[ ]:
pySMARTS.SMARTSTimeLocation(IOUT,YEAR,MONTH,DAY,HOUR, LATIT, LONGIT, ALTIT, ZONE)
# # 2. Plot Albedos from SMARTS
# In[16]:
IOUT = '30' # Albedo
# #### Plot Ground Albedo AM 1.0
# In[ ]:
# Query SMARTS at air mass 1.5 for each ground material and collect the
# spectral albedo curves into a single DataFrame (one column per material).
materials = ['Concrete', 'LiteLoam', 'RConcrte', 'Gravel']
alb_db = pd.DataFrame()
for i in range (0, len(materials)):
    alb = pySMARTS.SMARTSAirMass(IOUT=IOUT, AMASS='1.5', material=materials[i])
    # Second column of the SMARTS output holds the reflectance values.
    alb_db[materials[i]] = alb[alb.keys()[1]]
# The wavelength axis is taken from the last query's output.
alb_db.index = alb.Wvlgth
alb_db_10 = alb_db  # NOTE(review): alias, not a copy — presumably meant as an AM-1 snapshot; confirm
for col in alb_db:
    alb_db[col].plot(legend=True)
plt.xlabel('Wavelength [nm]')
plt.xlim([300, 2500])
# Horizontal reference lines at the measured broadband albedos (8.4% and 10%),
# per the notes below.
plt.axhline(y=0.084, color='r')
plt.axhline(y=0.10, color='r')
#UV albedo: 295 to 385
#Total albedo: 300 to 3000
#10.4 and 8.4 $ Measured
#References
plt.ylim([0,1])
plt.ylabel('Reflectance')
plt.legend(bbox_to_anchor=(1.04,0.75), loc="upper left")
plt.title('Ground albedos AM 1')
plt.show()
# Band averages: rows 40:1801 cover roughly 300-3000 nm (broadband) and rows
# 30:210 roughly 295-385 nm (UV), per the range notes above.
vis=alb_db.iloc[40:1801].mean()
uv=alb_db.iloc[30:210].mean()
print(vis)
print(uv)
# ## Extra: Averaging Albedos for Visible and UV
#
# In[ ]:
vis=alb_db.iloc[40:1801].mean()
uv=alb_db.iloc[30:210].mean()
print("Albedo on Visible Range:\n", vis)
print("Albedo on UV Range:\n", uv)
# <div class="alert alert-block alert-info"><b>Tip: </b> If you want full spectrum averages, we recommend interpolating as the default granularity of SMARTS at higher wavelengths is not the same than at lower wavelengths, thus the 'step' is not the same. </div>
#
# In[68]:
r = pd.RangeIndex(2800,40000, 5)
r = r/10
alb2 = alb_db.reindex(r, method='ffill')
print("Albedo for all wavelengths:", alb2.mean())
# In[74]:
# FYI: Wavelengths corresponding to the albedo before and after interpolating
"""
# Visible
alb_db.iloc[40] # 300
alb_db.iloc[1801] # 3000
# UV
alb_db.iloc[30] # 295
alb_db.iloc[210] # 385
# Visible
alb2.iloc[40] # 300
alb2.iloc[5440] # 3000
# UV
alb2.iloc[30] # 295
alb2.iloc[210] # 385
"""
# # 3. ADVANCED: Plot Ground Albedo for More Complete Weather Data
#
# #### This asumes you know a lot more parameters about your weather data souch as: Broadband Turbidity, Aeorsol Opticla Density parameters, and Precipitable Water.
#
# ### Real Input data from SRRL for OCTOBER 21st, 12:45 PM
# In[7]:
# Measured SRRL weather inputs for Oct 21, 2020 12:45 PM (all values passed to
# SMARTS as strings, per the pySMARTS API).
alb = 0.2205
YEAR='2020'; MONTH='10'; DAY='21'; HOUR = '12.75'
LATIT='39.74'; LONGIT='-105.17'; ALTIT='1.0'; ZONE='-7'
TILT='33.0'; WAZIM='180.0'; HEIGHT='0'
material='DryGrass'
min_wvl='280'; Max_wvl='4000'
TAIR = '20.3'
RH = '2.138'
SEASON = 'WINTER'
TDAY = '12.78'
SPR = '810.406'
RHOG = '0.2205'
WAZIMtracker = '270'
TILTtracker = '23.37'
tracker_tetha_bifrad = '-23.37'
# NOTE(review): TAU5 is assigned twice — the AOD[500nm] value below overrides
# the broadband turbidity; TAU5=None is passed to the call anyway. Confirm intent.
TAU5='0.18422' # SRRL-GRAL "Broadband Turbidity"
TAU5 = '0.037' # SRRL-AOD [500nm]
GG = '0.7417' # SSRL-AOD Asymmetry [500nm]
BETA = '0.0309' # SRRL-AOD Beta
ALPHA = '0.1949' # SRRL-AOD Alpha [Angstrom exp]
OMEGL = '0.9802' # SRRL-AOD SSA [500nm]
W = str(7.9/10) # SRRL-PWD Precipitable Water [mm] — divides by 10; presumably a mm-to-cm conversion, confirm units
# In[8]:
material = 'DryGrass'
alb_db = pd.DataFrame()
# Run SMARTS with the full SRRL weather description (AOD parameters instead of
# broadband turbidity: TAU5=None) and collect the spectral albedo.
alb = pySMARTS.SMARTSSRRL(
    IOUT=IOUT, YEAR=YEAR, MONTH=MONTH,DAY=DAY, HOUR='12.45', LATIT=LATIT,
    LONGIT=LONGIT, ALTIT=ALTIT,
    ZONE=ZONE, W=W, RH=RH, TAIR=TAIR,
    SEASON=SEASON, TDAY=TDAY, TAU5=None, SPR=SPR,
    TILT=TILT, WAZIM=WAZIM,
    ALPHA1 = ALPHA, ALPHA2 = 0, OMEGL = OMEGL,
    GG = GG, BETA = BETA,
    RHOG=RHOG, HEIGHT=HEIGHT, material=material, POA = True)
# Second output column holds the reflectance; index by wavelength.
alb_db[material] = alb[alb.keys()[1]]
alb_db.index = alb.Wvlgth
# In[ ]:
alb_db[material].plot(legend=True, color='y')
plt.xlabel('Wavelength [nm]')
plt.xlim([300, 2500])
plt.ylim([0,1])
plt.ylabel('Reflectance')
plt.legend(bbox_to_anchor=(1.04,0.75), loc="upper left")
plt.title('Albedo @ 12.45 Oct 21, 2020 for SRRL Weather Data ')
plt.show()
# ### A plotly plot to explore the results
# In[24]:
import plotly.express as px
# In[ ]:
fig = px.line(alb_db[material], title='Albedo @ 12.45 Oct 21, 2020 for SRRL Weather Data')
fig.update_layout(xaxis_title='Wavelength [nm]',
yaxis_title='Reflectance')
fig.show()
| 19.537594 | 261 | 0.655186 | true | true | |
1c36b1faf43655ad512765e901d3ca72bc22fca3 | 1,728 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/tests/test_unicode_names.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 8 | 2019-10-07T16:33:47.000Z | 2020-12-07T03:59:58.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/tests/test_unicode_names.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 1 | 2018-04-03T22:37:40.000Z | 2018-04-03T23:53:43.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/tests/test_unicode_names.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 5 | 2020-08-27T20:44:18.000Z | 2021-08-21T22:54:11.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from numba import njit, cfunc, cgutils
from numba.six import exec_
from numba.utils import PY2
from .support import TestCase, unittest
unicode_name1 = u"""
def unicode_name1(ಠ_ರೃ, ಠਊಠ):
return (ಠ_ರೃ) + (ಠਊಠ)
"""
unicode_name2 = u"""
def Ծ_Ծ(ಠ_ರೃ, ಠਊಠ):
return (ಠ_ರೃ) + (ಠਊಠ)
"""
@unittest.skipIf(PY2, "unicode identifier not supported in python2")
class TestUnicodeNames(TestCase):
    """Check that numba can compile functions whose names and/or argument
    names use non-ASCII (unicode) identifiers."""

    def make_testcase(self, src, fname):
        # Execute the source snippet and return the function object it defines.
        glb = {}
        exec_(src, glb)
        fn = glb[fname]
        return fn

    def test_unicode_name1(self):
        # Unicode argument names with an ASCII function name.
        fn = self.make_testcase(unicode_name1, 'unicode_name1')
        cfn = njit(fn)
        self.assertEqual(cfn(1, 2), 3)

    def test_unicode_name2(self):
        # Unicode function name as well as unicode argument names.
        fn = self.make_testcase(unicode_name2, 'Ծ_Ծ')
        cfn = njit(fn)
        self.assertEqual(cfn(1, 2), 3)

    def test_cfunc(self):
        # cfunc compilation path with an explicit C signature.
        fn = self.make_testcase(unicode_name2, 'Ծ_Ծ')
        cfn = cfunc("int32(int32, int32)")(fn)
        self.assertEqual(cfn.ctypes(1, 2), 3)
class TestUnicodeUtils(TestCase):
    """Tests for the cgutils helper that normalizes LLVM IR text so it can be
    safely encoded (e.g. when identifiers contain unicode)."""

    def test_normalize_ir_text(self):
        # non-unicode input
        out = cgutils.normalize_ir_text('abc')
        # str returned
        self.assertIsInstance(out, str)
        # try encoding to latin
        out.encode('latin1')

    @unittest.skipIf(PY2, "unicode identifier not supported in python2")
    def test_normalize_ir_text_py3(self):
        # unicode input
        out = cgutils.normalize_ir_text(unicode_name2)
        # str returned
        self.assertIsInstance(out, str)
        # try encoding to latin
        out.encode('latin1')
if __name__ == '__main__':
unittest.main()
| 25.411765 | 72 | 0.640046 |
from __future__ import print_function, absolute_import
from numba import njit, cfunc, cgutils
from numba.six import exec_
from numba.utils import PY2
from .support import TestCase, unittest
unicode_name1 = u"""
def unicode_name1(ಠ_ರೃ, ಠਊಠ):
return (ಠ_ರೃ) + (ಠਊಠ)
"""
unicode_name2 = u"""
def Ծ_Ծ(ಠ_ರೃ, ಠਊಠ):
return (ಠ_ರೃ) + (ಠਊಠ)
"""
@unittest.skipIf(PY2, "unicode identifier not supported in python2")
class TestUnicodeNames(TestCase):
def make_testcase(self, src, fname):
glb = {}
exec_(src, glb)
fn = glb[fname]
return fn
def test_unicode_name1(self):
fn = self.make_testcase(unicode_name1, 'unicode_name1')
cfn = njit(fn)
self.assertEqual(cfn(1, 2), 3)
def test_unicode_name2(self):
fn = self.make_testcase(unicode_name2, 'Ծ_Ծ')
cfn = njit(fn)
self.assertEqual(cfn(1, 2), 3)
def test_cfunc(self):
fn = self.make_testcase(unicode_name2, 'Ծ_Ծ')
cfn = cfunc("int32(int32, int32)")(fn)
self.assertEqual(cfn.ctypes(1, 2), 3)
class TestUnicodeUtils(TestCase):
def test_normalize_ir_text(self):
out = cgutils.normalize_ir_text('abc')
self.assertIsInstance(out, str)
out.encode('latin1')
@unittest.skipIf(PY2, "unicode identifier not supported in python2")
def test_normalize_ir_text_py3(self):
out = cgutils.normalize_ir_text(unicode_name2)
self.assertIsInstance(out, str)
out.encode('latin1')
if __name__ == '__main__':
unittest.main()
| true | true |
1c36b2e967d5b6bf57c223c551285b333e5b2dd0 | 8,735 | py | Python | release/stubs.min/System/Windows/Media/Animation_parts/MatrixKeyFrame.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Media/Animation_parts/MatrixKeyFrame.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Media/Animation_parts/MatrixKeyFrame.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class MatrixKeyFrame(Freezable, ISealable, IKeyFrame):
""" Abstract class that,when implemented,defines an animation segment with its own target value and interpolation method for a System.Windows.Media.Animation.MatrixAnimationUsingKeyFrames. """
def CloneCore(self, *args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValueCore(self, *args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified System.Windows.Freezable
using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateInstance(self, *args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self, *args):
"""
CreateInstanceCore(self: Freezable) -> Freezable
When implemented in a derived class,creates a new instance of the System.Windows.Freezable
derived class.
Returns: The new instance.
"""
pass
def FreezeCore(self, *args):
"""
FreezeCore(self: Freezable,isChecking: bool) -> bool
Makes the System.Windows.Freezable object unmodifiable or tests whether it can be made
unmodifiable.
isChecking: true to return an indication of whether the object can be frozen (without actually freezing it);
false to actually freeze the object.
Returns: If isChecking is true,this method returns true if the System.Windows.Freezable can be made
unmodifiable,or false if it cannot be made unmodifiable. If isChecking is false,this method
returns true if the if the specified System.Windows.Freezable is now unmodifiable,or false if
it cannot be made unmodifiable.
"""
pass
def GetAsFrozenCore(self, *args):
"""
GetAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a frozen clone of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The instance to copy.
"""
pass
def GetCurrentValueAsFrozenCore(self, *args):
"""
GetCurrentValueAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified System.Windows.Freezable. If the
object has animated dependency properties,their current animated values are copied.
sourceFreezable: The System.Windows.Freezable to copy and freeze.
"""
pass
def InterpolateValue(self, baseValue, keyFrameProgress):
"""
InterpolateValue(self: MatrixKeyFrame,baseValue: Matrix,keyFrameProgress: float) -> Matrix
Returns the interpolated value of a specific key frame at the progress increment provided.
baseValue: The value to animate from.
keyFrameProgress: A value between 0.0 and 1.0,inclusive,that specifies the percentage of time that has elapsed
for this key frame.
Returns: The output value of this key frame given the specified base value and progress.
"""
pass
def InterpolateValueCore(self, *args):
"""
InterpolateValueCore(self: MatrixKeyFrame,baseValue: Matrix,keyFrameProgress: float) -> Matrix
Calculates the value of a key frame at the progress increment provided.
baseValue: The value to animate from; typically the value of the previous key frame.
keyFrameProgress: A value between 0.0 and 1.0,inclusive,that specifies the percentage of time that has elapsed
for this key frame.
Returns: The output value of this key frame given the specified base value and progress.
"""
pass
def OnChanged(self, *args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self, *args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self, *args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self, *args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
def ShouldSerializeProperty(self, *args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def WritePostscript(self, *args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self, *args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, *args): # cannot find CLR constructor
"""
__new__(cls: type)
__new__(cls: type,value: Matrix)
__new__(cls: type,value: Matrix,keyTime: KeyTime)
"""
pass
KeyTime = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the time at which the key frame's target System.Windows.Media.Animation.MatrixKeyFrame.Value should be reached.
Get: KeyTime(self: MatrixKeyFrame) -> KeyTime
Set: KeyTime(self: MatrixKeyFrame)=value
"""
Value = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the key frame's target value.
Get: Value(self: MatrixKeyFrame) -> Matrix
Set: Value(self: MatrixKeyFrame)=value
"""
KeyTimeProperty = None
ValueProperty = None
| 26.231231 | 221 | 0.687235 | class MatrixKeyFrame(Freezable, ISealable, IKeyFrame):
def CloneCore(self, *args):
pass
def CloneCurrentValueCore(self, *args):
pass
def CreateInstance(self, *args):
pass
def CreateInstanceCore(self, *args):
pass
def FreezeCore(self, *args):
pass
def GetAsFrozenCore(self, *args):
pass
def GetCurrentValueAsFrozenCore(self, *args):
pass
def InterpolateValue(self, baseValue, keyFrameProgress):
pass
def InterpolateValueCore(self, *args):
pass
def OnChanged(self, *args):
pass
def OnFreezablePropertyChanged(self, *args):
pass
def OnPropertyChanged(self, *args):
pass
def ReadPreamble(self, *args):
pass
def ShouldSerializeProperty(self, *args):
pass
def WritePostscript(self, *args):
pass
def WritePreamble(self, *args):
pass
def __init__(self, *args):
pass
@staticmethod
def __new__(self, *args):
pass
KeyTime = property(lambda self: object(), lambda self, v: None, lambda self: None)
Value = property(lambda self: object(), lambda self, v: None, lambda self: None)
KeyTimeProperty = None
ValueProperty = None
| true | true |
1c36b38c2e0e0cd671d509a564456eee309b7cf3 | 587 | py | Python | districts/migrations/0001_initial.py | Aniketh896/CovidSurveillance | 54b358a3fc31f0dc64032cba0150253e11fdbbd9 | [
"MIT"
] | 5 | 2020-05-06T08:10:43.000Z | 2021-05-26T18:55:57.000Z | districts/migrations/0001_initial.py | Aniketh896/CovidSurveillance | 54b358a3fc31f0dc64032cba0150253e11fdbbd9 | [
"MIT"
] | 9 | 2020-05-18T14:01:50.000Z | 2022-03-12T00:30:23.000Z | districts/migrations/0001_initial.py | Aniketh896/CovidSurveillance | 54b358a3fc31f0dc64032cba0150253e11fdbbd9 | [
"MIT"
] | 6 | 2020-05-06T09:01:29.000Z | 2021-05-20T15:25:30.000Z | # Generated by Django 3.0.6 on 2020-05-05 21:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``DistrictsModel`` table.

    Columns mirror the model fields: an auto primary key, an integer count of
    active cases, and free-text district and zone names.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DistrictsModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active_cases', models.IntegerField()),
                ('district', models.TextField()),
                ('zone', models.TextField()),
            ],
        ),
    ]
| 24.458333 | 114 | 0.55707 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DistrictsModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active_cases', models.IntegerField()),
('district', models.TextField()),
('zone', models.TextField()),
],
),
]
| true | true |
1c36b3adf05871417441451bc5d79cf3af372fe2 | 8,367 | py | Python | PrecisionRecallTest.py | PerttuHamalainen/DRMM | 1d7d52df95adee344516322700209f3a9f8147fb | [
"MIT"
] | 5 | 2020-08-04T07:23:24.000Z | 2021-12-02T21:01:03.000Z | PrecisionRecallTest.py | PerttuHamalainen/DRMM | 1d7d52df95adee344516322700209f3a9f8147fb | [
"MIT"
] | 9 | 2020-08-10T00:39:21.000Z | 2022-03-12T00:43:59.000Z | PrecisionRecallTest.py | PerttuHamalainen/DRMM | 1d7d52df95adee344516322700209f3a9f8147fb | [
"MIT"
] | null | null | null |
import numpy as np
import random
import os
import matplotlib.pyplot as pp
#os.environ["CUDA_VISIBLE_DEVICES"]="-1" #disable Tensorflow GPU usage, these simple graphs run faster on CPU
import tensorflow as tf
import DRMM as DRMM
from skimage.util import view_as_blocks
from precision_recall import knn_precision_recall_features
import MocapUtils as mocap
import argparse
# Command-line options for the precision/recall benchmark run.
parser = argparse.ArgumentParser()
parser.add_argument('--datasetIdx', type=int, default=1)   # index into the `datasets` list below
parser.add_argument('--modelIdx', type=int, default=0)     # index into `layerAmounts` (number of DRMM layers)
parser.add_argument('--nIter', type=int, default=50000)    # training iterations per model
parser.add_argument('--nEval', type=int, default=20000)    # target number of samples for evaluation
args = parser.parse_args()
datasetIdx=args.datasetIdx
modelIdx=args.modelIdx
nIter=args.nIter
nBatch=64                        # training minibatch size
initialLearningRate=0.002
#datasets=["IK (arm)"]
datasets=["IK (fullbody)","Motion Capture"]
nTargetEvalSamples=args.nEval
#Returns squared distance matrix D with elements d_ij = | a_i - b_j|^2, where a_i = A[i,:] and b_j=B[j,:]
def pairwiseSqDistances(A, B):
    """Return the matrix D of squared Euclidean distances, D[i, j] = |a_i - b_j|^2.

    Uses the expansion (a - b)'(a - b) = a'a - 2 a'b + b'b, evaluated with a
    single matrix product plus broadcast row/column norms.
    """
    # Column vector of |a_i|^2 and row vector of |b_j|^2; broadcasting fills the grid.
    rowNormsSq = (A * A).sum(axis=1)[:, np.newaxis]
    colNormsSq = (B * B).sum(axis=1)[np.newaxis, :]
    crossTerms = A @ B.T
    # Floating-point cancellation can produce tiny negatives; clamp them to zero.
    return np.clip(rowNormsSq - 2 * crossTerms + colNormsSq, 0, np.inf)
def modifiedHausdorffDistance(A, B):
    """Symmetric set-to-set distance between point sets A and B.

    Sums, over both directions, each point's squared distance to its nearest
    neighbour in the other set, and returns the square root of that total.
    NOTE(review): this differs from the textbook "modified Hausdorff distance"
    (which averages per direction and takes the max); kept as implemented.
    """
    squared = pairwiseSqDistances(A, B)
    towardA = np.min(squared, axis=0).sum()  # nearest-A squared distance for each point of B
    towardB = np.min(squared, axis=1).sum()  # nearest-B squared distance for each point of A
    return np.sqrt(towardA + towardB)
def numDrmmParameters(dataDim, nLayers, nComponentsPerLayer):
    """Count the trainable parameters of a DRMM with the given architecture.

    Each layer contributes one scalar variance, one mean (or class prototype)
    per component over the layer's input variables, and one marginal
    probability per component. Layer outputs are appended to the inputs, so
    the input size grows by nComponentsPerLayer per layer.
    """
    total = 0
    inputVars = dataDim
    for _ in range(nLayers):
        # variance scalar + component means + marginal probabilities
        total += 1 + inputVars * nComponentsPerLayer + nComponentsPerLayer
        inputVars += nComponentsPerLayer
    return total
plotIdx=0
dataset=datasets[datasetIdx]

#Load or create data
#After this block, `data` is an (N, dataDim) array with one sample per row.
if dataset=="Swissroll 3D":
    print("Creating 3D swissroll data")
    x=[]
    noiseSd=0.0
    for angle in np.arange(0,4.0*np.pi,0.001):
        #swiss roll
        x.append(np.reshape(0.5*angle*np.array([np.sin(angle),np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))
        #circle
        #x.append(np.reshape(np.array([np.sin(angle),np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))
        #sine wave
        #x.append(np.reshape(np.array([angle,np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))
    data=np.concatenate(x)
    #Third coordinate is uniform noise, extruding the 2D roll into 3D.
    data=np.concatenate([data,np.random.uniform(-2,2,size=[data.shape[0],1])],axis=1)
elif dataset=="Sierpinski 2D":
    x=[]
    #Recursively subdivide the triangle (x0, x1, x2); at depth 0, emit its corners.
    def sierpinski(x0,x1,x2,data,depth=8):
        if depth==0:
            data.append(x0)
            data.append(x1)
            data.append(x2)
        else:
            depth-=1
            sierpinski(x0,0.5*(x0+x1),0.5*(x0+x2),data,depth)
            sierpinski(x1,0.5*(x1+x0),0.5*(x1+x2),data,depth)
            sierpinski(x2,0.5*(x2+x0),0.5*(x2+x1),data,depth)
    def pointOnUnitCircle(angle):
        return np.array([np.sin(angle),np.cos(angle)])
    #Start from an equilateral triangle inscribed in the unit circle.
    sierpinski(pointOnUnitCircle(0),pointOnUnitCircle(1.0/3.0*2.0*np.pi),pointOnUnitCircle(2.0/3.0*2.0*np.pi),x)
    data=np.array(x)
elif dataset=="IK (arm)":
    print("Loading data")
    dataFile="./IKTest/arm_data.npy"
    data=np.load(dataFile)
elif dataset=="IK (fullbody)":
    print("Loading data")
    dataFile="./IKTest/fullbody_data.npy"
    data=np.load(dataFile)
elif dataset == "Motion Capture":
    print("Loading Motion Capture Data")
    mocapData = mocap.MocapDataset("mocapdata/laforge_locomotion_nosliding.zip",
                                   sequenceLength=30,
                                   optimizeForSpeed=True)
    #Flatten each (sequenceLength x channels) sequence into one row vector.
    data=mocapData.allSequences.reshape([mocapData.allSequences.shape[0],-1])
else:
    raise Exception("Invalid dataset")
dataDim=data.shape[1]
print("Dataset has {} vectors of {} variables".format(data.shape[0],data.shape[1]))
#if data.shape[0]>maxData:
#    data=data[:maxData]
def getDataBatch(nBatch):
    """Draw nBatch rows uniformly at random (with replacement) from the
    module-level `data` array."""
    rowIndices = np.random.randint(data.shape[0], size=nBatch)
    return data[rowIndices, :]
layerAmounts=[1,2,3,4]
nLayers=layerAmounts[modelIdx]

#We test GMM with 64,128... components, and DRMM:s with the same (approximately) number of parameters
for modelSize in [64,128,256,512,1024]:
    if nLayers==1:
        nComponentsPerLayer=modelSize
    else:
        #Grow the per-layer component count until the multi-layer model has at
        #least as many parameters as the 1-layer (GMM) baseline of this size.
        targetNumParams=numDrmmParameters(dataDim,1,modelSize)
        nComponentsPerLayer=4
        while (numDrmmParameters(dataDim,nLayers,nComponentsPerLayer)<targetNumParams):
            nComponentsPerLayer+=1
    nParameters=numDrmmParameters(dataDim,nLayers,nComponentsPerLayer)

    #Init tf (TF1-style graph/session API; a fresh graph per model size)
    tf.reset_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    sess = tf.Session(config=config)

    #create model
    if nParameters<2000: # or (datasetIdx==2 and nLayers==1): #run small models on CPU (faster), and also run the datasetIdx==2 and nLayers==1 cases on CPU because they need a lot of memory and the dev. laptop GPU is only 6GB
        with tf.device('/cpu:0'):
            model=DRMM.DRMM(sess=sess,
                            nLayers=nLayers,
                            nComponentsPerLayer=nComponentsPerLayer,
                            initialLearningRate=initialLearningRate,
                            inputs=DRMM.dataStream("continuous",shape=[None,dataDim]))
    else:
        model = DRMM.DRMM(sess=sess,
                          nLayers=nLayers,
                          nComponentsPerLayer=nComponentsPerLayer,
                          initialLearningRate=initialLearningRate,
                          inputs=DRMM.dataStream("continuous", shape=[None, dataDim]))
    assert(nParameters==model.nParameters) #check that our parameter amount estimation was correct

    #Initialize model parameters from (at most) the first 2000 data vectors
    tf.global_variables_initializer().run(session=sess)
    model.init(data[:min([2000,data.shape[0]])])

    #Optimize
    for i in range(nIter):
        info = model.train(i / nIter, getDataBatch(nBatch))
        # Print progress
        if i % 100 == 0 or i == nIter - 1:
            logp = np.mean(
                model.getLogP(inputs=DRMM.DataIn(data=getDataBatch(1024),mask=np.ones([1024,dataDim])))) # evaluate log-likelihood of a large data batch
            print(
                "\rIteration {}/{}, phase {:.3f} Loss {:.3f}, logp {:.3f} learning rate {:.6f}, precision {:.3f}".format(
                    i, nIter, i / nIter, info["loss"], logp, info["lr"], info["rho"]), end="")

    #Evaluate: draw generated samples in chunks of at most 10000
    nEvalSamples=min([data.shape[0],nTargetEvalSamples])
    print("\nGenerating {} samples".format(nEvalSamples))
    sampled_fetch=np.zeros([nEvalSamples,dataDim])
    nSampled=0
    while (nSampled<nEvalSamples):
        batchSize=min([10000,nEvalSamples-nSampled])
        sampled_fetch[nSampled:nSampled+batchSize]=model.sample(nSamples=batchSize)
        nSampled+=batchSize
    #print(sampled_fetch)
    print("Evaluating")
    #Match the size of the real-data reference set to the generated set.
    if nEvalSamples<data.shape[0]:
        evalData=getDataBatch(nEvalSamples)
    else:
        evalData=data
    #evalData=data[:min([nEvalSamples, data.shape[0]])]
    logp = np.mean(model.getLogP(inputs=DRMM.DataIn(data=evalData,mask=np.ones_like(evalData))))
    with sess.as_default():
        #Precision and recall code from: https://github.com/kynkaat/improved-precision-and-recall-metric
        precrecall=knn_precision_recall_features(evalData,sampled_fetch,row_batch_size=10000)
    precision=precrecall['precision'][0]
    recall=precrecall['recall'][0]
    f1=2.0*(recall * precision) / (recall + precision + 1e-8)
    print("F1 {}, logp {}".format(f1,logp))

    #Append one CSV row per model size; header is written on first use.
    #NOTE(review): assumes the Results/ directory already exists.
    logFileName="Results/benchmark_precrecall.csv"
    if not os.path.isfile(logFileName):
        logFile=open(logFileName,"w")
        logFile.write("dataset,datasetIdx,nLayers,nComponentsPerLayer,nParameters,precision,recall,f1,logp\n")
    else:
        logFile=open(logFileName,"a")
    #logFile.write("dataset,datasetIdx,nLayers,nComponentsPerLayer,sampleQuality")
    logFile.write("{},{},{},{},{},{},{},{},{}\n".format(dataset,datasetIdx,nLayers,nComponentsPerLayer,model.nParameters,precision,recall,f1,logp))
    logFile.close()
    #pp.close()
| 40.616505 | 226 | 0.668101 |
import numpy as np
import random
import os
import matplotlib.pyplot as pp
t view_as_blocks
from precision_recall import knn_precision_recall_features
import MocapUtils as mocap
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--datasetIdx', type=int, default=1)
parser.add_argument('--modelIdx', type=int, default=0)
parser.add_argument('--nIter', type=int, default=50000)
parser.add_argument('--nEval', type=int, default=20000)
args = parser.parse_args()
datasetIdx=args.datasetIdx
modelIdx=args.modelIdx
nIter=args.nIter
nBatch=64
initialLearningRate=0.002
datasets=["IK (fullbody)","Motion Capture"]
nTargetEvalSamples=args.nEval
def pairwiseSqDistances(A,B):
A_d=np.sum(A * A,axis=1,keepdims=True)
B_d=np.reshape(np.sum(B * B,axis=1),[1,B.shape[0]])
return np.clip(A_d - 2 * np.matmul(A,np.transpose(B)) + B_d,0,np.inf) #relu to ensure no negative results due to computational inaccuracy
def modifiedHausdorffDistance(A,B):
sqDist=pairwiseSqDistances(A,B)
return np.sqrt(np.sum(np.min(sqDist,axis=0))+np.sum(np.min(sqDist,axis=1)))
def numDrmmParameters(dataDim,nLayers,nComponentsPerLayer):
nParameters=0
layerInputVars=dataDim
for layerIdx in range(nLayers):
nParameters+=1 #scalar variance parameter
nParameters+=layerInputVars*nComponentsPerLayer #Gaussian means or class prototypes
nParameters+=nComponentsPerLayer #marginal probabilities
layerInputVars+=nComponentsPerLayer
return nParameters
plotIdx=0
dataset=datasets[datasetIdx]
#Load or create data
if dataset=="Swissroll 3D":
print("Creating 3D swissroll data")
x=[]
noiseSd=0.0
for angle in np.arange(0,4.0*np.pi,0.001):
#swiss roll
x.append(np.reshape(0.5*angle*np.array([np.sin(angle),np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))
#circle
#x.append(np.reshape(np.array([np.sin(angle),np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))
#sine wave
#x.append(np.reshape(np.array([angle,np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))
data=np.concatenate(x)
data=np.concatenate([data,np.random.uniform(-2,2,size=[data.shape[0],1])],axis=1)
elif dataset=="Sierpinski 2D":
x=[]
def sierpinski(x0,x1,x2,data,depth=8):
if depth==0:
data.append(x0)
data.append(x1)
data.append(x2)
else:
depth-=1
sierpinski(x0,0.5*(x0+x1),0.5*(x0+x2),data,depth)
sierpinski(x1,0.5*(x1+x0),0.5*(x1+x2),data,depth)
sierpinski(x2,0.5*(x2+x0),0.5*(x2+x1),data,depth)
def pointOnUnitCircle(angle):
return np.array([np.sin(angle),np.cos(angle)])
sierpinski(pointOnUnitCircle(0),pointOnUnitCircle(1.0/3.0*2.0*np.pi),pointOnUnitCircle(2.0/3.0*2.0*np.pi),x)
data=np.array(x)
elif dataset=="IK (arm)":
print("Loading data")
dataFile="./IKTest/arm_data.npy"
data=np.load(dataFile)
elif dataset=="IK (fullbody)":
print("Loading data")
dataFile="./IKTest/fullbody_data.npy"
data=np.load(dataFile)
elif dataset == "Motion Capture":
print("Loading Motion Capture Data")
mocapData = mocap.MocapDataset("mocapdata/laforge_locomotion_nosliding.zip",
sequenceLength=30,
optimizeForSpeed=True)
data=mocapData.allSequences.reshape([mocapData.allSequences.shape[0],-1])
else:
raise Exception("Invalid dataset")
dataDim=data.shape[1]
print("Dataset has {} vectors of {} variables".format(data.shape[0],data.shape[1]))
#if data.shape[0]>maxData:
# data=data[:maxData]
#A helper function for extracting a random data batch
def getDataBatch(nBatch):
return data[np.random.randint(data.shape[0], size=nBatch),:]
layerAmounts=[1,2,3,4]
nLayers=layerAmounts[modelIdx]
#We test GMM with 64,128... components, and DRMM:s with the same (approximately) number of parameters
for modelSize in [64,128,256,512,1024]:
if nLayers==1:
nComponentsPerLayer=modelSize
else:
targetNumParams=numDrmmParameters(dataDim,1,modelSize)
nComponentsPerLayer=4
while (numDrmmParameters(dataDim,nLayers,nComponentsPerLayer)<targetNumParams):
nComponentsPerLayer+=1
nParameters=numDrmmParameters(dataDim,nLayers,nComponentsPerLayer)
#Init tf
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
#create model
if nParameters<2000: # or (datasetIdx==2 and nLayers==1): #run small models on CPU (faster), and also run the datasetIdx==2 and nLayers==1 cases on CPU because they need a lot of memory and the dev. laptop GPU is only 6GB
with tf.device('/cpu:0'):
model=DRMM.DRMM(sess=sess,
nLayers=nLayers,
nComponentsPerLayer=nComponentsPerLayer,
initialLearningRate=initialLearningRate,
inputs=DRMM.dataStream("continuous",shape=[None,dataDim]))
else:
model = DRMM.DRMM(sess=sess,
nLayers=nLayers,
nComponentsPerLayer=nComponentsPerLayer,
initialLearningRate=initialLearningRate,
inputs=DRMM.dataStream("continuous", shape=[None, dataDim]))
assert(nParameters==model.nParameters) #check that our parameter amount estimation was correct
#Initialize
tf.global_variables_initializer().run(session=sess)
model.init(data[:min([2000,data.shape[0]])])
#Optimize
for i in range(nIter):
info = model.train(i / nIter, getDataBatch(nBatch))
# Print progress
if i % 100 == 0 or i == nIter - 1:
logp = np.mean(
model.getLogP(inputs=DRMM.DataIn(data=getDataBatch(1024),mask=np.ones([1024,dataDim])))) # evaluate log-likelihood of a large data batch
print(
"\rIteration {}/{}, phase {:.3f} Loss {:.3f}, logp {:.3f} learning rate {:.6f}, precision {:.3f}".format(
i, nIter, i / nIter, info["loss"], logp, info["lr"], info["rho"]), end="")
#Evaluate
nEvalSamples=min([data.shape[0],nTargetEvalSamples])
print("\nGenerating {} samples".format(nEvalSamples))
sampled_fetch=np.zeros([nEvalSamples,dataDim])
nSampled=0
while (nSampled<nEvalSamples):
batchSize=min([10000,nEvalSamples-nSampled])
sampled_fetch[nSampled:nSampled+batchSize]=model.sample(nSamples=batchSize)
nSampled+=batchSize
#print(sampled_fetch)
print("Evaluating")
if nEvalSamples<data.shape[0]:
evalData=getDataBatch(nEvalSamples)
else:
evalData=data
#evalData=data[:min([nEvalSamples, data.shape[0]])]
logp = np.mean(model.getLogP(inputs=DRMM.DataIn(data=evalData,mask=np.ones_like(evalData))))
with sess.as_default():
#Precision and recall code from: https://github.com/kynkaat/improved-precision-and-recall-metric
precrecall=knn_precision_recall_features(evalData,sampled_fetch,row_batch_size=10000)
precision=precrecall['precision'][0]
recall=precrecall['recall'][0]
f1=2.0*(recall * precision) / (recall + precision + 1e-8)
print("F1 {}, logp {}".format(f1,logp))
logFileName="Results/benchmark_precrecall.csv"
if not os.path.isfile(logFileName):
logFile=open(logFileName,"w")
logFile.write("dataset,datasetIdx,nLayers,nComponentsPerLayer,nParameters,precision,recall,f1,logp\n")
else:
logFile=open(logFileName,"a")
#logFile.write("dataset,datasetIdx,nLayers,nComponentsPerLayer,sampleQuality")
logFile.write("{},{},{},{},{},{},{},{},{}\n".format(dataset,datasetIdx,nLayers,nComponentsPerLayer,model.nParameters,precision,recall,f1,logp))
logFile.close()
#pp.close()
| true | true |
1c36b528cd2bec19c416e69edd29c1a4915ee0be | 5,891 | py | Python | docs/source/content/example_notebooks/cantilever/model_static_cantilever.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 80 | 2018-08-30T13:01:52.000Z | 2022-03-24T15:02:48.000Z | docs/source/content/example_notebooks/cantilever/model_static_cantilever.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 88 | 2018-05-17T16:18:58.000Z | 2022-03-11T21:05:48.000Z | docs/source/content/example_notebooks/cantilever/model_static_cantilever.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 44 | 2018-01-02T14:27:28.000Z | 2022-03-12T13:49:36.000Z | import h5py as h5
import numpy as np
import os
def clean_test_files(route, case_name):
    """Delete the generated SHARPy case files for `case_name` under `route`.

    Removes '<route>/<case_name>.fem.h5' and '<route>/<case_name>.sharpy'
    if they exist; missing files are silently ignored.
    """
    # Same check-and-remove applied to both generated files.
    for extension in ('.fem.h5', '.sharpy'):
        file_name = route + '/' + case_name + extension
        if os.path.isfile(file_name):
            os.remove(file_name)
def generate_fem_file(route, case_name, num_elem, deadforce=600e3, followerforce=0):
    """Write '<route>/<case_name>.fem.h5', the SHARPy structural input for a
    straight cantilever beam clamped at s=0 and free at s=L.

    Args:
        route: output directory.
        case_name: base name of the case files.
        num_elem: number of 3-noded beam elements.
        deadforce: dead tip load in N, applied through a lumped tip mass of
            deadforce/9.81 kg (acts via gravity in the solver).
        followerforce: follower force in N applied at the tip along local z.

    Returns:
        (num_node, coordinates): the number of nodes and the (num_node, 3)
        array of undeformed nodal coordinates.
    """
    length = 5
    num_node_elem = 3
    num_node = (num_node_elem - 1) * num_elem + 1

    # Straight reference line in the x-y plane (angle = 0 keeps it along x).
    angle = 0 * np.pi / 180.0
    x = (np.linspace(0, length, num_node)) * np.cos(angle)
    y = (np.linspace(0, length, num_node)) * np.sin(angle)
    z = np.zeros((num_node,))

    structural_twist = np.zeros((num_elem, num_node_elem))
    # Local frame delta: perpendicular to the beam axis, in-plane, same for all nodes.
    frame_of_reference_delta = np.zeros((num_elem, num_node_elem, 3))
    for ielem in range(num_elem):
        for inode in range(num_node_elem):
            frame_of_reference_delta[ielem, inode, :] = [-np.sin(angle), np.cos(angle), 0]

    scale = 1
    x *= scale
    y *= scale
    z *= scale

    # Connectivity of 3-noded elements in SHARPy node ordering (end, end, mid).
    conn = np.zeros((num_elem, num_node_elem), dtype=int)
    for ielem in range(num_elem):
        conn[ielem, :] = (np.ones((3,)) * ielem * (num_node_elem - 1)
                          + [0, 2, 1])

    # One stiffness entry shared by all elements.
    num_stiffness = 1
    ea = 4.8e8    # axial
    ga = 3.231e8  # shear
    gj = 1.0e6    # torsional
    ei = 9.346e6  # bending
    base_stiffness = np.diag([ea, ga, ga, gj, ei, ei])
    stiffness = np.zeros((num_stiffness, 6, 6))
    for i in range(num_stiffness):
        stiffness[i, :, :] = base_stiffness
    elem_stiffness = np.zeros((num_elem,), dtype=int)

    # Distributed mass is zero for the static analysis; only rotary inertia kept.
    num_mass = 1
    m_bar = 0.
    j = 10
    base_mass = np.diag([m_bar, m_bar, m_bar, j, j, j])
    mass = np.zeros((num_mass, 6, 6))
    for i in range(num_mass):
        mass[i, :, :] = base_mass
    elem_mass = np.zeros((num_elem,), dtype=int)

    # Boundary conditions: 1 = clamped root, -1 = free tip.
    boundary_conditions = np.zeros((num_node, 1), dtype=int)
    boundary_conditions[0] = 1
    boundary_conditions[-1] = -1

    beam_number = np.zeros((num_elem, 1), dtype=int)

    # Follower force applied at the tip node, local z direction.
    app_forces = np.zeros((num_node, 6))
    app_forces[-1, 2] = followerforce

    # Dead force is applied through a lumped tip mass under gravity.
    n_lumped_mass = 1
    lumped_mass_nodes = np.array([num_node - 1], dtype=int)
    lumped_mass = np.zeros((n_lumped_mass,))
    lumped_mass[0] = deadforce / 9.81
    lumped_mass_inertia = np.zeros((n_lumped_mass, 3, 3))
    lumped_mass_position = np.zeros((n_lumped_mass, 3))

    coordinates = np.column_stack((x, y, z))

    # 'a' keeps the original create-or-append semantics; run clean_test_files
    # first if the case file may already exist.
    with h5.File(route + '/' + case_name + '.fem.h5', 'a') as h5file:
        h5file.create_dataset('coordinates', data=coordinates)
        h5file.create_dataset('connectivities', data=conn)
        h5file.create_dataset('num_node_elem', data=num_node_elem)
        h5file.create_dataset('num_node', data=num_node)
        h5file.create_dataset('num_elem', data=num_elem)
        h5file.create_dataset('stiffness_db', data=stiffness)
        h5file.create_dataset('elem_stiffness', data=elem_stiffness)
        h5file.create_dataset('mass_db', data=mass)
        h5file.create_dataset('elem_mass', data=elem_mass)
        h5file.create_dataset('frame_of_reference_delta', data=frame_of_reference_delta)
        h5file.create_dataset('structural_twist', data=structural_twist)
        h5file.create_dataset('boundary_conditions', data=boundary_conditions)
        h5file.create_dataset('beam_number', data=beam_number)
        h5file.create_dataset('app_forces', data=app_forces)
        h5file.create_dataset('lumped_mass_nodes', data=lumped_mass_nodes)
        h5file.create_dataset('lumped_mass', data=lumped_mass)
        h5file.create_dataset('lumped_mass_inertia', data=lumped_mass_inertia)
        h5file.create_dataset('lumped_mass_position', data=lumped_mass_position)

    # Fix: return the in-memory array. Previously the h5py dataset handle was
    # returned after the 'with' block had closed the file, which made the
    # returned object unusable.
    return num_node, coordinates
# Solver options
def generate_solver_file (route, case_name):
    """Write '<route>/<case_name>.sharpy' with the SHARPy solver settings
    (BeamLoader followed by NonLinearStatic) for the static cantilever case.
    """
    file_name = route + '/' + case_name + '.sharpy'
    import configobj
    config = configobj.ConfigObj()
    config.filename = file_name
    # Top-level case description: name, paths and the solver flow sequence.
    config['SHARPy'] = {'case': case_name,
                        'route': route,
                        'flow': ['BeamLoader', 'NonLinearStatic'],
                        'write_screen': 'off',
                        'write_log': 'on',
                        'log_folder': route + '/output/',
                        'log_file': case_name + '.log'}
    config['BeamLoader'] = {'unsteady': 'off'}
    # Geometrically-nonlinear static structural solver settings.
    config['NonLinearStatic'] = {'print_info': 'off',
                                 'max_iterations': 99, # Default 99
                                 'num_load_steps': 10, # Default 10
                                 'delta_curved': 1e-5,
                                 'min_delta': 1e-8, # Default 1e-8
                                 'gravity_on': 'on',
                                 'gravity': 9.81}
    config.write()
# eof
| 38.503268 | 103 | 0.604312 | import h5py as h5
import numpy as np
import os
def clean_test_files(route, case_name):
fem_file_name = route + '/' + case_name + '.fem.h5'
if os.path.isfile(fem_file_name):
os.remove(fem_file_name)
solver_file_name = route + '/' + case_name + '.sharpy'
if os.path.isfile(solver_file_name):
os.remove(solver_file_name)
def generate_fem_file(route, case_name, num_elem, deadforce=600e3, followerforce=0):
length = 5
num_node_elem=3
num_node = (num_node_elem - 1)*num_elem + 1
angle = 0*np.pi/180.0
x = (np.linspace(0, length, num_node))*np.cos(angle)
y = (np.linspace(0, length, num_node))*np.sin(angle)
z = np.zeros((num_node,))
structural_twist = np.zeros((num_elem, num_node_elem))
frame_of_reference_delta = np.zeros((num_elem, num_node_elem, 3))
for ielem in range(num_elem):
for inode in range(num_node_elem):
frame_of_reference_delta[ielem, inode, :] = [-np.sin(angle), np.cos(angle), 0]
scale = 1
x *= scale
y *= scale
z *= scale
conn = np.zeros((num_elem, num_node_elem), dtype=int)
for ielem in range(num_elem):
conn[ielem, :] = (np.ones((3,)) * ielem * (num_node_elem - 1)
+ [0, 2, 1])
num_stiffness = 1
ea = 4.8e8
ga = 3.231e8
gj = 1.0e6
ei = 9.346e6
base_stiffness = np.diag([ea, ga, ga, gj, ei, ei])
stiffness = np.zeros((num_stiffness, 6, 6))
for i in range(num_stiffness):
stiffness[i, :, :] = base_stiffness
elem_stiffness = np.zeros((num_elem,), dtype=int)
num_mass = 1
m_bar = 0.
j = 10
base_mass = np.diag([m_bar, m_bar, m_bar, j, j, j])
mass = np.zeros((num_mass, 6, 6))
for i in range(num_mass):
mass[i, :, :] = base_mass
elem_mass = np.zeros((num_elem,), dtype=int)
boundary_conditions = np.zeros((num_node, 1), dtype=int)
boundary_conditions[0] = 1
boundary_conditions[-1] = -1
beam_number = np.zeros((num_elem, 1), dtype=int)
app_forces = np.zeros((num_node, 6))
app_forces[-1, 2] = followerforce
n_lumped_mass = 1
lumped_mass_nodes = np.array([num_node - 1], dtype=int)
lumped_mass = np.zeros((n_lumped_mass, ))
lumped_mass[0] = deadforce/9.81
lumped_mass_inertia = np.zeros((n_lumped_mass, 3, 3))
lumped_mass_position = np.zeros((n_lumped_mass, 3))
with h5.File(route + '/' + case_name + '.fem.h5', 'a') as h5file:
coordinates = h5file.create_dataset('coordinates', data = np.column_stack((x, y, z)))
conectivities = h5file.create_dataset('connectivities', data = conn)
num_nodes_elem_handle = h5file.create_dataset(
'num_node_elem', data = num_node_elem)
num_nodes_handle = h5file.create_dataset(
'num_node', data = num_node)
num_elem_handle = h5file.create_dataset(
'num_elem', data = num_elem)
stiffness_db_handle = h5file.create_dataset(
'stiffness_db', data = stiffness)
stiffness_handle = h5file.create_dataset(
'elem_stiffness', data = elem_stiffness)
mass_db_handle = h5file.create_dataset(
'mass_db', data = mass)
mass_handle = h5file.create_dataset(
'elem_mass', data = elem_mass)
frame_of_reference_delta_handle = h5file.create_dataset(
'frame_of_reference_delta', data=frame_of_reference_delta)
structural_twist_handle = h5file.create_dataset(
'structural_twist', data=structural_twist)
bocos_handle = h5file.create_dataset(
'boundary_conditions', data=boundary_conditions)
beam_handle = h5file.create_dataset(
'beam_number', data=beam_number)
app_forces_handle = h5file.create_dataset(
'app_forces', data=app_forces)
lumped_mass_nodes_handle = h5file.create_dataset(
'lumped_mass_nodes', data=lumped_mass_nodes)
lumped_mass_handle = h5file.create_dataset(
'lumped_mass', data=lumped_mass)
lumped_mass_inertia_handle = h5file.create_dataset(
'lumped_mass_inertia', data=lumped_mass_inertia)
lumped_mass_position_handle = h5file.create_dataset(
'lumped_mass_position', data=lumped_mass_position)
return num_node, coordinates
def generate_solver_file (route, case_name):
file_name = route + '/' + case_name + '.sharpy'
import configobj
config = configobj.ConfigObj()
config.filename = file_name
config['SHARPy'] = {'case': case_name,
'route': route,
'flow': ['BeamLoader', 'NonLinearStatic'],
'write_screen': 'off',
'write_log': 'on',
'log_folder': route + '/output/',
'log_file': case_name + '.log'}
config['BeamLoader'] = {'unsteady': 'off'}
config['NonLinearStatic'] = {'print_info': 'off',
'max_iterations': 99,
'num_load_steps': 10,
'delta_curved': 1e-5,
'min_delta': 1e-8,
'gravity_on': 'on',
'gravity': 9.81}
config.write()
| true | true |
1c36b5d77d327254097737fe7e632bce94d69f33 | 498 | py | Python | src/openprocurement/tender/cfaua/models/submodels/cancellation.py | pontostroy/api | 5afdd3a62a8e562cf77e2d963d88f1a26613d16a | [
"Apache-2.0"
] | 10 | 2020-02-18T01:56:21.000Z | 2022-03-28T00:32:57.000Z | src/openprocurement/tender/cfaua/models/submodels/cancellation.py | pontostroy/api | 5afdd3a62a8e562cf77e2d963d88f1a26613d16a | [
"Apache-2.0"
] | 26 | 2018-07-16T09:30:44.000Z | 2021-02-02T17:51:30.000Z | src/openprocurement/tender/cfaua/models/submodels/cancellation.py | pontostroy/api | 5afdd3a62a8e562cf77e2d963d88f1a26613d16a | [
"Apache-2.0"
] | 15 | 2019-08-08T10:50:47.000Z | 2022-02-05T14:13:36.000Z | from openprocurement.api.models import ListType
from openprocurement.api.roles import RolesFromCsv
from openprocurement.tender.core.models import EUDocument
from openprocurement.tender.openua.models import Cancellation as BaseCancellation
from schematics.types.compound import ModelType
class Cancellation(BaseCancellation):
    """Tender cancellation based on the openua Cancellation, with EU-style
    documents and serialization roles loaded from a CSV file."""
    class Options:
        # Roles are read from Cancellation.csv located next to this module.
        roles = RolesFromCsv("Cancellation.csv", relative_to=__file__)
    # NOTE(review): default=list() is evaluated once at class-definition time;
    # confirm schematics copies the default per instance rather than sharing it.
    documents = ListType(ModelType(EUDocument, required=True), default=list())
| 35.571429 | 81 | 0.819277 | from openprocurement.api.models import ListType
from openprocurement.api.roles import RolesFromCsv
from openprocurement.tender.core.models import EUDocument
from openprocurement.tender.openua.models import Cancellation as BaseCancellation
from schematics.types.compound import ModelType
class Cancellation(BaseCancellation):
class Options:
roles = RolesFromCsv("Cancellation.csv", relative_to=__file__)
documents = ListType(ModelType(EUDocument, required=True), default=list())
| true | true |
1c36b5ffd0011dca80af93ebe5034e8dc7e7eb77 | 170 | py | Python | baobab/sim_utils/__init__.py | aymgal/baobab | 960ddbd55fc4391f2b857f2232af38c45c809ae8 | [
"MIT"
] | 8 | 2019-09-11T15:11:57.000Z | 2022-02-03T08:24:52.000Z | baobab/sim_utils/__init__.py | aymgal/baobab | 960ddbd55fc4391f2b857f2232af38c45c809ae8 | [
"MIT"
] | 52 | 2019-08-29T00:39:11.000Z | 2021-01-02T22:49:41.000Z | baobab/sim_utils/__init__.py | aymgal/baobab | 960ddbd55fc4391f2b857f2232af38c45c809ae8 | [
"MIT"
] | 2 | 2019-09-26T23:38:47.000Z | 2020-02-18T10:07:04.000Z | from .flux_utils import *
from .psf_utils import *
from .image_utils import *
from .metadata_utils import *
from .selection_utils import *
from .kinematics_utils import * | 28.333333 | 31 | 0.794118 | from .flux_utils import *
from .psf_utils import *
from .image_utils import *
from .metadata_utils import *
from .selection_utils import *
from .kinematics_utils import * | true | true |
1c36b6e18e1ae5e543710d930543619f2b5b83a6 | 850 | py | Python | tests/test_message.py | falsovsky/AnavNet | cfbfbd411d7d98a08132f92db1989ce9cb62c86f | [
"BSD-3-Clause"
] | 1 | 2020-11-21T23:07:45.000Z | 2020-11-21T23:07:45.000Z | tests/test_message.py | falsovsky/anavnet | cfbfbd411d7d98a08132f92db1989ce9cb62c86f | [
"BSD-3-Clause"
] | null | null | null | tests/test_message.py | falsovsky/anavnet | cfbfbd411d7d98a08132f92db1989ce9cb62c86f | [
"BSD-3-Clause"
] | null | null | null | import unittest
from anavnet.anavnet import AnavNet
class TestMessage(unittest.TestCase):
    """Tests for AnavNet message retrieval: port-not-set guards and
    message-index range validation."""

    def setUp(self):
        # Client with a port already selected, used by the index-range tests.
        self.anavnet = AnavNet()
        self.anavnet.set_port(16)

    def test_get_message_no_port(self):
        # get_message must raise RuntimeError when no port has been set.
        anavnet = AnavNet()
        with self.assertRaises(RuntimeError):
            anavnet.get_message(1)

    def test_get_total_messages_no_port(self):
        # Same guard applies to get_total_messages.
        anavnet = AnavNet()
        with self.assertRaises(RuntimeError):
            anavnet.get_total_messages()

    def test_get_message_invalid_index_small(self):
        # Index 0 is below the valid range and must raise IndexError.
        with self.assertRaises(IndexError):
            self.anavnet.get_message(0)

    def test_get_message_invalid_index_big(self):
        # One past the last available message must raise IndexError.
        with self.assertRaises(IndexError):
            self.anavnet.get_message(self.anavnet.get_total_messages() + 1)

if __name__ == '__main__':
    unittest.main()
from anavnet.anavnet import AnavNet
class TestMessage(unittest.TestCase):
def setUp(self):
self.anavnet = AnavNet()
self.anavnet.set_port(16)
def test_get_message_no_port(self):
anavnet = AnavNet()
with self.assertRaises(RuntimeError):
anavnet.get_message(1)
def test_get_total_messages_no_port(self):
anavnet = AnavNet()
with self.assertRaises(RuntimeError):
anavnet.get_total_messages()
def test_get_message_invalid_index_small(self):
with self.assertRaises(IndexError):
self.anavnet.get_message(0)
def test_get_message_invalid_index_big(self):
with self.assertRaises(IndexError):
self.anavnet.get_message(self.anavnet.get_total_messages() + 1)
if __name__ == '__main__':
unittest.main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.