# PlotMooringMultiInst_hresSST.py | shaunwbell/FOCI_Analysis
#!/usr/bin/env python
"""
Background:
--------
PlotMooringMultiInst.py
Purpose:
--------
Plot multiple timeseries on the same panel (MULTIPLOT overlay).
Use Case 1:
Plot the same parameter (e.g. temperature) from multiple instruments on the same mooring at
different depths, e.g. temperature from every MTR depth of a mooring deployment.
Use Case 2:
Plot the same parameter from different moorings at the same depth (not restricted to this),
e.g. temperature from the instrument closest to the surface over multiple deployments (ongoing SST plots).
Use Case 3 (with -ctd flag):
Plot the discrete point from a CTD cast (nearby is most relevant) for QC purposes.
Modifications:
--------------
2016-09-16: SW Bell - Add support for parsing yaml files and translating between yaml and json/pyini
Begin code cleanup from previous iterations of the routine. Merge so that one program can provide ctd cal
overlays.
"""
#System Stack
import datetime, sys, os
import argparse
#Science Stack
from netCDF4 import Dataset
import numpy as np
# Visual Stack
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import YearLocator, WeekdayLocator, MonthLocator, DayLocator, HourLocator, DateFormatter
import matplotlib.ticker as ticker
# User Stack
from io_utils import ConfigParserLocal
from calc.EPIC2Datetime import EPIC2Datetime, get_UDUNITS
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__created__ = datetime.datetime(2014, 9, 11)
__modified__ = datetime.datetime(2016, 9, 5)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'Mooring', 'comparisons', 'Cruise', 'plots'
"""--------------------------------main Routines---------------------------------------"""
parser = argparse.ArgumentParser(description='SBE56 plotting')
parser.add_argument('PointerFile',
metavar='PointerFile',
type=str,
help='full path to pointer file')
parser.add_argument("-mt",'--manual_timebounds',
nargs='+',
type=str,
help='set times to specified values (Y-m-d)')
parser.add_argument("-md",'--manual_databounds',
nargs='+',
type=float,
help='set databounds to specified values')
parser.add_argument("-multi",'--multiplot_overlay',
action="store_true",
help='plot multiple mooring data on one panel')
parser.add_argument("-ctd",'--ctd_calibration_plots',
action="store_true",
help='plot CTD calibration point on timeseries')
args = parser.parse_args()
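# Example invocation (illustrative path and dates only; flags as defined above):
#   python PlotMooringMultiInst_hresSST.py /full/path/to/pointer.yaml -multi -mt 2015-01-01 2016-01-01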
"""---------------------------------------------------------------------------------------
Get parameters from specified pointerfile -
an example is shown in the header description of
this program. It can be of the .pyini (json) form or .yaml form
"""
if args.PointerFile.split('.')[-1] == 'pyini':
pointer_file = ConfigParserLocal.get_config(args.PointerFile)
elif args.PointerFile.split('.')[-1] == 'yaml':
pointer_file = ConfigParserLocal.get_config_yaml(args.PointerFile)
else:
print "PointerFile format not recognized"
sys.exit()
MooringDataPath = pointer_file['mooring_data_path']
MooringID = pointer_file['MooringID']
color_options = pointer_file['colors']
label = pointer_file['legend']
legend_loc = pointer_file['legend_loc']
legend_off = pointer_file['legend_off']
datatype = pointer_file['dtype']
plot_var = pointer_file['EPIC_Key']
plot_var_ctd = pointer_file['EPIC_Key_ctd']
LocatorInterval = pointer_file['Date_Ticks']
Ylabel = pointer_file['Ylabel']
output_type = pointer_file['output_type']
files = pointer_file['mooring_files']
files_path = [a+b for a,b in zip(MooringDataPath,files)]
CTDDataPath = pointer_file['ctd_data_path']
ctd_files = pointer_file['ctd_files']
ctd_files_path = [a+b for a,b in zip(CTDDataPath,ctd_files)]
### some mpl-specific settings for fonts and plot style
mpl.rcParams['svg.fonttype'] = 'none'
plt.style.use(pointer_file['plot_stylesheet'])
#seaborn-poster -- fonts are smaller
#ggplot -- grey border, better axis frame
#bmh -- slightly heavier than ggplot for line weights
"""---------------------------------------------------------------------------------------
Plot Multiple Mooring Datastreams on one panel
"""
databounds={}
if args.multiplot_overlay:
### set title for plot
ptitle = ("Plotted on: {timestr} \n from {mooringid} ").format(timestr=datetime.datetime.now().strftime('%Y/%m/%d %H:%M'),
mooringid=MooringID )
### initialize plot
fig = plt.figure()
plt.subplot2grid((3, 1), (1, 0), colspan=1, rowspan=3)
### set arbitrary max and min bounds to be changed later based on data bounds
databounds['max_t'] = 0
databounds['min_t'] = 100000000
databounds['max_v'] = -50
databounds['min_v'] = 50
label_thin = []
### cycle through all files, retrieve data and plot
print(files_path)
for ind, ncfile in enumerate(files_path):
print "Working on {activefile}".format(activefile=ncfile)
#open/read netcdf files
df = EcoFOCI_netCDF(ncfile)
global_atts = df.get_global_atts()
vars_dic = df.get_vars()
ncdata = df.ncreadfile_dic()
df.close()
nctime = get_UDUNITS(EPIC2Datetime(ncdata['time'],ncdata['time2']),'days since 0001-01-01') + 1.
#find and replace missing values with nans so they don't plot
for var in plot_var:
try:
ncdata[var][np.where(ncdata[var] >1e30)] = np.nan
label_thin = label_thin + [label[ind]]
except KeyError:
pass
#Plot data
plt.hold(True)  # deprecated in matplotlib 2.0 and removed in 3.0; kept for the older versions this script targets
for var in plot_var:
if var in ['T_25']:
try:
ncdata['ICEC_2025'][np.where(ncdata['ICEC_2025'] >1e30)] = np.nan
ncdata[var][np.where(ncdata['ICEC_2025'] > 1)] = np.nan
plt.plot(nctime, ncdata[var][:,0,0,0],color_options[ind],linewidth=0.25)
except KeyError: #if the file doesn't have the specified epic_key it will throw an exception
print("Failed to plot {0}".format(var))
continue
else:
try:
plt.plot(nctime, ncdata[var][:,0,0,0],color_options[ind],linewidth=0.25)
except KeyError: #if the file doesn't have the specified epic_key it will throw an exception
print("Failed to plot {0}".format(var))
continue
#set up bounds
for var in plot_var:
try:
if nctime.max() > databounds['max_t']:
databounds['max_t'] = nctime.max()
if nctime.min() < databounds['min_t']:
databounds['min_t'] = nctime.min()
if np.nanmax(ncdata[var][:,0,0,0]) > databounds['max_v']:
databounds['max_v'] = np.nanmax(ncdata[var][:,0,0,0])
if np.nanmin(ncdata[var][:,0,0,0]) < databounds['min_v']:
databounds['min_v'] = np.nanmin(ncdata[var][:,0,0,0])
except KeyError:
pass
#set bounds if established by user
if args.manual_timebounds:
databounds['min_t'] = datetime.datetime.strptime(args.manual_timebounds[0],'%Y-%m-%d').toordinal()
databounds['max_t'] = datetime.datetime.strptime(args.manual_timebounds[1],'%Y-%m-%d').toordinal()
#set bounds if established by user
if args.manual_databounds:
databounds['min_v'] = args.manual_databounds[0]
databounds['max_v'] = args.manual_databounds[1]
ax2 = plt.gca()
ax2.set_ylim(databounds['min_v'],databounds['max_v'])
ax2.set_xlim([databounds['min_t'],databounds['max_t']])
if not legend_off:
leg = ax2.legend(label_thin, loc=legend_loc, ncol=6, fontsize=8)
for legobj in leg.legendHandles:
legobj.set_linewidth(2.0)
plt.ylabel(Ylabel)
if LocatorInterval == 'multi_year':
ax2.xaxis.set_major_locator(YearLocator())
ax2.xaxis.set_minor_locator(MonthLocator(bymonth=6))
ax2.xaxis.set_major_formatter(ticker.NullFormatter())
ax2.xaxis.set_minor_formatter(DateFormatter('%Y'))
ax2.tick_params(axis='both', which='minor', labelsize=12)
else:
ax2.xaxis.set_major_locator(MonthLocator())
ax2.xaxis.set_minor_locator(MonthLocator(bymonth=[1,3,5,7,9,11], bymonthday=15))
ax2.xaxis.set_major_formatter(ticker.NullFormatter())
ax2.xaxis.set_minor_formatter(DateFormatter('%b %y'))
ax2.tick_params(axis='both', which='minor', labelsize=12)
t = fig.suptitle(ptitle, fontsize=8)
t.set_y(0.03)
#fig.autofmt_xdate()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/'+ MooringID + '_'+plot_var[0]+'_'+datatype+'.'+output_type, bbox_inches='tight', dpi = (300))
plt.close()
# RecoBTag/PerformanceDB/python/BTagPerformanceDB062012.py | ckamtsikis/cmssw
from RecoBTag.PerformanceDB.measure.Btag_btagMuJetsWp0612 import *
from RecoBTag.PerformanceDB.measure.Btag_btagMistag0612 import *
from RecoBTag.PerformanceDB.measure.Btag_btagTtbarDiscrim0612 import *
from RecoBTag.PerformanceDB.measure.Btag_btagTtbarWp0612 import *
from RecoBTag.PerformanceDB.measure.Btag_btagTtbarMc0612 import *
# apps/api/migrations/0005_vmwaretasks_msg.py | death-finger/get2unix
# Generated by Django 3.0.6 on 2020-06-02 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20200602_0843'),
]
operations = [
migrations.AddField(
model_name='vmwaretasks',
name='msg',
field=models.TextField(null=True),
),
]
# AprendeAyudando/resources/migrations/0007_auto_20210103_1854.py | memoriasIT/AprendeAyudando
# Generated by Django 3.1.3 on 2021-01-03 17:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0006_auto_20210103_1802'),
]
operations = [
migrations.AlterField(
model_name='resource',
name='file',
field=models.FileField(default=None, upload_to=''),
),
]
# learning/tests/metaTest.py | BlancNicolas/face_detection_SY32
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from learning.test import *
from util.dataExtraction import importImages
sample_size = 200
pos = importImages(extracted_pos_faces_path)[0:sample_size]
neg = importImages(extracted_neg_faces_path)[0:sample_size]
clf = classifierTraining(pos, neg)
def test_detectFaces():
image = io.imread(img_train_path.replace('*', '0001'))
image = rgb2gray(image)
boxes, scores = detectFaces(image, clf)
print("# Boxes : {}".format(len(boxes)))
print("# Scores : {}".format(len(scores)))
# Deliberately fail so pytest displays the printed output
assert not True
def test_validation():
images = importImages(img_train_path)[0:sample_size]
labels = np.loadtxt(label_path).astype('int')
err_rate, false_pos = validateFaceDetection(images, labels, clf)
print("Error rate : {}".format(err_rate))
print("# False Positive : {}".format(len(false_pos)))
# Deliberately fail so pytest displays the printed output
assert not True
def test_applyClf():
test_images = importImages(img_test_path)
applyClfOnTestImages(test_images, clf, 0.5)
assert True
# kansha/card_addons/due_date/view.py | AnomalistDesignLLC/kansha
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
import peak
import datetime
from nagare import presentation, security, ajax, i18n
from nagare.i18n import _, format_date
from .comp import DueDate
@peak.rules.when(ajax.py2js, (datetime.date,))
def py2js(value, h):
"""Generic method to transcode a Datetime
In:
- ``value`` -- the datetime object
- ``h`` -- the current renderer
Return:
- transcoded javascript
"""
dt = i18n.to_timezone(value)
return 'new Date("%s", "%s", "%s")' % (
dt.year, dt.month - 1, dt.day)  # JavaScript Date months are zero-based
@peak.rules.when(ajax.py2js, (DueDate,))
def py2js(value, h):
if value.due_date:
return ajax.py2js(value.due_date, h)
return None
@presentation.render_for(DueDate)
def render_DueDate(self, h, comp, model):
return h.root
@presentation.render_for(DueDate, model='badge')
def render_DueDate_badge(self, h, *args):
"""Gallery badge for the card"""
if self.due_date:
with h.span(class_='badge'):
h << h.span(h.i(class_='icon-alarm'), ' ', self.get_days_count(), class_='label due-date ' + self.get_class(), title=format_date(self.due_date, 'full'))
return h.root
@presentation.render_for(DueDate, model='action')
def render_DueDate_button(self, h, comp, *args):
if security.has_permissions('due_date', self.card):
self._init_calendar()
id_ = h.generate_id()
if self.due_date:
classes = ['btn', 'btn-due-date', self.get_class()]
with h.a(class_=u' '.join(classes), id_=id_).action(self.calendar().toggle):
h << h.i(class_='icon-alarm duedate-icon')
h << format_date(self.due_date, 'short')
else:
with h.a(class_='btn', id_=id_).action(self.calendar().toggle):
h << h.i(class_='icon-alarm')
h << _('Due date')
h << self.calendar.on_answer(self.set_value)
return h.root
# model/wave.py | fi-ksi/web-backend
import datetime
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from . import Base
from .year import Year
from .user import User
class Wave(Base):
__tablename__ = 'waves'
__table_args__ = {
'mysql_engine': 'InnoDB',
'mysql_charset': 'utf8mb4'
}
id = Column(Integer, primary_key=True, nullable=False)
year = Column(Integer, ForeignKey(Year.id), nullable=False)
index = Column(Integer, nullable=False)
caption = Column(String(100), nullable=True)
garant = Column(Integer, ForeignKey(User.id), nullable=False)
time_published = Column(DateTime, default=datetime.datetime.utcnow,
nullable=False)
@hybrid_property
def public(self):
return self.time_published <= datetime.datetime.utcnow()
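# Usage sketch (illustrative; assumes a configured SQLAlchemy session named `session`):
# the hybrid property works on instances (wave.public) and as a SQL expression, e.g.
#   session.query(Wave).filter(Wave.public).all()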
# code/Ensemble-Learning/Classification-voting-ensemble.py | Knowledge-Precipitation-Tribe/Neural-network
# -*- coding: utf-8 -*-#
'''
# Name: Classification-voting-ensemble
# Description:
# Author: super
# Date: 2020/6/5
'''
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import cross_val_score
from pathlib import Path
def load_data():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28 * 28)
x_test = x_test.reshape(x_test.shape[0], 28 * 28)
y_train = LabelEncoder().fit_transform(y_train)  # LabelEncoder expects 1-D labels; MNIST labels are already 1-D integers
y_test = LabelEncoder().fit_transform(y_test)
return (x_train, y_train), (x_test, y_test)
# Plot training and validation accuracy/loss over the course of training
def draw_train_history(history):
plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'])
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'])
plt.show()
def build_model(hidden_units):
model = Sequential()
for index, unit in enumerate(hidden_units):
if index == 0:
model.add(Dense(unit, activation='relu', input_shape=(784, )))
else:
model.add(Dense(unit, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def build_model1():
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(784, )))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def build_model2():
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784, )))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def build_model3():
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(784, )))
model.add(Dense(16, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = load_data()
model1 = KerasClassifier(build_fn=build_model1, epochs=20, batch_size=64)
model1._estimator_type = "classifier"
model2 = KerasClassifier(build_fn=build_model2, epochs=20, batch_size=64)
model2._estimator_type = "classifier"
model3 = KerasClassifier(build_fn=build_model3, epochs=20, batch_size=64)
model3._estimator_type = "classifier"
# if ‘hard’, uses predicted class labels for majority rule voting.
# if ‘soft’, predicts the class label based on the argmax of the
# sums of the predicted probabilities,
# which is recommended for an ensemble of well-calibrated classifiers.
cls = VotingClassifier(estimators=(['model1', model1],
['model2', model2],
['model3', model3]),
voting='hard')
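# Illustrative alternative: voting='soft' averages the predicted class probabilities
# instead of majority-voting on labels; it assumes every wrapped estimator exposes
# predict_proba (KerasClassifier does).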
cls.fit(x_train, y_train)
print("score: ", cls.score(x_test, y_test)) | # -*- coding: utf-8 -*-#
'''
# Name: Classification-voting-ensemble
# Description:
# Author: super
# Date: 2020/6/5
'''
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import cross_val_score
from pathlib import Path
def load_data():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28 * 28)
x_test = x_test.reshape(x_test.shape[0], 28 * 28)
y_train = LabelEncoder().fit_transform(y_train.reshape(-1,1))
y_test = LabelEncoder().fit_transform(y_test.reshape(-1,1))
return (x_train, y_train), (x_test, y_test)
#画出训练过程中训练和验证的精度与损失
def draw_train_history(history):
plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'])
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'])
plt.show()
def build_model(hidden_units):
model = Sequential()
for index, unit in enumerate(hidden_units):
if index == 0:
model.add(Dense(unit, activation='relu', input_shape=(784, )))
else:
model.add(Dense(unit, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def build_model1():
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(784, )))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def build_model2():
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784, )))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def build_model3():
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(784, )))
model.add(Dense(16, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = load_data()
model1 = KerasClassifier(build_fn=build_model1, epochs=20, batch_size=64)
model1._estimator_type = "classifier"
model2 = KerasClassifier(build_fn=build_model2, epochs=20, batch_size=64)
model2._estimator_type = "classifier"
model3 = KerasClassifier(build_fn=build_model3, epochs=20, batch_size=64)
model3._estimator_type = "classifier"
# if ‘hard’, uses predicted class labels for majority rule voting.
# if ‘soft’, predicts the class label based on the argmax of the
# sums of the predicted probabilities,
# which is recommended for an ensemble of well-calibrated classifiers.
cls = VotingClassifier(estimators=(['model1', model1],
['model2', model2],
['model3', model3]),
voting='hard')
cls.fit(x_train, y_train)
print("score: ", cls.score(x_test, y_test)) | en | 0.77533 | # -*- coding: utf-8 -*-# # Name: Classification-voting-ensemble # Description: # Author: super # Date: 2020/6/5 #画出训练过程中训练和验证的精度与损失 # summarize history for accuracy # summarize history for loss # if ‘hard’, uses predicted class labels for majority rule voting. # if ‘soft’, predicts the class label based on the argmax of the # sums of the predicted probabilities, # which is recommended for an ensemble of well-calibrated classifiers. | 2.697456 | 3 |
# Covid19_Vaccine_Analysis/Covid19_Vaccine_Analysis.py | rtewari056/Hacktoberfest2021
#!/usr/bin/env python
# coding: utf-8
# # Covid19 Vaccine Analysis
# Many vaccines have been introduced so far to fight COVID-19. No vaccine is 100% effective, but most manufacturers claim that, even so, their vaccine can save your life by giving you immunity.
#
# Thus, each country tries to vaccinate a large part of its population without depending on a single vaccine. That is what I am going to analyze in this project: how many vaccines each country is using to fight COVID-19. In the section below, I walk through my COVID-19 vaccine analysis with Python.
# ### Importing the necessary Python libraries and the dataset
# In[3]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("country_vaccinations.csv")
data.head()
# ### Exploring this data before we start analyzing the vaccines taken by countries
# In[4]:
data.describe()
# In[5]:
pd.to_datetime(data.date)
# In[6]:
data.country.value_counts()
# The United Kingdom is made up of England, Scotland, Wales, and Northern Ireland. But in the above data, these countries are listed separately with the same values as the United Kingdom, which looks like an error made while recording the data. To fix this error:
# In[7]:
data = data[data.country.apply(lambda x: x not in ["England", "Scotland", "Wales", "Northern Ireland"])]
data.country.value_counts()
# ### Exploring the vaccines available in this dataset
# In[8]:
data.vaccines.value_counts()
# So we have almost all the COVID-19 vaccines available in this dataset. Now I will create a new DataFrame by selecting only the vaccine and country columns to explore which vaccine is used by which country:
# In[9]:
df = data[['vaccines', 'country']]
df.head()
# #### Now let’s see how many countries are taking each of the vaccines mentioned in this data:
# In[10]:
dict_ = {}
for i in df.vaccines.unique():
dict_[i] = [df['country'][j] for j in df[df['vaccines'] == i].index]
vaccines = {}
for key, value in dict_.items():
vaccines[key] = set(value)
for i,j in vaccines.items():
print(f'{i}:>>{j}')
# ### Now let’s visualize this data to have a look at what combination of vaccines every country is using:
# In[11]:
import plotly.express as px
import plotly.offline as py
vaccine_map = px.choropleth(data, locations = 'iso_code', color = 'vaccines')
vaccine_map.update_layout(height = 300, margin = {'r':0, 't':0, 'l':0, 'b':0})
vaccine_map.show()
# In[ ]:
# cogs/games.py | bossbadi/Sirk
import discord, random, json, time, asyncio, random
from discord.ext import commands, menus
from discord.ext.commands.cooldowns import BucketType
from copy import deepcopy as dc
import async_cleverbot as ac
config = "tools/config.json"
with open(config) as f:
data = json.load(f)
cb = data['CLEVERBOT']
cleverbot = ac.Cleverbot(cb)
def rps_winner(userOneChoice, userTwoChoice):
if userOneChoice == "\U0001faa8":
if userTwoChoice == "\U00002702": return "You won!"
if userTwoChoice == "\U0001faa8": return "Tie!"
if userTwoChoice == "\U0001f4f0": return "I won!"
elif userOneChoice == "\U00002702":
if userTwoChoice == "\U00002702": return "Tie!"
if userTwoChoice == "\U0001faa8": return "I won!"
if userTwoChoice == "\U0001f4f0": return "You Won!"
elif userOneChoice == "\U0001f4f0":
if userTwoChoice == "\U00002702": return "I won!"
if userTwoChoice == "\U0001faa8": return "You won!"
if userTwoChoice == "\U0001f4f0": return "Tie!"
else: return "error"
class BasketballMenu(menus.Menu):
def __init__(self, **kwargs):
super().__init__(delete_message_after=True, **kwargs)
self.score = 0
self.footer = 'Use the reactions below to try and score'
async def send_initial_message(self, ctx, channel: discord.TextChannel):
return await channel.send(embed=discord.Embed(title='Basketball!', description='Play basketball against an AI!\n\nUse the reactions below to play:\n\n🗑️ = Take a shot\n🏀 = Pass the ball\n⛹️ = Drive to get a layup\n📟 = See the score\n❓ = See this message\n❌ = Stop the game\n\n**Note this command is a work in progress.**',color=self.ctx.bot.color))
@menus.button('🗑️')
async def do_shot(self, _):
if self.score >= 21:
await self.message.edit(embed=discord.Embed(title='You reached 21!', description="Good job. Thats it for now. Come play again later.", color=self.ctx.bot.color))
await asyncio.sleep(5)
self.stop()
else:
lucky = random.choice([True, False])
if lucky:
three = random.choice([True, False])
if three:
self.score += 3
d = 'You swished a three! Good job. +3 to your score.'
else:
d = 'You made a jump shot! +2 to your score.'
self.score += 2
else:
d = 'You missed.'
pass
embed = discord.Embed(title='Shot...', description=d, color=self.ctx.bot.color)
embed.set_footer(text=self.footer)
await self.message.edit(embed = embed)
@menus.button('🏀')
async def do_pass(self, _):
p = random.choice(['Bob', 'Joe', 'Tim', 'Jordan', 'Jokic'])
lucky = random.choice([True, False])
if lucky:
d = f'Nice give and go! You passed the ball to **{p}** and they scored a layup. +2 to your score.'
else:
d = f'You passed the ball to **{p}** but they dropped it.'
embed = discord.Embed(title='Pass...', description=d, color=self.ctx.bot.color)
embed.set_footer(text=self.footer)
await self.message.edit(embed = embed)
@menus.button('⛹️')
async def do_drive(self, _):
embed = discord.Embed(title='Drive...', color=self.ctx.bot.color)
embed.set_footer(text=self.footer)
await self.message.edit(embed = embed)
@menus.button('📟')
async def do_score(self, _):
embed = discord.Embed(title='Current Score', description=f'**{self.ctx.author.name}** - {self.score}', color=self.ctx.bot.color)
embed.set_footer(text=self.footer)
await self.message.edit(embed = embed)
@menus.button('❓')
async def do_help(self, _):
await self.message.edit(embed=discord.Embed(title='Basketball!', description='Play basketball against an AI!\n\nUse the reactions below to play:\n\n🗑️ = Take a shot\n🏀 = Pass the ball\n⛹️ = Drive to get a layup\n📟 = See the score\n❓ = See this message\n❌ = Stop the game\n\n**Note this command is a work in progress.**',color=self.ctx.bot.color))
@menus.button('❌')
async def do_end(self, _):
self.stop()
class games(commands.Cog):
'''Game Commands'''
def __init__(self, bot):
self.bot = bot
@commands.max_concurrency(1, per=BucketType.channel, wait=False)
@commands.command()
async def basketball(self, ctx):
'''Play basketball in a D&D style'''
await BasketballMenu().start(ctx)
@commands.max_concurrency(1, per=BucketType.channel, wait=False)
@commands.command(aliases=['cb'])
async def chatbot(self, ctx):
'''Talk to chatbot'''
talk = True
await ctx.send('Chatbot Started!\nType `cancel` to end.')
while talk is True:
try:
m = await self.bot.wait_for('message', timeout=30, check=lambda m:(ctx.author == m.author and ctx.channel == m.channel))
except asyncio.TimeoutError:
await ctx.send('Timeout Error')
talk = False
else:
if m.content.lower() == "cancel":
talk = False
await ctx.send('Chatbot Session Ended.')
else:
async with ctx.channel.typing():
response = await cleverbot.ask(m.content) # Ask a question, returns async_cleverbot.cleverbot.Response
await ctx.send(response.text)
@commands.command()
@commands.cooldown(1,3,BucketType.user)
async def dice(self, ctx):
'''Roll a dice'''
dice = ['1', '2', '3', '4', '5', '6', 'off the table...\n*You Found The Mystery!*']
embed = discord.Embed(title="Dice", description=f'The Dice Rolled {random.choice(dice)}', color=self.bot.color)
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/758138226874908705/766312838910181421/unknown.png")
embed.set_footer(text=self.bot.footer)
embed.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@commands.cooldown(1,3,BucketType.user)
@commands.command()
async def rps(self, ctx):
"""Rock paper scissors, either play against the bot or against a user"""
choices = ["\U0001f4f0", "\U0001faa8", "\U00002702"]
s = m = await ctx.send(embed = discord.Embed(title = f"Rock, Paper, Scissors.", description = f" {str(ctx.author)} Choose your weapon!", color=self.bot.color))
for i in choices:
await m.add_reaction(i)
def check(reaction, user):
return user == ctx.author and str(reaction.emoji) in choices
try:
reaction = await self.bot.wait_for('reaction_add', timeout = 30.0, check = check)
reaction = reaction[0].emoji
botChoice = random.choice(choices)
result = rps_winner(reaction, botChoice)
await s.edit(embed= discord.Embed(title =result , description = f"I picked {botChoice} and you picked {reaction}.", color=self.bot.color))
except asyncio.TimeoutError: return await ctx.send("You didn't add a reaction in time!")
@commands.max_concurrency(1, per=commands.BucketType.channel)
@commands.command() #aliases=["c"]
async def cookie(self, ctx):
"""
Yum yum.
"""
cookie = "🍪"
embed = discord.Embed(description=f"Fastest person to eat the {cookie} wins!", colour=self.bot.color)
message = await ctx.send(embed=embed)
await asyncio.sleep(4)
for i in reversed(range(1, 4)):
await message.edit(embed=discord.Embed(description=str(i), colour=self.bot.color))
await asyncio.sleep(1)
await asyncio.sleep(random.randint(0, 3))
await message.edit(embed=discord.Embed(description="Eat the cookie!", colour=self.bot.color))
await message.add_reaction(cookie)
start = time.perf_counter()
try:
_, user = await ctx.bot.wait_for(
"reaction_add",
check=lambda _reaction, user: _reaction.message.guild == ctx.guild
and _reaction.message.channel == ctx.message.channel
and _reaction.message == message and str(_reaction.emoji) == cookie and user != ctx.bot.user
and not user.bot,
timeout=60,)
except asyncio.TimeoutError:
return await message.edit(embed=discord.Embed(description="No one ate the cookie...",
colour=self.bot.color))
end = time.perf_counter()
await message.edit(embed=discord.Embed(description=f"**{user}** ate the cookie in `{end - start:.3f}` seconds!", colour=self.bot.color))
@commands.max_concurrency(1, per=BucketType.guild, wait=False)
@commands.command(aliases=['2048', '24'])
async def twenty(self, ctx):
"""Starts a 2048 game inside of Discord.
Join the support server to post your score!"""
board = [
["_", "_", "_", "_"],
["_", "_", "_", "_"],
["_", "_", "_", "_"],
["_", "_", "_", 2],
]
score = 0
total = 0
embed=discord.Embed(title="2048", description=f"If a reaction is not received every 2 minutes, the game will time out.\n\n```{self.print_board(board)}```", color=self.bot.color)
message = await ctx.send(embed=embed)
await message.add_reaction("\u2B06")
await message.add_reaction("\u2B07")
await message.add_reaction("\u2B05")
await message.add_reaction("\u27A1")
await message.add_reaction("\u274C")
def check(reaction, user):
return (
(user.id == ctx.author.id)
and (str(reaction.emoji) in ["\u2B06", "\u2B07", "\u2B05", "\u27A1", "\u274C"])
and (reaction.message.id == message.id)
)
while True:
try:
reaction, user = await self.bot.wait_for(
"reaction_add", check=check, timeout=120.0
)
except asyncio.TimeoutError:
await ctx.send(f"Ending game.\nYour score was **{score}**")
await message.delete()
return
else:
try:
await message.remove_reaction(str(reaction.emoji), ctx.author)
except discord.errors.Forbidden:
pass
if str(reaction.emoji) == "\u2B06":
msg, nb, total = self.execute_move("up", board)
elif str(reaction.emoji) == "\u2B07":
msg, nb, total = self.execute_move("down", board)
elif str(reaction.emoji) == "\u2B05":
msg, nb, total = self.execute_move("left", board)
elif str(reaction.emoji) == "\u27A1":
msg, nb, total = self.execute_move("right", board)
elif str(reaction.emoji) == "\u274C":
await ctx.send(f"Ending game.\nYour score was **{score}**")
await message.delete()
return
score += total
if msg == "Lost":
await ctx.send(
f"Oh no! It appears you have lost {ctx.author.mention}. You finished with a score of {score}!"
)
await message.delete()
return
board = nb
sem=discord.Embed(title=f"Score: **{score}**", description=f"```{self.print_board(board)}```", color=self.bot.color)
'await message.edit(content=f"Score: **{score}**```{self.print_board(board)}```")'
await message.edit(embed=sem)
def print_board(self, board):
col_width = max(len(str(word)) for row in board for word in row) + 2 # padding
whole_thing = ""
for row in board:
whole_thing += "".join(str(word).ljust(col_width) for word in row) + "\n"
return whole_thing
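# Overview of how a move is processed by the helpers below: merges are computed along
# rows (check_left/check_right) or, after rotating the board with columize, along
# columns (check_up/check_down); surviving tiles are then compacted toward the move
# direction, the board is rotated back with rowize where needed, and a new tile is
# added only if the move actually changed the board.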
def execute_move(self, move, pboard):
board = dc(pboard)
total = 0
if move.lower() == "left":
nb, total = self.check_left(board)
for x in range(len(nb)):
while nb[x][0] == "_" and (nb[x][1] != "_" or nb[x][2] != "_" or nb[x][3] != "_"):
nb[x][0] = nb[x][1]
nb[x][1] = nb[x][2]
nb[x][2] = nb[x][3]
nb[x][3] = "_"
while nb[x][1] == "_" and (nb[x][2] != "_" or nb[x][3] != "_"):
nb[x][1] = nb[x][2]
nb[x][2] = nb[x][3]
nb[x][3] = "_"
while nb[x][2] == "_" and (nb[x][3] != "_"):
nb[x][2] = nb[x][3]
nb[x][3] = "_"
if move.lower() == "right":
nb, total = self.check_right(board)
for x in range(len(nb)):
while nb[x][3] == "_" and (nb[x][2] != "_" or nb[x][1] != "_" or nb[x][0] != "_"):
nb[x][3] = nb[x][2]
nb[x][2] = nb[x][1]
nb[x][1] = nb[x][0]
nb[x][0] = "_"
while nb[x][2] == "_" and (nb[x][1] != "_" or nb[x][0] != "_"):
nb[x][2] = nb[x][1]
nb[x][1] = nb[x][0]
nb[x][0] = "_"
while nb[x][1] == "_" and (nb[x][0] != "_"):
nb[x][1] = nb[x][0]
nb[x][0] = "_"
if move.lower() == "down":
nb = self.columize(board)
nb, total = self.check_down(nb)
for x in range(len(nb)):
while nb[x][0] == "_" and (nb[x][1] != "_" or nb[x][2] != "_" or nb[x][3] != "_"):
nb[x][0] = nb[x][1]
nb[x][1] = nb[x][2]
nb[x][2] = nb[x][3]
nb[x][3] = "_"
while nb[x][1] == "_" and (nb[x][2] != "_" or nb[x][3] != "_"):
nb[x][1] = nb[x][2]
nb[x][2] = nb[x][3]
nb[x][3] = "_"
while nb[x][2] == "_" and (nb[x][3] != "_"):
nb[x][2] = nb[x][3]
nb[x][3] = "_"
nb = self.rowize(nb)
if move.lower() == "up":
nb = self.columize(board)
nb, total = self.check_up(nb)
for x in range(len(nb)):
while nb[x][3] == "_" and (nb[x][2] != "_" or nb[x][1] != "_" or nb[x][0] != "_"):
nb[x][3] = nb[x][2]
nb[x][2] = nb[x][1]
nb[x][1] = nb[x][0]
nb[x][0] = "_"
while nb[x][2] == "_" and (nb[x][1] != "_" or nb[x][0] != "_"):
nb[x][2] = nb[x][1]
nb[x][1] = nb[x][0]
nb[x][0] = "_"
while nb[x][1] == "_" and (nb[x][0] != "_"):
nb[x][1] = nb[x][0]
nb[x][0] = "_"
nb = self.rowize(nb)
if (
nb != pboard
): # only add a new tile if the move actually changed the board
some_message, nb = self.add_number(nb)
else:
some_message = ""
if some_message.startswith("Lost"):
return "Lost", nb, total
else:
return "", nb, total
def add_number(self, board):
try:
row = random.randint(0, 3)
except RecursionError:
return "Lost", board
if "_" in board[row]:
number_of_zeroes = board[row].count("_")
if number_of_zeroes == 1:
column = board[row].index("_")
else:
column = random.randint(0, 3)
while board[row][column] != "_":
column = random.randint(0, 3)
else:
result, board = self.add_number(board)
return result, board
joining = random.randint(0, 100)
if joining < 85:
joining = 2
else:
joining = 4
board[row][column] = joining
return "", board
def columize(self, board):
new_board = [[], [], [], []]
# Make first column
new_board[0].append(board[3][0])
new_board[0].append(board[2][0])
new_board[0].append(board[1][0])
new_board[0].append(board[0][0])
# Make second column
new_board[1].append(board[3][1])
new_board[1].append(board[2][1])
new_board[1].append(board[1][1])
new_board[1].append(board[0][1])
# Make third column
new_board[2].append(board[3][2])
new_board[2].append(board[2][2])
new_board[2].append(board[1][2])
new_board[2].append(board[0][2])
# Make fourth column
new_board[3].append(board[3][3])
new_board[3].append(board[2][3])
new_board[3].append(board[1][3])
new_board[3].append(board[0][3])
board = new_board
return board
def rowize(self, board):
new_board = [[], [], [], []]
# Make first row
new_board[0].append(board[0][3])
new_board[0].append(board[1][3])
new_board[0].append(board[2][3])
new_board[0].append(board[3][3])
# Make second row
new_board[1].append(board[0][2])
new_board[1].append(board[1][2])
new_board[1].append(board[2][2])
new_board[1].append(board[3][2])
# Make third row
new_board[2].append(board[0][1])
new_board[2].append(board[1][1])
new_board[2].append(board[2][1])
new_board[2].append(board[3][1])
# Make fourth row
new_board[3].append(board[0][0])
new_board[3].append(board[1][0])
new_board[3].append(board[2][0])
new_board[3].append(board[3][0])
board = new_board
return board
def check_left(self, board):
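        # Merge each cell with the first equal tile found to its right,
        # skipping over "_" gaps; IndexError marks the edge of the row.
        # check_right/up/down apply the same pass to reversed/rotated rows.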
total = 0
for x in range(len(board)):
for y in range(len(board[x])):
try:
if board[x][y + 1] != "_":
if board[x][y] == board[x][y + 1]:
board[x][y] = board[x][y] + board[x][y + 1]
total += board[x][y]
board[x][y + 1] = "_"
elif board[x][y + 2] != "_":
if board[x][y] == board[x][y + 2]:
board[x][y] = board[x][y] + board[x][y + 2]
total += board[x][y]
board[x][y + 2] = "_"
elif board[x][y + 3] != "_":
if board[x][y] == board[x][y + 3]:
board[x][y] = board[x][y] + board[x][y + 3]
total += board[x][y]
board[x][y + 3] = "_"
except IndexError:
pass
return board, total
def check_right(self, board):
total = 0
for x in range(len(board)):
board[x].reverse()
for y in range(len(board[x])):
try:
if board[x][y + 1] != "_":
if board[x][y] == board[x][y + 1]:
board[x][y] = board[x][y] + board[x][y + 1]
total += board[x][y]
board[x][y + 1] = "_"
elif board[x][y + 2] != "_":
if board[x][y] == board[x][y + 2]:
board[x][y] = board[x][y] + board[x][y + 2]
total += board[x][y]
board[x][y + 2] = "_"
elif board[x][y + 3] != "_":
if board[x][y] == board[x][y + 3]:
board[x][y] = board[x][y] + board[x][y + 3]
total += board[x][y]
board[x][y + 3] = "_"
except IndexError:
pass
board[x].reverse()
return board, total
def check_up(self, board):
total = 0
for x in range(len(board)):
board[x].reverse()
for y in range(len(board[x])):
try:
if board[x][y + 1] != "_":
if board[x][y] == board[x][y + 1]:
board[x][y] = board[x][y] + board[x][y + 1]
total += board[x][y]
board[x][y + 1] = "_"
elif board[x][y + 2] != "_":
if board[x][y] == board[x][y + 2]:
board[x][y] = board[x][y] + board[x][y + 2]
total += board[x][y]
board[x][y + 2] = "_"
elif board[x][y + 3] != "_":
if board[x][y] == board[x][y + 3]:
board[x][y] = board[x][y] + board[x][y + 3]
total += board[x][y]
board[x][y + 3] = "_"
except IndexError:
pass
board[x].reverse()
return board, total
def check_down(self, board):
total = 0
for x in range(len(board)):
for y in range(len(board[x])):
try:
if board[x][y + 1] != "_":
if board[x][y] == board[x][y + 1]:
board[x][y] = board[x][y] + board[x][y + 1]
total += board[x][y]
board[x][y + 1] = "_"
elif board[x][y + 2] != "_":
if board[x][y] == board[x][y + 2]:
board[x][y] = board[x][y] + board[x][y + 2]
total += board[x][y]
board[x][y + 2] = "_"
elif board[x][y + 3] != "_":
if board[x][y] == board[x][y + 3]:
board[x][y] = board[x][y] + board[x][y + 3]
total += board[x][y]
board[x][y + 3] = "_"
except IndexError:
pass
return board, total
@commands.max_concurrency(1, per=BucketType.guild, wait=False)
@commands.command()
async def simon(self, ctx):
"""Start a game of Simon."""
await ctx.send(
"Starting game...\n**RULES:**\n```1. When you are ready for the sequence, click the green checkmark.\n2. Watch the sequence carefully, then repeat it back into chat. For example, if the 1 then the 2 changed, I would type 12.\n3. You are given 10 seconds to repeat the sequence.\n4. When waiting for confirmation for next sequence, click the green check within 5 minutes of the bot being ready.\n5. Answer as soon as you can once the bot adds the stop watch emoji.```"
)
board = [[1, 2], [3, 4]]
level = [1, 4]
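        # level[0] is how long each tile stays highlighted, in seconds;
        # it shrinks by 10% after every correct sequence.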
points = 0
message = await ctx.send("```" + self.print_board(board) + "```")
await message.add_reaction("\u2705")
await message.add_reaction("\u274C")
await ctx.send("Click the Green Check Reaction when you are ready for the sequence.")
def check(reaction, user):
return (
(user.id == ctx.author.id)
and (str(reaction.emoji) in ["\u2705", "\u274C"])
and (reaction.message.id == message.id)
)
randoms = []
for x in range(4):
randoms.append(random.randint(1, 4))
while True:
try:
reaction, user = await self.bot.wait_for(
"reaction_add", check=check, timeout=300.0
)
except asyncio.TimeoutError:
await message.delete()
await ctx.send(
f"Game has ended due to no response for starting the next sequence. You got {points} sequence{'s' if points != 1 else ''} correct!"
)
return
else:
if str(reaction.emoji) == "\u274C":
await message.delete()
await ctx.send(
f"Game has ended due to no response. You got {points} sequence{'s' if points != 1 else ''} correct!"
)
return
await message.remove_reaction("\u2705", self.bot.user)
await message.remove_reaction("\u274C", self.bot.user)
try:
await message.remove_reaction("\u2705", ctx.author)
except discord.errors.Forbidden:
pass
await message.add_reaction("\u26A0")
for x in randoms:
await asyncio.sleep(1)
if x == 1:
board[0][0] = "-"
await message.edit(content="```" + self.print_board(board) + "```")
await asyncio.sleep(level[0])
board[0][0] = 1
elif x == 2:
board[0][1] = "-"
await message.edit(content="```" + self.print_board(board) + "```")
await asyncio.sleep(level[0])
board[0][1] = 2
elif x == 3:
board[1][0] = "-"
await message.edit(content="```" + self.print_board(board) + "```")
await asyncio.sleep(level[0])
board[1][0] = 3
elif x == 4:
board[1][1] = "-"
await message.edit(content="```" + self.print_board(board) + "```")
await asyncio.sleep(level[0])
board[1][1] = 4
await message.edit(content="```" + self.print_board(board) + "```")
await message.remove_reaction("\u26A0", self.bot.user)
answer = "".join(list(map(str, randoms)))
await message.add_reaction("\u23F1")
def check_t(m):
return (m.author.id == ctx.author.id) and (m.content.isdigit())
try:
user_answer = await self.bot.wait_for("message", check=check_t, timeout=10.0)
except asyncio.TimeoutError:
await ctx.send(
f"Sorry {ctx.author.mention}! You took too long to answer. You got {points} sequence{'s' if points != 1 else ''} correct!"
)
await message.remove_reaction("\u23F1", self.bot.user)
return
else:
try:
await user_answer.delete()
except discord.errors.Forbidden:
pass
await message.remove_reaction("\u23F1", self.bot.user)
if str(user_answer.content) == str(answer):
await message.add_reaction("\U0001F44D")
else:
await message.add_reaction("\U0001F6AB")
await ctx.send(
f"Sorry, but that was the incorrect pattern. The pattern was {answer}. You got {points} sequence{'s' if points != 1 else ''} correct!"
)
return
another_message = await ctx.send("Sequence was correct.")
points += 1
await asyncio.sleep(3)
await message.remove_reaction("\U0001F44D", self.bot.user)
await message.add_reaction("\u2705")
await message.add_reaction("\u274C")
await another_message.delete()
level[0] *= 0.90
randoms.append(random.randint(1, 4))
def print_board(self, board):
col_width = max(len(str(word)) for row in board for word in row) + 2 # padding
whole_thing = ""
for row in board:
whole_thing += "".join(str(word).ljust(col_width) for word in row) + "\n"
return whole_thing
def setup(bot):
bot.add_cog(games(bot))
|
| en | 0.787434 | Game Commands Play basketball in a D&D style Talk to chatbot # Ask a question, returns async_cleverbot.cleverbot.Response Roll a dice Rock paper scissors, either play against the bot or against a user #aliases=["c"] Yum yum. Starts a 2048 game inside of Discord. Join the support server to post your score! # padding # only add a new tile when the move actually changed the board # Make first column # Make second column # Make third column # Make fourth column # Make first row # Make second row # Make third row # Make fourth row Start a game of Simon. # padding | 2.433173 | 2 |
androcmd/plot.py | jonathansick/androcmd | 0 | 6613761 | #!/usr/bin/env python
# encoding: utf-8
"""
Plotting utilities for androcmd
"""
import string
import numpy as np
from palettable.cubehelix import perceptual_rainbow_16
from palettable.colorbrewer.diverging import RdBu_11
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.gridspec as gridspec
from astroML.plotting import scatter_contour
def contour_hess(ax, c, m, xlim, ylim,
threshold=20, levels=10, bins=100, log_counts=True,
plot_args=None, contour_args=None):
"""Plot a CMD as a contour Hess diagram in high-density regions, and
as a scatter plot in low density regions.
Parameters
----------
ax :
The matplotlib Axes instance.
c : ndarray
The colour (x) coordinates of stars
m : ndarray
The magnitude (y) coordinates of stars
"""
default_plot_args = {'ms': 2.0, 'mfc': 'k', 'mec': 'None',
'rasterized': True, 'alpha': 0.3}
if plot_args is not None:
default_plot_args.update(plot_args)
default_contour_args = {'cmap': mpl.cm.gray_r,
'linestyles': 'None',
'linewidths': 0.,
'alpha': 1.}
    if contour_args is not None:
        default_contour_args.update(contour_args)
scatter_contour(c, m, levels=levels, threshold=threshold,
log_counts=log_counts,
histogram2d_args={'bins': bins,
'range': [[min(xlim), max(xlim)],
[min(ylim), max(ylim)]]},
plot_args=default_plot_args,
contour_args=default_contour_args,
ax=ax)
def plot_fit_grid(pipeline, dataset, fit_keys, plane_keys, plot_path,
ysize=3.5):
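    # One header row for colorbars plus one row of
    # (observed, model, chi-squared, difference) Hess panels per fit.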
n_y = len(fit_keys) + 1
height_ratios = [0.1] + [1] * len(fit_keys)
if len(fit_keys) > 1:
multi_panel = True
else:
multi_panel = False
fig = Figure(figsize=(7, ysize), frameon=False, dpi=300)
canvas = FigureCanvas(fig)
gs = gridspec.GridSpec(n_y, 4, wspace=0.15, hspace=0.2,
left=0.08, bottom=0.15, right=0.95,
width_ratios=(1, 1, 1, 1),
height_ratios=height_ratios)
for i, (fit_key, plane_key) in enumerate(zip(fit_keys, plane_keys)):
if i == n_y - 2:
last = True
else:
last = False
_plot_plane(pipeline, dataset, fit_key, plane_key, i, fig, gs,
last=last, multi_panel=multi_panel)
gs.tight_layout(fig, pad=1.08, h_pad=None, w_pad=None, rect=None)
canvas.print_figure(plot_path + ".pdf", format="pdf")
def _plot_plane(pipeline, dataset, fit_key, plane_key, i, fig, gs,
last=False, multi_panel=True):
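    # Per-pixel Poisson chi-squared: sigma = sqrt(N) for counting statistics.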
obs_hess = pipeline.make_obs_hess(dataset, plane_key)
fit_hess = pipeline.make_fit_hess(fit_key, plane_key)
sigma = np.sqrt(obs_hess.hess)
chi = ((obs_hess.hess - fit_hess.hess) / sigma) ** 2.
diff = obs_hess.hess - fit_hess.hess
ax_obs = fig.add_subplot(gs[i + 1, 0])
ax_model = fig.add_subplot(gs[i + 1, 1])
ax_chi = fig.add_subplot(gs[i + 1, 2])
ax_diff = fig.add_subplot(gs[i + 1, 3])
cube_map = perceptual_rainbow_16.mpl_colormap
div_map = RdBu_11.mpl_colormap
fit_map = pipeline.plot_fit_hess(ax_model, fit_key, plane_key,
imshow=dict(vmin=0, vmax=3.,
cmap=cube_map))
ax_model.yaxis.set_major_formatter(mpl.ticker.NullFormatter())
ax_model.set_ylabel('')
obs_map = pipeline.plot_obs_hess(ax_obs, dataset, plane_key,
imshow=dict(vmin=0, vmax=3.,
cmap=cube_map))
chi_map = pipeline.plot_hess_array(ax_chi, chi, plane_key, log=False,
imshow=dict(vmax=20, cmap=cube_map))
ax_chi.yaxis.set_major_formatter(mpl.ticker.NullFormatter())
ax_chi.set_ylabel('')
diff_map = pipeline.plot_hess_array(ax_diff, diff, plane_key, log=False,
imshow=dict(vmin=-50, vmax=50,
cmap=div_map))
ax_diff.yaxis.set_major_formatter(mpl.ticker.NullFormatter())
ax_diff.set_ylabel('')
if not last:
ax_diff.set_xlabel('')
ax_chi.set_xlabel('')
ax_model.set_xlabel('')
ax_obs.set_xlabel('')
ax_obs.xaxis.set_major_formatter(mpl.ticker.NullFormatter())
ax_model.xaxis.set_major_formatter(mpl.ticker.NullFormatter())
ax_chi.xaxis.set_major_formatter(mpl.ticker.NullFormatter())
ax_diff.xaxis.set_major_formatter(mpl.ticker.NullFormatter())
if i == 0: # colorbar for first row only
ax_obs_cb = fig.add_subplot(gs[0, 0])
ax_model_cb = fig.add_subplot(gs[0, 1])
ax_chi_cb = fig.add_subplot(gs[0, 2])
ax_diff_cb = fig.add_subplot(gs[0, 3])
obs_cb = fig.colorbar(obs_map, cax=ax_obs_cb, orientation='horizontal')
obs_cb.set_label(r"$\log(N_*)$ Obs.", size=9)
obs_cb.ax.xaxis.set_ticks_position('top')
obs_cb.locator = mpl.ticker.MultipleLocator(1.0)
for tl in obs_cb.ax.get_xmajorticklabels():
tl.set_size(8.)
obs_cb.update_ticks()
fit_cb = fig.colorbar(fit_map, cax=ax_model_cb,
orientation='horizontal')
fit_cb.set_label(r"$\log(N_*)$ Model", size=9)
fit_cb.ax.xaxis.set_ticks_position('top')
fit_cb.locator = mpl.ticker.MultipleLocator(1.0)
for tl in fit_cb.ax.get_xmajorticklabels():
tl.set_size(8.)
fit_cb.update_ticks()
chi_cb = fig.colorbar(chi_map, cax=ax_chi_cb, orientation='horizontal')
chi_cb.set_label(r"$\chi^2$", size=9)
chi_cb.ax.xaxis.set_ticks_position('top')
chi_cb.locator = mpl.ticker.MultipleLocator(5)
for tl in chi_cb.ax.get_xmajorticklabels():
tl.set_size(8.)
chi_cb.update_ticks()
diff_cb = fig.colorbar(diff_map, cax=ax_diff_cb,
orientation='horizontal')
diff_cb.set_label(r"$\Delta_\mathrm{obs-model}$ ($N_*$)", size=9)
diff_cb.ax.xaxis.set_ticks_position('top')
diff_cb.locator = mpl.ticker.MultipleLocator(20)
for tl in diff_cb.ax.get_xmajorticklabels():
tl.set_size(8.)
diff_cb.update_ticks()
if multi_panel:
# more than one row; add subfig annotations
alphanum = dict(zip(range(1, 27), string.ascii_lowercase))
alpha = alphanum[i + 1]
txt = '({0})'.format(alpha)
ax_obs.text(-0.38, 1.0, txt,
transform=ax_obs.transAxes,
ha='left',
va='top',
size=11)
|
| en | 0.685413 | #!/usr/bin/env python # encoding: utf-8 Plotting utilities for androcmd Plot a CMD as a contour Hess diagram in high-density regions, and as a scatter plot in low density regions. Parameters ---------- ax : The matplotlib Axes instance. c : ndarray The colour (x) coordinates of stars m : ndarray The magnitude (y) coordinates of stars # colorbar for first row only # more than one row; add subfig annotations | 2.404042 | 2 |
tark/entities/model_setup.py | TreeboHotels/Tark | 0 | 6613762 | <reponame>TreeboHotels/Tark<gh_stars>0
from tark.entities.constants import DatabaseType
from tark.entities.models.policy import Policy, PolicyArchive
from tark.entities.models.policy_group import PolicyGroup
from tark.entities.models.rule_equation import RuleEquation, RuleEquationToRuleMap
from tark.entities.models.action import Action
from tark.entities.models.rule_variable import RuleVariable
from tark.entities.models.rule import Rule
models_list = [Policy, Rule, RuleVariable, RuleEquation, RuleEquationToRuleMap, Action, PolicyGroup, PolicyArchive]
def init_database(db_settings, app_name="default"):
"""
    Initialize the database
:param db_settings:
:param app_name
:return:
"""
db_handler = DatabaseType.db_mapping.get(db_settings.db_type)
if not db_handler:
raise KeyError("Invalid db type: {}".format(db_settings.db_type))
db = db_handler(db_settings.db_name, **db_settings.db_configuration)
for model in models_list:
model._meta.database = db
model._meta.db_table = app_name.lower() + "_tark_" + model.__name__.lower()
db.connect()
db.create_tables(models_list, safe=True)
return db
|
| en | 0.48709 | Initialize the database :param db_settings: :param app_name :return: | 1.98086 | 2 |
examples/pytorch/pointcloud/pointnet/pointnet2_partseg.py | ketyi/dgl | 9,516 | 6613763 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from pointnet2 import SAModule, SAMSGModule, PointNet2FP
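# Part-segmentation heads built from PointNet++ set-abstraction (SA) and
# feature-propagation (FP) layers; SSG/MSG = single-/multi-scale grouping.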
class PointNet2SSGPartSeg(nn.Module):
def __init__(self, output_classes, batch_size, input_dims=6):
super(PointNet2SSGPartSeg, self).__init__()
#if normal_channel == true, input_dims = 6+3
self.input_dims = input_dims
self.sa_module1 = SAModule(512, batch_size, 0.2, [input_dims, 64, 64, 128], n_neighbor=32)
self.sa_module2 = SAModule(128, batch_size, 0.4, [128 + 3, 128, 128, 256])
self.sa_module3 = SAModule(None, batch_size, None, [256 + 3, 256, 512, 1024],
group_all=True)
self.fp3 = PointNet2FP(1280, [256, 256])
self.fp2 = PointNet2FP(384, [256, 128])
# if normal_channel == true, 128+16+6+3
self.fp1 = PointNet2FP(128+16+6, [128, 128, 128])
self.conv1 = nn.Conv1d(128, 128, 1)
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, output_classes, 1)
def forward(self, x, cat_vec=None):
if x.shape[-1] > 3:
l0_pos = x[:, :, :3]
l0_feat = x
else:
l0_pos = x
l0_feat = x
# Set Abstraction layers
l1_pos, l1_feat = self.sa_module1(l0_pos, l0_feat) # l1_feat: [B, N, D]
l2_pos, l2_feat = self.sa_module2(l1_pos, l1_feat)
l3_pos, l3_feat = self.sa_module3(l2_pos, l2_feat) # [B, N, C], [B, D]
# Feature Propagation layers
l2_feat = self.fp3(l2_pos, l3_pos, l2_feat, l3_feat.unsqueeze(1)) # l2_feat: [B, D, N]
l1_feat = self.fp2(l1_pos, l2_pos, l1_feat, l2_feat.permute(0, 2, 1))
l0_feat = torch.cat([cat_vec.permute(0, 2, 1), l0_pos, l0_feat], 2)
l0_feat = self.fp1(l0_pos, l1_pos, l0_feat, l1_feat.permute(0, 2, 1))
# FC layers
feat = F.relu(self.bn1(self.conv1(l0_feat)))
out = self.drop1(feat)
out = self.conv2(out) # [B, output_classes, N]
return out
class PointNet2MSGPartSeg(nn.Module):
def __init__(self, output_classes, batch_size, input_dims=6):
super(PointNet2MSGPartSeg, self).__init__()
self.sa_msg_module1 = SAMSGModule(512, batch_size, [0.1, 0.2, 0.4], [32, 64, 128],
[[input_dims, 32, 32, 64], [input_dims, 64, 64, 128],
[input_dims, 64, 96, 128]])
self.sa_msg_module2 = SAMSGModule(128, batch_size, [0.4, 0.8], [64, 128],
[[128+128+64 +3, 128, 128, 256], [128+128+64 +3, 128, 196, 256]])
self.sa_module3 = SAModule(None, batch_size, None, [512 + 3, 256, 512, 1024],
group_all=True)
self.fp3 = PointNet2FP(1536, [256, 256])
self.fp2 = PointNet2FP(576, [256, 128])
# if normal_channel == true, 150 + 3
self.fp1 = PointNet2FP(150, [128, 128])
self.conv1 = nn.Conv1d(128, 128, 1)
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, output_classes, 1)
def forward(self, x, cat_vec=None):
if x.shape[-1] > 3:
l0_pos = x[:, :, :3]
l0_feat = x
else:
l0_pos = x
l0_feat = x
# Set Abstraction layers
l1_pos, l1_feat = self.sa_msg_module1(l0_pos, l0_feat)
l2_pos, l2_feat = self.sa_msg_module2(l1_pos, l1_feat)
l3_pos, l3_feat = self.sa_module3(l2_pos, l2_feat)
# Feature Propagation layers
l2_feat = self.fp3(l2_pos, l3_pos, l2_feat, l3_feat.unsqueeze(1))
l1_feat = self.fp2(l1_pos, l2_pos, l1_feat, l2_feat.permute(0, 2, 1))
l0_feat = torch.cat([cat_vec.permute(0, 2, 1), l0_pos, l0_feat], 2)
l0_feat = self.fp1(l0_pos, l1_pos, l0_feat, l1_feat.permute(0, 2, 1))
# FC layers
feat = F.relu(self.bn1(self.conv1(l0_feat)))
out = self.drop1(feat)
out = self.conv2(out)
return out
|
| en | 0.51362 | #if normal_channel == true, input_dims = 6+3 # if normal_channel == true, 128+16+6+3 # Set Abstraction layers # l1_feat: [B, N, D] # [B, N, C], [B, D] # Feature Propagation layers # l2_feat: [B, D, N] # FC layers # [B, output_classes, N] # if normal_channel == true, 150 + 3 # Set Abstraction layers # Feature Propagation layers # FC layers | 2.100003 | 2 |
tests/conftest.py | mjt91/pufo-twitter-bot | 0 | 6613764 | <filename>tests/conftest.py<gh_stars>0
"""Package-wide test fixtures."""
import os
from typing import Any
from unittest.mock import Mock
import pytest
import tweepy # type: ignore
from _pytest.config import Config
from pytest_mock import MockFixture
def pytest_configure(config: Config) -> None:
"""Pytest configuration hook."""
config.addinivalue_line("markers", "e2e: mark as end-to-end test.")
@pytest.fixture
def mock_requests_get(mocker: MockFixture) -> Mock:
"""Fixture for mocking requests.get."""
mock = mocker.patch("requests.get")
mock.return_value.__enter__.return_value.json.return_value = [
{
"firstname": "Peter",
"lastname": "Lorem",
},
{
"firstname": "Lisa",
"lastname": "Ipsum",
},
]
return mock
@pytest.fixture
def mock_randomnames_random_authors(mocker: MockFixture) -> Mock:
"""Fixture for mocking authors.randomname.random_authors."""
return mocker.patch("pufo_twitter_bot.authors.randomnames.random_authors")
@pytest.fixture
def mock_buchtitelgenerator(mocker: MockFixture) -> Mock:
"""Fixture for mocking books.randombuch.buchtitelgenerator."""
mock = mocker.patch(
# return a list of 5 books to avoid calling online api
"pufo_twitter_bot.books.randombuch.buchtitelgenerator",
return_value=["Foo", "Bar", "FooFoo", "BarBar", "FooBar"],
)
return mock
@pytest.fixture
def mock_tweepy_api(mocker: MockFixture) -> Mock:
"""Fixture for mocking tweepy.api object."""
return mocker.patch.object(tweepy, "API", autospec=True)
@pytest.fixture
def mock_environ_variables(mocker: MockFixture) -> Any:
"""Fixture for mocking the environment variables for twitter api."""
return mocker.patch.dict(
os.environ,
{
"CONSUMER_KEY": "consumer_test_key",
"CONSUMER_SECRET": "consumer_test_secret_Key",
"ACCESS_TOKEN": "access_test_token",
"ACCESS_TOKEN_SECRET": "access_test_token_secret",
},
)
|
| en | 0.533721 | Package-wide test fixtures. # type: ignore Pytest configuration hook. Fixture for mocking requests.get. Fixture for mocking authors.randomname.random_authors. Fixture for mocking books.randombuch.buchtitelgenerator. # return a list of 5 books to avoid calling online api Fixture for mocking tweepy.api object. Fixture for mocking the environment variables for twitter api. | 2.208524 | 2 |
main.py | gowtham758550/credit-card-validator | 8 | 6613765 | <reponame>gowtham758550/credit-card-validator<filename>main.py<gh_stars>1-10
#author = <<EMAIL>>
#Check whether a credit card number is valid
#Luhn algorithm - credit and debit card numbers are generated with this algorithm
#Make use of that algorithm's logic to validate card numbers
#Many websites give free card numbers based on this algorithm
#Some identification numbers issued by governments follow this algorithm
#If you find any bug in this program or a logical error,
#please open a pull request or an issue
#import the required os module
import os
#function to clear screen
def clear_screen():
x = input("\n\nPress enter to continue")
os.system('clear' if os.name == 'posix' else 'cls')
#Function to check whether an input is a valid card number
def check(card,length):
sum = 0
list = [int(x) for x in card]
second = False
for i in list:
if second == True:
i = i*2
#to sum the digits if it is more than 9
sum += i//10
sum += i%10
#to make the above if condition true
second = not(second)
if sum%10 == 0:
return True
else:
return False
while(True):
print(" ____ _ _ _ _ _ ")
print(" / ___|__ _ _ __ __| | __ ____ _| (_) __| | __ _| |_ ___ _ __ ")
print("| | / _` | '__/ _` | \ \ / / _` | | |/ _` |/ _` | __/ _ \| '__|")
print("| |__| (_| | | | (_| | \ V / (_| | | | (_| | (_| | || (_) | | ")
print(" \____\__,_|_| \__,_| \_/ \__,_|_|_|\__,_|\__,_|\__\___/|_| ")
print("\n\nEnter the credit card number : ",end = '')
while(True):
try:
card = int(input())
break
except:
print("\n\nEnter the credit card number : ",end = '')
card = str(card)
card = card[::-1]
length = len(card)
condition1 = check(card,length)
    condition2 = (length == 16)
    #check if the card is issued by airlines (card is reversed, so endswith tests the first digit)
    if card.endswith(('1', '2')) and condition1:
print("\n\nIt is a valid card\nCard Issued by Airlines")
#check if the card is issued by american express
elif card.endswith("3") and length == 15 and condition1:
print("\n\nIt is a valid card\nFor travel and entertainment\nIssued by American Express")
#check if the card is issued by visa
elif card.endswith("4") and condition1 and condition2:
print("\n\nIt is a valid card\nBank card\nIssued by Visa Debit and Credit cards")
#check if the card is issued by mastercards
elif card.endswith("5") and condition1 and condition2:
print("\n\nIt is a valid card\nBank card\nIssued by Mastercards")
#check if the card is issued by Discover
elif card.endswith("6") and condition1 and condition2:
print("\n\nIt is a valid card\nBank card\nIssued by Discover credit cards")
    #check for a number that satisfies the luhn algorithm
    elif condition1:
        print("\n\nYour number follows the Luhn algorithm but is not a credit or debit card. It may be some other number that follows the Luhn algorithm.")
    #If all the above conditions are false, it is not a valid number generated using the luhn algorithm
else:
print("\n\nIt is not a valid card number\nOr you may enter a wrong number")
print("\n\nWant to check another (y/n) : ",end = '')
if input().lower() == 'y':
clear_screen()
pass
else:
break
| #author = <<EMAIL>>
#Check a credit card is valid or not
#Luhn Algorithm - With this algorithm credit, debit card numbera are generated
#Make use of that algorithm logic to validate card numbers
#Many websites gives free card numbers based on this algorithm
#Some of indentification number issued by the government follow this algorithm
#If u found any bug in this program or logical error
#please make pull request or an issue
#import requires os module
import os
#function to clear screen
def clear_screen():
x = input("\n\nPress enter to continue")
os.system('clear' if os.name == 'posix' else 'cls')
#Function to check a input is a valid card number
def check(card,length):
sum = 0
list = [int(x) for x in card]
second = False
for i in list:
if second == True:
i = i*2
#to sum the digits if it is more than 9
sum += i//10
sum += i%10
#to make the above if condition true
second = not(second)
if sum%10 == 0:
return True
else:
return False
while(True):
print(" ____ _ _ _ _ _ ")
print(" / ___|__ _ _ __ __| | __ ____ _| (_) __| | __ _| |_ ___ _ __ ")
print("| | / _` | '__/ _` | \ \ / / _` | | |/ _` |/ _` | __/ _ \| '__|")
print("| |__| (_| | | | (_| | \ V / (_| | | | (_| | (_| | || (_) | | ")
print(" \____\__,_|_| \__,_| \_/ \__,_|_|_|\__,_|\__,_|\__\___/|_| ")
print("\n\nEnter the credit card number : ",end = '')
	while True:
		try:
			card = int(input())
			break
		except ValueError:
			print("\n\nEnter the credit card number : ",end = '')
	#reverse the number so endswith() below can test the first (issuer) digit
	card = str(card)[::-1]
	length = len(card)
	condition1 = check(card)
	condition2 = length == 16
	#check if the card is issued by airlines (first digit 1 or 2)
	if card.endswith(('1', '2')) and condition1:
print("\n\nIt is a valid card\nCard Issued by Airlines")
#check if the card is issued by american express
elif card.endswith("3") and length == 15 and condition1:
print("\n\nIt is a valid card\nFor travel and entertainment\nIssued by American Express")
#check if the card is issued by visa
elif card.endswith("4") and condition1 and condition2:
print("\n\nIt is a valid card\nBank card\nIssued by Visa Debit and Credit cards")
#check if the card is issued by mastercards
elif card.endswith("5") and condition1 and condition2:
print("\n\nIt is a valid card\nBank card\nIssued by Mastercards")
#check if the card is issued by Discover
elif card.endswith("6") and condition1 and condition2:
print("\n\nIt is a valid card\nBank card\nIssued by Discover credit cards")
	#check whether the number merely satisfies the Luhn algorithm
	elif condition1:
		print("\n\nYour number passes the Luhn check but is not a known credit or debit card. It may be some other number generated with the Luhn algorithm.")
	#If all the above conditions are false, it is not a valid number generated using the Luhn algorithm
	else:
		print("\n\nIt is not a valid card number\nOr you may have entered a wrong number")
print("\n\nWant to check another (y/n) : ",end = '')
if input().lower() == 'y':
clear_screen()
else:
		break | en | 0.896663 |  | 4.147654 | 4 |
14_Sort/Step02/wowo0709.py | StudyForCoding/BEAKJOON | 0 | 6613766 | import sys
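# read N integers from stdin, sort them ascending, and print one per line;
# sys.stdin/sys.stdout are used instead of input()/print() to keep I/O fast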
N = int(input())
num_list = []
for i in range(N):
num_list.append(int(sys.stdin.readline()))
for i in sorted(num_list):
    sys.stdout.write(str(i)+'\n') | none | 1 | 3.203408 | 3 |
src/harness/testcases/cu_pass/dpa_calculator/features/steps/dpa_neighborhood/aggregate_interference/maximum_azimuth.py | NSF-Swift/Spectrum-Access-System | 0 | 6613767 | <filename>src/harness/testcases/cu_pass/dpa_calculator/features/steps/dpa_neighborhood/aggregate_interference/maximum_azimuth.py
from dataclasses import dataclass
from math import isclose
from typing import List
from behave import *
from cu_pass.dpa_calculator.aggregate_interference_calculator.aggregate_interference_calculator_ntia.helpers.interference_at_azimuth_with_maximum_gain_calculator import \
InterferenceAtAzimuthWithMaximumGainCalculator
from cu_pass.dpa_calculator.aggregate_interference_calculator.aggregate_interference_calculator_ntia.helpers.cbsd_interference_calculator.variables import \
GainAtAzimuth, InterferenceComponents
from reference_models.interference.interference import dbToLinear, linearToDb
from testcases.cu_pass.dpa_calculator.features.environment.hooks import ContextSas
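# 30 dB = 10*log10(1000 mW per W); used below to shift a dBm total to dBW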
MILLIWATTS_PER_WATT_DB = 30
use_step_matcher("parse")
@dataclass
class ContextAggregateInterference(ContextSas):
interference_components: List[InterferenceComponents]
@given("CBSDs at distances {distances:NumberList} each with gains {gains:NumberListList} at azimuths {azimuths:NumberList}")
def step_impl(context: ContextAggregateInterference, distances: List[float], gains: List[List[float]], azimuths: List[float]):
"""
Args:
context (behave.runner.Context):
"""
context.interference_components = [
InterferenceComponents(
distance_in_kilometers=distance,
eirp=0,
frequency_dependent_rejection=0,
gain_receiver={azimuth: GainAtAzimuth(azimuth=azimuth, gain=gain) for azimuth, gain in
zip(azimuths, gains[cbsd_number])},
loss_building=0,
loss_clutter=0,
loss_propagation=0,
loss_receiver=0,
loss_transmitter=0
)
for cbsd_number, distance in enumerate(distances)
]
@then("the returned interference with minimum distance {distance:Number} should be the aggregate of interference from CBSDs {expected_cbsd_numbers:IntegerList} at azimuth {expected_azimuth:Number}")
def step_impl(context: ContextAggregateInterference, distance: float, expected_cbsd_numbers: List[int], expected_azimuth: float):
"""
Args:
context (behave.runner.Context):
distance (str):
expected_azimuth (str):
"""
components_to_include = [components for cbsd_number, components in enumerate(context.interference_components) if cbsd_number in expected_cbsd_numbers]
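    # power-sum the selected CBSDs' contributions in linear units, convert back to dB,
    # then apply the dBm -> dBW offset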
expected_interference = linearToDb(sum(dbToLinear(component.total_interference(azimuth=expected_azimuth)) for component in components_to_include)) - MILLIWATTS_PER_WATT_DB
aggregate_interference = InterferenceAtAzimuthWithMaximumGainCalculator(minimum_distance=distance, interference_components=context.interference_components).calculate()
    assert isclose(aggregate_interference, expected_interference), f'{aggregate_interference} != {expected_interference}'
 | en | 0.139145 |  | 2.224157 | 2 |
rftk/cloud_functions/services/schemas.py | jmwoloso/rftk | 1 | 6613768 | """
schemas.py: Schemas for the BigQuery tables where the enriched data is
stored.
"""
__author__ = "<NAME> <<EMAIL>>"
__license__ = "BSD 3 clause"
import google.cloud.bigquery as bq
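# Most tables below share the same identity column (refinery_company_id) and
# provenance columns (refined_at, refined_date); only the payload fields differ.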
# mobile friendly test
MOBILE_FRIENDLY_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_revealed",
field_type="boolean",
mode="required"),
bq.SchemaField(name="fuzzy_match",
field_type="boolean",
mode="nullable"),
bq.SchemaField(name="test_results",
field_type="string",
mode="required")
]
# clearbit tag history
CLEARBIT_TAGS_HISTORY_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_revealed",
field_type="boolean",
mode="required"),
bq.SchemaField(name="fuzzy_match",
field_type="boolean",
mode="nullable"),
bq.SchemaField(name="tag",
field_type="string",
mode="required")
]
# clearbit tech history
CLEARBIT_TECH_HISTORY_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_revealed",
field_type="boolean",
mode="required"),
bq.SchemaField(name="fuzzy_match",
field_type="boolean",
mode="nullable"),
bq.SchemaField(name="tech",
field_type="string",
mode="required")
]
# clearbit emails history
CLEARBIT_EMAILS_HISTORY_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_revealed",
field_type="boolean",
mode="required"),
bq.SchemaField(name="fuzzy_match",
field_type="boolean",
mode="nullable"),
bq.SchemaField(name="email",
field_type="string",
mode="required")
]
# clearbit phones history
CLEARBIT_PHONES_HISTORY_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_revealed",
field_type="boolean",
mode="required"),
bq.SchemaField(name="fuzzy_match",
field_type="boolean",
mode="nullable"),
bq.SchemaField(name="phone",
field_type="string",
mode="required")
]
# wordpress asset history
WORDPRESS_ASSET_HISTORY_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="asset",
field_type="string",
mode="required"),
bq.SchemaField(name="type",
field_type="string",
mode="required"),
bq.SchemaField(name="html_used",
field_type="string",
mode="required")
]
# crawler payload
CRAWLER_DOMAIN_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="refinery_person_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_lead_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_contact_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_asset_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_oppty_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_acct_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="app_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_revealed",
field_type="boolean",
mode="required"),
bq.SchemaField(name="fuzzy_match",
field_type="boolean",
mode="nullable"),
bq.SchemaField(name="all_links",
field_type="string",
mode="nullable"),
bq.SchemaField(name="internal_links",
field_type="string",
mode="nullable"),
bq.SchemaField(name="external_links",
field_type="string",
mode="nullable"),
bq.SchemaField(name="href_emails",
field_type="string",
mode="nullable"),
bq.SchemaField(name="href_phones",
field_type="string",
mode="nullable"),
bq.SchemaField(name="href_socials",
field_type="string",
mode="nullable"),
bq.SchemaField(name="meta_keywords",
field_type="string",
mode="nullable"),
bq.SchemaField(name="meta_description",
field_type="string",
mode="nullable"),
bq.SchemaField(name="tier1_classification",
field_type="string",
mode="nullable"),
bq.SchemaField(name="tier2_classification",
field_type="string",
mode="nullable"),
bq.SchemaField(name="tier3_classification",
field_type="string",
mode="nullable"),
bq.SchemaField(name="classification_likelihood",
field_type="string",
mode="nullable"),
bq.SchemaField(name="html_string",
field_type="string",
mode="nullable"),
]
# clearbit person
CLEARBIT_PERSON_SCHEMA = [
bq.SchemaField(name="refinery_person_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_lead_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_contact_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_asset_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_oppty_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_acct_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="app_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_revealed",
field_type="boolean",
mode="required"),
bq.SchemaField(name="clearbit_person_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="clearbit_indexed_at",
field_type="timestamp",
mode="nullable"),
bq.SchemaField(name="full_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="first_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="last_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="email",
field_type="string",
mode="nullable"),
bq.SchemaField(name="location",
field_type="string",
mode="nullable"),
bq.SchemaField(name="time_zone",
field_type="string",
mode="nullable"),
bq.SchemaField(name="utc_offset",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="city",
field_type="string",
mode="nullable"),
bq.SchemaField(name="state",
field_type="string",
mode="nullable"),
bq.SchemaField(name="state_code",
field_type="string",
mode="nullable"),
bq.SchemaField(name="country",
field_type="string",
mode="nullable"),
bq.SchemaField(name="country_code",
field_type="string",
mode="nullable"),
bq.SchemaField(name="latitude",
field_type="string",
mode="nullable"),
bq.SchemaField(name="longitude",
field_type="string",
mode="nullable"),
bq.SchemaField(name="bio",
field_type="string",
mode="nullable"),
bq.SchemaField(name="site",
field_type="string",
mode="nullable"),
bq.SchemaField(name="avatar",
field_type="string",
mode="nullable"),
bq.SchemaField(name="employment_domain",
field_type="string",
mode="nullable"),
bq.SchemaField(name="employment_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="employment_title",
field_type="string",
mode="nullable"),
bq.SchemaField(name="employment_role",
field_type="string",
mode="nullable"),
bq.SchemaField(name="employment_seniority",
field_type="string",
mode="nullable"),
bq.SchemaField(name="facebook_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="github_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="github_avatar",
field_type="string",
mode="nullable"),
bq.SchemaField(name="github_company",
field_type="string",
mode="nullable"),
bq.SchemaField(name="github_blog",
field_type="string",
mode="nullable"),
bq.SchemaField(name="github_followers",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="github_following",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="twitter_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_bio",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_followers",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="twitter_following",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_location",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_site",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_avatar",
field_type="string",
mode="nullable"),
bq.SchemaField(name="linkedin_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="googleplus_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="gravatar_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="gravatar_url_titles",
field_type="string",
mode="nullable"),
bq.SchemaField(name="gravatar_urls",
field_type="string",
mode="nullable"),
bq.SchemaField(name="gravatar_avatar",
field_type="string",
mode="nullable"),
bq.SchemaField(name="gravatar_avatar_types",
field_type="string",
mode="nullable"),
bq.SchemaField(name="gravatar_avatar_urls",
field_type="string",
mode="nullable"),
bq.SchemaField(name="fuzzy_match",
field_type="boolean",
mode="nullable"),
bq.SchemaField(name="is_email_provider",
field_type="boolean",
mode="nullable")
]
# clearbit company
CLEARBIT_COMPANY_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="refinery_person_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_lead_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_contact_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_asset_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_oppty_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_acct_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="app_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_address",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ip_revealed",
field_type="boolean",
mode="required"),
bq.SchemaField(name="clearbit_company_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="clearbit_indexed_at",
field_type="timestamp",
mode="nullable"),
bq.SchemaField(name="company_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="legal_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="company_domain",
field_type="string",
mode="nullable"),
bq.SchemaField(name="domain_aliases",
field_type="string",
mode="nullable"),
bq.SchemaField(name="industry",
field_type="string",
mode="nullable"),
bq.SchemaField(name="industry_group",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sub_industry",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sector",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sic_code",
field_type="string",
mode="nullable"),
bq.SchemaField(name="naics_code",
field_type="string",
mode="nullable"),
bq.SchemaField(name="description",
field_type="string",
mode="nullable"),
bq.SchemaField(name="year_founded",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="location",
field_type="string",
mode="nullable"),
bq.SchemaField(name="street_number",
field_type="string",
mode="nullable"),
bq.SchemaField(name="street_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sub_premise",
field_type="string",
mode="nullable"),
bq.SchemaField(name="city",
field_type="string",
mode="nullable"),
bq.SchemaField(name="state",
field_type="string",
mode="nullable"),
bq.SchemaField(name="state_code",
field_type="string",
mode="nullable"),
bq.SchemaField(name="postal_code",
field_type="string",
mode="nullable"),
bq.SchemaField(name="country",
field_type="string",
mode="nullable"),
bq.SchemaField(name="country_code",
field_type="string",
mode="nullable"),
bq.SchemaField(name="latitude",
field_type="float",
mode="nullable"),
bq.SchemaField(name="longitude",
field_type="float",
mode="nullable"),
bq.SchemaField(name="time_zone",
field_type="string",
mode="nullable"),
bq.SchemaField(name="utc_offset",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="company_phone",
field_type="string",
mode="nullable"),
bq.SchemaField(name="number_of_employees",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="number_of_employees_range",
field_type="string",
mode="nullable"),
bq.SchemaField(name="fiscal_year_ends",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="market_cap",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="total_raised",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="company_type",
field_type="string",
mode="nullable"),
bq.SchemaField(name="ticker_symbol",
field_type="string",
mode="nullable"),
bq.SchemaField(name="tax_ein",
field_type="string",
mode="nullable"),
bq.SchemaField(name="annual_revenue",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="estimated_annual_revenue",
field_type="string",
mode="nullable"),
bq.SchemaField(name="company_logo",
field_type="string",
mode="nullable"),
bq.SchemaField(name="crunchbase_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="alexa_us_rank",
field_type="string",
mode="nullable"),
bq.SchemaField(name="alexa_global_rank",
field_type="string",
mode="nullable"),
bq.SchemaField(name="parent_domain",
field_type="string",
mode="nullable"),
bq.SchemaField(name="facebook_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="facebook_likes",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="linkedin_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_handle",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_avatar",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_bio",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_follower_count",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="twitter_following_count",
field_type="integer",
mode="nullable"),
bq.SchemaField(name="twitter_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_location",
field_type="string",
mode="nullable"),
bq.SchemaField(name="twitter_site_url",
field_type="string",
mode="nullable"),
bq.SchemaField(name="fuzzy_match",
field_type="boolean",
mode="nullable"),
bq.SchemaField(name="is_email_provider",
field_type="boolean",
mode="nullable"),
]
# wp lookup table schema
WP_PLUGIN_LOOKUP_SCHEMA = [
bq.SchemaField(name="plugin",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="tag",
field_type="string",
mode="nullable"),
bq.SchemaField(name="description",
field_type="string",
mode="nullable")
]
# wp lookup table error schema
WP_PLUGIN_LOOKUP_ERROR_SCHEMA = [
bq.SchemaField(name="plugin",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required")
]
# email provider lookup table schema
EMAIL_PROVIDER_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="domain",
field_type="string",
mode="required"),
bq.SchemaField(name="mx_record",
field_type="string",
mode="required"),
bq.SchemaField(name="email_provider",
field_type="string",
mode="required")
]
# ip lookup table schema
IP_LOOKUP_SCHEMA = [
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="ip_address",
field_type="string",
mode="required"),
bq.SchemaField(name="reason",
field_type="string",
mode="nullable"),
bq.SchemaField(name="source",
field_type="string",
mode="required")
]
# app enrichment error lookup
APP_ENRICHMENT_ERROR_SCHEMA = [
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="sfdc_lead_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_contact_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_asset_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_oppty_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="sfdc_acct_id",
field_type="string",
mode="nullable"),
bq.SchemaField(name="app_name",
field_type="string",
mode="nullable"),
bq.SchemaField(name="domain",
field_type="string",
mode="nullable"),
bq.SchemaField(name="email",
field_type="string",
mode="nullable")
]
# wp asset errors
WORDPRESS_ENRICHMENT_ERROR_SCHEMA = [
bq.SchemaField(name="refinery_company_id",
field_type="string",
mode="required"),
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="error_type",
field_type="string",
mode="required"),
bq.SchemaField(name="html_used",
field_type="string",
mode="required")
]
# lead enrichment errors
LEAD_ENRICHMENT_ERROR_SCHEMA = [
bq.SchemaField(name="refined_at",
field_type="timestamp",
mode="required"),
bq.SchemaField(name="refined_date",
field_type="date",
mode="required"),
bq.SchemaField(name="sfdc_lead_id",
field_type="string",
mode="required"),
bq.SchemaField(name="email",
field_type="string",
mode="nullable"),
bq.SchemaField(name="domain",
field_type="string",
mode="nullable")
] | en | 0.693002 |  | 2.108913 | 2 |
bytecode.py | gvx/deja | 15 | 6613769 | <reponame>gvx/deja
from collect import *
import struct
HEADER = '\x07DV'
VERSION = (0, 3)
OP_SIZE = 5
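# Each emitted instruction is a big-endian 32-bit word: the 8-bit opcode sits in
# the top byte (hence the * 0x1000000 shift below) and the operand is masked to
# the remaining 24 bits when the code is written out.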
OPCODES = {
'PUSH_LITERAL': '00000000',
'PUSH_INTEGER': '00000001',
'PUSH_WORD': '00000010',
'SET': '00000011',
'SET_LOCAL': '00000100',
'SET_GLOBAL': '00000101',
'GET': '00000110',
'GET_GLOBAL': '00000111',
'JMP': '00010000',
'JMPZ': '00010001',
'RETURN': '00010010',
'RECURSE': '00010011',
'JMPEQ': '00010100',
'JMPNE': '00010101',
'LABDA': '00100000',
'ENTER_SCOPE': '00100001',
'LEAVE_SCOPE': '00100010',
'NEW_LIST': '00110000',
'POP_FROM': '00110001',
'PUSH_TO': '00110010',
'PUSH_THROUGH': '00110011',
'DROP': '01000000',
'DUP': '01000001',
'SWAP': '01000010',
'ROT': '01000011',
'OVER': '01000100',
'LINE_NUMBER': '01010000',
'SOURCE_FILE': '01010010',
'ENTER_ERRHAND': '01100000',
'LEAVE_ERRHAND': '01100001',
'RAISE': '01100010',
'RERAISE': '01100011',
'NEW_DICT': '01110000',
'HAS_DICT': '01110001',
'GET_DICT': '01110010',
'SET_DICT': '01110011',
'CALL': '10000000',
}
for k in OPCODES:
OPCODES[k] = int(OPCODES[k], 2) * 0x1000000
TYPES = {
'ident': '00000000',
'str': '00000001',
'num': '00000010',
'frac': '00000111',
'short-ident': '10000000',
'short-str': '10000001',
'short-frac': '10000111',
}
for k in TYPES:
TYPES[k] = chr(int(TYPES[k], 2))
signed_char_s = struct.Struct('>b')
def signed_char(x):
return signed_char_s.pack(x)
signed_int_s = struct.Struct('>i')
def signed_int(x):
return signed_int_s.pack(x)
unsigned_int_s = struct.Struct('>I')
def unsigned_int(x):
return unsigned_int_s.pack(x)
signed_long_int_s = struct.Struct('>q')
def signed_long_int(x):
return signed_long_int_s.pack(x)
unsigned_long_int_s = struct.Struct('>Q')
def unsigned_long_int(x):
return unsigned_long_int_s.pack(x)
double_s = struct.Struct('>d')
def double(x):
return double_s.pack(x)
def write_code(code, acc):
for op in code:
acc.append(unsigned_int(OPCODES[op.opcode] | (op.ref & 0xFFFFFF)))
def write_literals(literals, acc):
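	# each literal is a one-byte type tag followed by its payload; strings and
	# idents are length-prefixed, and the "short-" variants use a single length
	# byte instead of a 4-byte big-endian length prefix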
for literal in literals:
acc.append(TYPES[literal[0]])
if literal[0] == 'num':
acc.append(double(literal[1]))
elif literal[0] == 'frac':
n, d = literal[1]
if -128 <= n < 128 and d < 256:
acc[-1] = TYPES['short-frac']
acc.append(signed_char(n))
acc.append(chr(d))
else:
acc.append(signed_long_int(n))
acc.append(unsigned_long_int(d))
elif len(literal[1]) < 256:
acc[-1] = TYPES['short-' + literal[0]]
acc.append(chr(len(literal[1])))
acc.append(literal[1])
else:
acc.append(unsigned_int(len(literal[1])))
acc.append(literal[1])
def write_bytecode(flat_code):
code, literals = flat_code
acc = [HEADER, chr(VERSION[0] * 16 + VERSION[1]), unsigned_int(len(code))]
write_code(code, acc)
write_literals(literals, acc)
	return ''.join(acc) | none | 1 | 2.098549 | 2 |
page_sample/cgi-bin/invalid.py | aprilmayjune135/42_web_server | 2 | 6613770 | # no output is invalid
if __name__ == '__main__':
	exit(0) | en | 0.552736 |  | 1.337576 | 1 |
zfs/pool.py | mcclung/zfsp | 600 | 6613771 | <filename>zfs/pool.py
import os.path
from typing import List
from typing import Union
from typing import overload
from . import constants
from . import objectset
from . import readcontext
from . import datasets
from . import vdevs
from . import posix
def vdev_list_to_dict(vdevs):
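    # index the vdevs by id so a block pointer's vdev number resolves to a device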
d = {}
for v in vdevs:
d[v.id] = v
return d
class Pool(object):
def __init__(self, vdevs: List[vdevs.VDev], try_config=None) -> None:
self.vdevs = vdev_list_to_dict(vdevs)
self.default_compression = constants.Compression.LZJB
self.default_checksum = constants.Checksum.FLETCHER_4
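        # pool-wide geometry (ashift) and version are read from the first vdev's best label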
self.ashift = self.first_vdev().best_label[b'vdev_tree'][b'ashift']
self.version = self.first_vdev().best_label[b'version']
self.try_config = try_config or set()
self._meta_object_sets = {}
def first_vdev(self) -> vdevs.VDev:
return list(self.vdevs.values())[0]
def context(self) -> readcontext.ReadContext:
return readcontext.ReadContext(
self.vdevs,
self.default_compression,
self.default_checksum,
self.ashift,
self.try_config
)
def read_block(self, blkptr) -> bytes:
return self.context().read_block(blkptr)
def read_indirect(self, blkptr) -> bytes:
return self.context().read_indirect(blkptr)
def read_dnode(self, dnode) -> bytes:
return self.context().read_dnode(dnode)
    def read_file(self, path: str) -> bytes:
        head, filename = os.path.split(path)
        dir_ents = self.open(head)
        if filename not in dir_ents:
            raise OSError("file not found: {}".format(filename))
        return dir_ents[filename].read()
@overload
def objset_for_vdev(self, vdev: int) -> objectset.ObjectSet:
...
@overload
def objset_for_vdev(self, vdev: vdevs.VDev) -> objectset.ObjectSet:
...
def objset_for_vdev(self, vdev) -> objectset.ObjectSet:
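        # accept either a vdev id or a VDev instance; the meta object set is cached per vdev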
if isinstance(vdev, int):
vdev = self.vdevs[vdev]
root = self.read_indirect(vdev.active_uberblock.root)
vdev_id = vdev.id
if vdev_id not in self._meta_object_sets:
self._meta_object_sets[vdev_id] = objectset.ObjectSet.from_block(self, root)
return self._meta_object_sets[vdev_id]
@property
def root_dataset(self) -> datasets.Dataset:
objset = self.objset_for_vdev(self.first_vdev())
dir_index = objset[1]['root_dataset']
dataset = objset[dir_index]
return dataset
def metaslab_array(self):
location = self.first_vdev().best_label[b'metaslab_array']
return self.objset_for_vdev(self.first_vdev())[location]
def dataset_for(self, dataset_expr: str) -> datasets.Dataset:
if '@' not in dataset_expr:
dataset_expr += '@'
dataset_name, snapshot_name = dataset_expr.split('@', 1)
ds = self.open(dataset_name)
if isinstance(ds, datasets.Dataset):
return ds.snapshots.get(snapshot_name, ds)
else:
            raise KeyError(dataset_expr)
def open(self, path: str) -> Union[datasets.Dataset, posix.Directory]:
paths = path.lstrip('/').split('/')
current = self.root_dataset
if paths == ['']:
return current
for next_dir in paths:
current = current[next_dir]
return current
| <filename>zfs/pool.py
import os.path
from typing import List
from typing import Union
from typing import overload
from . import constants
from . import objectset
from . import readcontext
from . import datasets
from . import vdevs
from . import posix
def vdev_list_to_dict(vdevs):
d = {}
for v in vdevs:
d[v.id] = v
return d
class Pool(object):
def __init__(self, vdevs: List[vdevs.VDev], try_config=None) -> None:
self.vdevs = vdev_list_to_dict(vdevs)
self.default_compression = constants.Compression.LZJB
self.default_checksum = constants.Checksum.FLETCHER_4
self.ashift = self.first_vdev().best_label[b'vdev_tree'][b'ashift']
self.version = self.first_vdev().best_label[b'version']
self.try_config = try_config or set()
self._meta_object_sets = {}
def first_vdev(self) -> vdevs.VDev:
return list(self.vdevs.values())[0]
def context(self) -> readcontext.ReadContext:
return readcontext.ReadContext(
self.vdevs,
self.default_compression,
self.default_checksum,
self.ashift,
self.try_config
)
def read_block(self, blkptr) -> bytes:
return self.context().read_block(blkptr)
def read_indirect(self, blkptr) -> bytes:
return self.context().read_indirect(blkptr)
def read_dnode(self, dnode) -> bytes:
return self.context().read_dnode(dnode)
def read_file(self, path: str) -> bytes:
pathes = os.path.split(path)
if len(pathes) != 2:
raise NotImplementedError
filename = pathes[-1]
dir_ents = self.open(pathes[0])
if filename not in dir_ents:
raise OSError("file not found: {}".format(filename))
return dir_ents[filename].read()
@overload
def objset_for_vdev(self, vdev: int) -> objectset.ObjectSet:
...
@overload
def objset_for_vdev(self, vdev: vdevs.VDev) -> objectset.ObjectSet:
...
def objset_for_vdev(self, vdev) -> objectset.ObjectSet:
if isinstance(vdev, int):
vdev = self.vdevs[vdev]
root = self.read_indirect(vdev.active_uberblock.root)
vdev_id = vdev.id
if vdev_id not in self._meta_object_sets:
self._meta_object_sets[vdev_id] = objectset.ObjectSet.from_block(self, root)
return self._meta_object_sets[vdev_id]
@property
def root_dataset(self) -> datasets.Dataset:
objset = self.objset_for_vdev(self.first_vdev())
dir_index = objset[1]['root_dataset']
dataset = objset[dir_index]
return dataset
def metaslab_array(self):
location = self.first_vdev().best_label[b'metaslab_array']
return self.objset_for_vdev(self.first_vdev())[location]
def dataset_for(self, dataset_expr: str) -> datasets.Dataset:
if '@' not in dataset_expr:
dataset_expr += '@'
dataset_name, snapshot_name = dataset_expr.split('@', 1)
ds = self.open(dataset_name)
if isinstance(ds, datasets.Dataset):
return ds.snapshots.get(snapshot_name, ds)
else:
raise KeyError
def open(self, path: str) -> Union[datasets.Dataset, posix.Directory]:
paths = path.lstrip('/').split('/')
current = self.root_dataset
if paths == ['']:
return current
for next_dir in paths:
current = current[next_dir]
return current
| none | 1 | 2.190268 | 2 | |
main.py | JavierOramas/PNServer | 3 | 6613772 |
import os
from flask import redirect
from get_ip import get_ip
from scan import load_services, check_local_services, check_network_machines
from dbmodel import Users, Properties, Services, app, db
from components.base.content_endpoints import debug, root, logout, login, scan_network, get_machines_service
from components.base.content_endpoints import manage_page_users, manage_page_services, loged_user
from components.base.content_endpoints import get_user_access, get_services, get_data, init_db, delete_entry
from components.base.content_endpoints import edit_entry
# from components.temp_monitor.api import get_temps, measure, clean, temp_api
@app.route('/debug')
def route_debug():
return debug()
@app.route('/')
def route_root():
return root()
@app.route('/scan/<string:username>')
@app.route('/scan')
def route_scan(username='guest'):
return scan_network(username)
@app.route('/services')
def return_active_services():
return check_local_services(db)
@app.route('/login', methods=['GET', 'POST'])
@app.route('/register', methods=['GET', 'POST'])
def route_login():
return login()
@app.route('/logout')
def route_logout():
return logout()
#TODO Check user access
@app.route('/<string:service>')
def redirect_service(service:str):
return get_machines_service(service)
@app.route('/manage', methods=['GET', 'POST'])
@app.route('/manage/users' , methods=['GET', 'POST'])
def route_manage_users():
return manage_page_users()
@app.route('/manage/services', methods=['GET', 'POST'])
def route_manage_services():
return manage_page_services()
@app.route('/delete/<string:table>/<int:id>', methods=['GET', 'POST'])
def route_delete_entry(table, id):
return delete_entry(table, id)
@app.route('/edit/<string:table>/<int:id>', methods=['GET', 'POST'])
def route_edit_entry(table, id):
return edit_entry(table, id)
#TODO see how to redirect port 9998 to this route
## Temp Monitor
# @app.route('/temps')
# def route_temps():
# return get_temps()
# @app.route('/measure')
# def route_measure():
# return measure()
# @app.route('/clean')
# def route_temps():
# return temps()
if __name__ == '__main__':
db.create_all()
init_db()
os.popen("sass static/scss/style.scss:static/css/style.css")
session = {}
    app.run(debug=True,host=get_ip(), port=2357)
facial_recognition/sphereface.py | qfgaohao/pytorch-facial-recognition | 4 | 6613773 |
import torch
from torch import nn
from .mobilenet_v2 import MobileNetV2
class Block(nn.Module):
def __init__(self, num_residual_layers, in_channels, out_channels,
kernel_size=3, stride=2, padding=1, remove_last_relu=False):
super(Block, self).__init__()
if remove_last_relu and num_residual_layers == 0:
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_channels)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_channels),
nn.PReLU()
)
layers = []
for i in range(num_residual_layers):
if remove_last_relu and i + 1 == num_residual_layers:
layer = nn.Sequential(
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.PReLU(),
nn.BatchNorm2d(out_channels),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(out_channels)
)
else:
layer = nn.Sequential(
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(out_channels),
nn.PReLU(),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(out_channels),
nn.PReLU()
)
layers.append(layer)
self.layers = nn.ModuleList(layers)
def forward(self, x):
x = self.conv(x)
for layer in self.layers:
residual = layer(x)
x = x + residual
return x
class AngularLinear(nn.Module):
def __init__(self, in_channels, out_channels):
super(AngularLinear, self).__init__()
self.fc = nn.Linear(in_channels, out_channels, bias=False)
def forward(self, x):
logits = self.fc(x)
weight_norm = (self.fc.weight ** 2).sum(dim=1, keepdim=True).sqrt()
logits = logits / weight_norm.t()
return logits
class SpereFaceNet(nn.Module):
def __init__(self, input_size, dim: int, num_residual_layers_per_block, out_channels_per_block):
super(SpereFaceNet, self).__init__()
blocks = []
in_channels = 3
for i, (num, out_channels) in enumerate(zip(num_residual_layers_per_block, out_channels_per_block)):
remove_last_relu = (i + 1 == len(num_residual_layers_per_block))
block = Block(num, in_channels, out_channels, remove_last_relu=remove_last_relu)
in_channels = out_channels
blocks.append(block)
self.blocks = nn.ModuleList(blocks)
if isinstance(input_size, int):
input_size = (input_size, input_size)
assert len(input_size) == 2
assert input_size[0] % 16 == 0
assert input_size[1] % 16 == 0
feature_map_size = (int(input_size[0]/16), int(input_size[1]/16))
self.fc = nn.Linear(feature_map_size[0] * feature_map_size[1] * out_channels_per_block[-1], dim)
def forward(self, x):
for block in self.blocks:
x = block(x)
x = x.view(x.size(0), -1)
features = self.fc(x)
return features
class SphereFace(nn.Module):
def __init__(self, base_net, dim: int, num_classes: int=None):
super(SphereFace, self).__init__()
self.base_net = base_net
if num_classes is not None:
self.fc = AngularLinear(dim, num_classes)
def forward(self, x):
x = self.base_net(x)
if self.training:
# normalize weight per class
logits = self.fc(x)
return x, logits
else:
return x
def save(self, model_path: str):
torch.save(self.state_dict(), model_path)
def load(self, model):
state_dict = torch.load(model, map_location=lambda storage, loc: storage)
if not hasattr(self, 'fc'):
state_dict = {k: v for k, v in state_dict.items() if k not in set(["fc.fc.weight"])}
self.load_state_dict(state_dict)
def mobilenet_sphereface(dim=512, input_size=160, num_classes: int=None):
base_net = MobileNetV2(n_class=dim, input_size=input_size, width_mult=1.,
use_batch_norm=True, onnx_compatible=True)
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface4(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [0, 0, 0, 0], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface10(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [0, 1, 2, 0], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface20(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [1, 2, 4, 1], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface36(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [1, 4, 8, 2], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface64(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [3, 8, 16, 3], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
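# Smoke-test sketch (assumptions: torch is installed and, because of the relative
# import of MobileNetV2 above, this is run as a module, e.g.
# `python -m facial_recognition.sphereface`; the input is random illustrative data):
if __name__ == "__main__":
    net = sphereface20(dim=512, input_size=(112, 96))
    net.eval()  # eval mode returns embeddings only (no AngularLinear logits)
    with torch.no_grad():
        emb = net(torch.randn(2, 3, 112, 96))
    print(emb.shape)  # expected: torch.Size([2, 512])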
cirtorch/backbones/densenet.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | 3 | 6613774 |
import sys
from collections import OrderedDict
from functools import partial
import torch.nn as nn
from inplace_abn import ABN
from .misc import GlobalAvgPool2d, DenseModule
from .util import try_index, convert
CONV_PARAMS = ["weight"]
BN_PARAMS = ["weight", "bias", "running_mean", "running_var"]
FC_PARAMS = ["weight", "bias"]
class DenseNet(nn.Module):
def __init__(self,
structure,
norm_act=ABN,
config=None,
input_3x3=False,
growth=32,
theta=0.5,
classes=0,
dilation=1):
"""DenseNet
Parameters
----------
structure : list of int
Number of layers in each of the four dense blocks of the network.
norm_act : callable
Function to create normalization / activation Module.
input_3x3 : bool
If `True` use three `3x3` convolutions in the input module instead of a single `7x7` one.
growth : int
Number of channels in each layer, i.e. the "growth" factor of the DenseNet.
theta : float
Reduction factor for the transition blocks.
classes : int
        If not `0`, also include global average pooling and a fully-connected layer
        with `classes` outputs at the end of the network.
dilation : int or list of int
List of dilation factors, or `1` to ignore dilation. If the dilation factor for a module is greater than `1`
skip the pooling in the transition block right before it.
"""
super(DenseNet, self).__init__()
self.structure = structure
if len(structure) != 4:
raise ValueError("Expected a structure with four values")
self.bottleneck = input_3x3
# Initial layers
if input_3x3:
layers = [
("conv1", nn.Conv2d(3, growth * 2, 3, stride=2, padding=1, bias=False)),
("bn1", norm_act(growth * 2)),
("conv2", nn.Conv2d(growth * 2, growth * 2, 3, stride=1, padding=1, bias=False)),
("bn2", norm_act(growth * 2)),
("conv3", nn.Conv2d(growth * 2, growth * 2, 3, stride=1, padding=1, bias=False)),
("pool", nn.MaxPool2d(3, stride=2, padding=1))
]
else:
layers = [
("conv1", nn.Conv2d(3, growth * 2, 7, stride=2, padding=3, bias=False)),
("bn1", norm_act(growth * 2)),
("pool", nn.MaxPool2d(3, stride=2, padding=1))
]
self.mod1 = nn.Sequential(OrderedDict(layers))
in_channels = growth * 2
for mod_id in range(4):
d = try_index(dilation, mod_id)
s = 2 if d == 1 and mod_id > 0 else 1
# Create transition module
if mod_id > 0:
out_channels = int(in_channels * theta)
layers = [
("bn", norm_act(in_channels)),
("conv", nn.Conv2d(in_channels, out_channels, 1, bias=False))
]
if s == 2:
layers.append(("pool", nn.AvgPool2d(2, 2)))
self.add_module("tra%d" % (mod_id + 1), nn.Sequential(OrderedDict(layers)))
in_channels = out_channels
# Create dense module
mod = DenseModule(in_channels, growth, structure[mod_id], norm_act=norm_act, dilation=d)
self.add_module("mod%d" % (mod_id + 2), mod)
in_channels = mod.out_channels
# Pooling and predictor
self.bn_out = norm_act(in_channels)
if classes != 0:
self.classifier = nn.Sequential(OrderedDict([
("avg_pool", GlobalAvgPool2d()),
("fc", nn.Linear(in_channels, classes))
]))
def copy_layer(self, inm, outm, name_in, name_out, params):
for param_name in params:
outm[name_out + "." + param_name] = inm[name_in + "." + param_name]
#input()
def convert(self, model):
out = dict()
num_convs = 2
# Initial module
if not self.bottleneck:
self.copy_layer(model, out, "features.conv0", "mod1.conv1", CONV_PARAMS)
self.copy_layer(model, out, "features.norm0", "mod1.bn1", BN_PARAMS)
else:
raise ValueError(" Not implemented yet ")
# Other modules
for mod_id, num in enumerate(self.structure):
for block_id in range(num):
for conv_id in range(num_convs):
self.copy_layer(model, out,
"features.denseblock{}.denselayer{}.conv.{}".format(mod_id + 1, block_id + 1, conv_id + 1),
"mod{}.convs{}.{}.conv".format(mod_id + 2, conv_id + 1, block_id),
CONV_PARAMS)
self.copy_layer(model, out,
"features.denseblock{}.denselayer{}.norm.{}".format(mod_id + 1, block_id +1 , conv_id + 1),
"mod{}.convs{}.{}.bn".format(mod_id + 2, conv_id + 1, block_id),
BN_PARAMS)
# Try copying projection module
try:
self.copy_layer(model, out,
"features.layer{}.{}.downsample.0".format(mod_id + 1, block_id + 1, conv_id + 1),
"mod{}.convs{}.{}.proj_conv".format(mod_id + 2, conv_id + 1, block_id),
CONV_PARAMS)
self.copy_layer(model, out,
"features.layer{}.{}.downsample.1".format(mod_id + 1, block_id + 1, conv_id + 1),
"mod{}.convs{}.{}.proj_bn".format(mod_id + 2, conv_id + 1, block_id),
BN_PARAMS)
except KeyError:
pass
# Pass transitions modules
try:
self.copy_layer(model, out,
"features.transition{}.conv".format(mod_id + 1),
"tra{}.conv".format(mod_id + 2),
CONV_PARAMS)
self.copy_layer(model, out,
"features.transition{}.norm".format(mod_id + 1),
"tra{}.bn".format(mod_id + 2),
BN_PARAMS)
except KeyError:
pass
# Pooling and predictor
self.copy_layer(model, out, "features.norm5", "bn_out", BN_PARAMS)
if hasattr(self, "classifier"):
self.copy_layer(model, out, "classifier", "classifier.fc", FC_PARAMS)
return out
def forward(self, x):
x = self.mod1(x)
x = self.mod2(x)
x = self.tra2(x)
x = self.mod3(x)
x = self.tra3(x)
x = self.mod4(x)
x = self.tra4(x)
x = self.mod5(x)
x = self.bn_out(x)
if hasattr(self, "classifier"):
x = self.classifier(x)
return x
_NETS = {
"121": {"structure": [6, 12, 24, 16], "growth": 32},
"161": {"structure": [6, 12, 36, 24], "growth": 48},
"169": {"structure": [6, 12, 32, 32], "growth": 32},
"201": {"structure": [6, 12, 48, 32], "growth": 32},
"264": {"structure": [6, 12, 64, 48], "growth": 32},
}
__all__ = []
for name, params in _NETS.items():
net_name = "densenet" + name
setattr(sys.modules[__name__], net_name, partial(DenseNet, **params))
__all__.append(net_name)
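# Smoke-test sketch (assumptions: the inplace_abn package and the sibling .misc/.util
# modules are importable, so run as a module, e.g. `python -m cirtorch.backbones.densenet`;
# the 224x224 input is illustrative, and GlobalAvgPool2d is assumed to flatten to (N, C)):
if __name__ == "__main__":
    import torch
    net = densenet121(classes=10)
    net.eval()
    with torch.no_grad():
        y = net(torch.randn(1, 3, 224, 224))
    print(y.shape)  # expected: torch.Size([1, 10])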
utils/scripts/OOOlevelGen/src/levels/Watch_Out.py | fullscreennl/monkeyswipe | 0 | 6613775 |
import LevelBuilder
from sprites import *
def render(name,bg):
lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
lb.addObject(Beam.BeamSprite(x=147, y=261,width=306,height=14,angle='-4',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Enemy.EnemySprite(x=23, y=297,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Beam.BeamSprite(x=238, y=165,width=201,height=14,angle='56',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=285, y=125,width=342,height=14,angle='56',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=451, y=260,width=154,height=14,angle='-4',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Enemy.EnemySprite(x=315, y=227,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Enemy.EnemySprite(x=273, y=278,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Star.StarSprite(x=447, y=291,width=32,height=32))
lb.addObject(Hero.HeroSprite(x=23, y=24,width=32,height=32))
    lb.render()
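# e.g. render('Watch_Out', 'background.png') would write Watch_Out.plist via
# LevelBuilder (the background asset name here is an illustrative assumption).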
from sprites import *
def render(name,bg):
lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
lb.addObject(Beam.BeamSprite(x=147, y=261,width=306,height=14,angle='-4',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Enemy.EnemySprite(x=23, y=297,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Beam.BeamSprite(x=238, y=165,width=201,height=14,angle='56',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=285, y=125,width=342,height=14,angle='56',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=451, y=260,width=154,height=14,angle='-4',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Enemy.EnemySprite(x=315, y=227,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Enemy.EnemySprite(x=273, y=278,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Star.StarSprite(x=447, y=291,width=32,height=32))
lb.addObject(Hero.HeroSprite(x=23, y=24,width=32,height=32))
lb.render() | none | 1 | 2.538557 | 3 | |
plots/phase_space_plots.py | fstrnad/pyDRLinWESM | 6 | 6613776 |
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
from ays_general import __version__, __version_info__
import ays_model as aws
import ays_general
from pyviability import helper
import numpy as np
import scipy.integrate as integ
import scipy.optimize as opt
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as plt3d
import matplotlib.ticker as ticker
from matplotlib import animation
import warnings as warn
import heapq as hq
import operator as op
import argparse, argcomplete
import pickle
import functools as ft
green_fp=[0,1,1]
final_radius=0.1
brown_fp=[0.6,0.4,0]
def good_final_state(state):
a,y,s=state
if np.abs(a - green_fp[0]) < final_radius and np.abs(y - green_fp[1]) < final_radius and np.abs(s-green_fp[2])< final_radius:
return True
else:
return False
management_options=['default', 'LG' , 'ET','LG+ET' ]
management_actions=[(False, False), (True, False), (False, True), (True, True)]
def get_parameters(action_number=0):
"""
    This function returns the parameter set for the chosen management option.
    Here the action numbers are translated into parameter lists, according to the
    chosen management option.
    Parameters:
        -action: Number of the action in the action set.
                Can be one of: 'default', 'degrowth', 'energy-transformation', or
                both DG and ET at the same time.
"""
# AYS example from Kittel et al. 2017:
tau_A = 50
tau_S = 50
beta = 0.03
beta_LG = 0.015
eps = 147
theta = beta /(350) # beta / ( 950 - A_offset(=600) )
rho = 2.
sigma = 4e12
sigma_ET = sigma*0.5**(1/rho)
phi = 4.7e10
AYS0 = [240, 7e13, 5e11]
APB = 345
YSF = 4e13
if action_number < len(management_actions):
action=management_actions[action_number]
    else:
        raise ValueError("Management option is not available: " + str(action_number))
parameter_list=[(beta_LG if action[0] else beta ,
eps, phi, rho,
sigma_ET if action[1] else sigma,
tau_A, tau_S, theta)]
return parameter_list
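# Illustration: get_parameters(management_options.index('LG+ET')) returns
# [(beta_LG, eps, phi, rho, sigma_ET, tau_A, tau_S, theta)], i.e. both the
# low-growth beta and the energy-transformation sigma are switched on.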
def plot_phase_space(dynamic):
save_path='./images/phase_space_plots/phase_space_' + dynamic + '.pdf'
num = 400
shift_axis=(2400, 1e14, 1e12)
aws_0 = np.random.rand(num, 3)
#print(aws_0)
# a small hack to make all the parameters available as global variables
# aws.globalize_dictionary(aws.AWS_parameters, module=aws)
aws.globalize_dictionary(aws.grid_parameters, module=aws)
aws.globalize_dictionary(aws.boundary_parameters, module=aws)
# parameter_dict = aws.get_management_parameter_dict(dynamic, aws.AYS_parameters)
# parameter_list=[]
# parameter_list.append(helper.get_ordered_parameters(aws._AYS_rhs, parameter_dict))
# print(parameter_list)
#
parameter_list=get_parameters(management_options.index(dynamic))
print(parameter_list)
########################################
# prepare the integration
########################################
time = np.linspace(0, 300, 1000)
one_step=np.linspace(0,10,1000)
#formatters, locators=get_ticks()
colortop = "green"
colorbottom = "black"
fig, ax3d = ays_general.create_figure(A_mid=aws.A_mid, W_mid=aws.W_mid, S_mid=aws.S_mid)
#fig = plt.figure(figsize=(18,8))
#ax3d = plt3d.Axes3D(fig)
ax3d.view_init(ays_general.ELEVATION_FLOW, ays_general.AZIMUTH_FLOW)
#ax3d.view_init(elev=89, azim=270)
S_scale = 1e9
W_scale = 1e12
ax3d.set_xlabel("\n\nexcess atmospheric carbon\nstock A [GtC]", size=16)
ax3d.set_ylabel("\neconomic output Y [%1.0e USD/yr]"%W_scale, size=16)
ax3d.set_zlabel("\n\nrenewable knowledge\nstock S [%1.0e GJ]"%S_scale, size=16)
x0_test = [.9, 0.5, 0] # a, w, s
# management trajectory with degrowth:
# Here we get the hairy trajectories that are integrated via odeint
for i in range(num):
x0 = aws_0[i]
traj = integ.odeint(aws.AYS_rescaled_rhs, x0, time, args=parameter_list[0])
#print(traj[-1])
ax3d.plot3D(xs=traj[:,0], ys=traj[:,1], zs=traj[:,2],
color=colorbottom if traj[-1,2]<0.5 else colortop, alpha=.3)
# ax3d.scatter(*zip(traj[0]),color='grey')
# ax3d.scatter(traj[-1][0], traj[-1][1], traj[-1][2],
# color='green' if good_final_state(traj[-1])else 'red' , alpha=0.5)
#print(x0_test)
traj_one_step=integ.odeint(aws.AYS_rescaled_rhs, x0_test,one_step , args=parameter_list[0])
#traj_one_step=integ.odeint(aws.AYS_rescaled_rhs, green_fp,one_step , args=parameter_list[0])
ax3d.plot3D(xs=traj_one_step[:,0], ys=traj_one_step[:,1], zs=traj_one_step[:,2],
color='red', alpha=.3)
ays_general.add_boundary(ax3d,
sunny_boundaries=["planetary-boundary", "social-foundation"],
**aws.grid_parameters, **aws.boundary_parameters)
#ax3d.set_xlim(0, )
#ax3d.set_ylim(0, 10e13)
#ax3d.set_zlim(0, 1e12)
ax3d.grid(False)
plt.savefig(save_path)
plt.show()
if __name__ == "__main__":
plot_phase_space('default')
textmagic/rest/models/sessions.py | textmagic/textmagic-rest-python | 28 | 6613777 |
from . import Model, CollectionModel, Messages
class Session(Model):
"""
A Session object model
.. attribute:: id
.. attribute:: startTime
.. attribute:: text
.. attribute:: source
.. attribute:: referenceId
.. attribute:: price
.. attribute:: numbersCount
"""
class Sessions(CollectionModel):
name = "sessions"
searchable = False
instance = Session
def list(self, **kwargs):
"""
Returns a list of :class:`Session` objects and a pager dict.
:Example:
sessions, pager = client.sessions.list()
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
"""
kwargs["search"] = False
return self.get_instances(kwargs)
def delete(self, uid):
"""
Delete the specified Session from Textmagic.
Returns True if success.
:Example:
client.sessions.delete(1901001)
:param int uid: The unique id of the Session. Required.
"""
return self.delete_instance(uid)
def messages(self, uid=0, **kwargs):
"""
        Fetch messages for a given session id.
        A useful synonym for the "messages/search" command with the `sessionId` parameter provided.
Returns a list of :class:`Message` objects and a pager dict.
:Example:
messages = client.sessions.messages(1901001)
:param int uid: The unique id of the Session. Required.
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
"""
messages = Messages(self.base_uri, self.auth)
return self.get_subresource_instances(uid, instance=messages,
                                              resource="messages", params=kwargs)
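# Combined usage sketch (hedged: `client` stands for a configured TextMagic REST
# client that exposes this collection as `client.sessions`, as in the docstrings):
#   sessions, pager = client.sessions.list(page=1, limit=10)
#   messages, pager = client.sessions.messages(sessions[0].id)
#   client.sessions.delete(sessions[0].id)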
src/TheseNeedReformulating/PytorchToONNX.py | verivital/nnmvt | 4 | 6613778 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Neural Network Verification Model Translation Tool (NNVMT)
@author:
<NAME> (<EMAIL>)
"""
### Example ###
# from src.PytorchModels import SuperResolutionNet
# torch_model = SuperResolutionNet(upscale_factor = 3)
# PytorchPrinterCNN('https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth',
# 'C:\Users\manzand\AnacondaProjects\nnvmt\translated_networks\resolution.onnx',torch_model,[1,244,244])
#import os
#from torch import nn
import torch.utils.model_zoo as model_zoo
import torch.onnx
#import torch.nn as nn
#import torch.nn.init as init
# From a terminal, run the following steps:
# >> python
# >> import model
# >> torch_model = model()
# >> exec(open('PytorchToONNX.py').read())
# >> PytorchPrinterCNN(Weights, OutputFile, torch_model, InputSize, *names_of_inputs_and_outputs)
def PytorchPrinterCNN(WeightsFile, OutputFilePath, torch_model, input_size, batch_size = 1 ,inname = ['inputs'], outname = ['outputs']):
#get the name of the file without the end extension
#model_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth'
# torch_model.load_state_dict(model_zoo.load_url(model_url, map_location=map_location))
torch_model.load_state_dict(model_zoo.load_url(WeightsFile, map_location = 'cpu'))
torch_model.train(False)
#batch_size = 1
# input size of the form [channels, height, widht]
x = torch.randn(batch_size,input_size[0], input_size[1], input_size[2], requires_grad=True)
# Export the model
torch.onnx.export(torch_model, # model being run
x, # model input (or a tuple for multiple inputs)
OutputFilePath, # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
input_names = inname, # the model's input names
output_names = outname) # the model's output names
exit()
API/data.py | Johan809/web_final | 3 | 6613779 |
from datetime import datetime
from peewee import *
import uuid
db = SqliteDatabase('ITLAMED.db')
ZodiacalSigns = [
'aries', 'libra', 'tauro',
'escorpio', 'geminis', 'sagitario',
'cancer', 'capricornio', 'leo',
'acuario', 'virgo', 'piscis'
]
class DBModel(Model):
class Meta:
database = db
class Doctor(DBModel):
_id = AutoField()
name = CharField()
email = CharField()
password = CharField()
class Patient(DBModel):
_id = AutoField()
dr = ForeignKeyField(Doctor, backref='h_doctor')
    id_card = CharField() # national ID card (cédula)
    photo = BlobField() # convert the image to base64 and store it in the db
name = CharField()
lastname = CharField()
blood_type = CharField()
email = CharField()
gender = CharField()
b_date = DateField()
allergies = CharField()
class Consultation(DBModel):
_id = AutoField()
dr = ForeignKeyField(Doctor, backref='cons')
patient = ForeignKeyField(Patient, backref='patients')
date = DateField()
motive = CharField()
n_insurance = CharField()
p_amount = FloatField()
diagnosis = TextField()
note = TextField()
    photo = BlobField() # same as the patient photo
class Sesion(DBModel):
_id = AutoField()
token = CharField()
user = ForeignKeyField(Doctor, backref='user')
def getOld(nacDate: str):
today = datetime.now()
birthday = datetime.strptime(nacDate, "%Y-%m-%d")
return today.year - birthday.year - ((today.month, today.day) < (birthday.month, birthday.day))
def getSign(nacDate: datetime.date):
month = nacDate.month
day = nacDate.day
if ((day >= 21 and month == 3) or (day <= 20 and month == 4)):
sign = 0
elif ((day >= 24 and month == 9) or (day <= 23 and month == 10)):
sign = 1
elif ((day >= 21 and month == 4) or (day <= 21 and month == 5)):
sign = 2
elif ((day >= 24 and month == 10) or (day <= 22 and month == 11)):
sign = 3
elif ((day >= 22 and month == 5) or (day <= 21 and month == 6)):
sign = 4
elif ((day >= 23 and month == 11) or (day <= 21 and month == 12)):
sign = 5
elif ((day >= 21 and month == 6) or (day <= 23 and month == 7)):
sign = 6
elif ((day >= 22 and month == 12) or (day <= 20 and month == 1)):
sign = 7
elif ((day >= 24 and month == 7) or (day <= 23 and month == 8)):
sign = 8
elif ((day >= 21 and month == 1) or (day <= 19 and month == 2)):
sign = 9
elif ((day >= 24 and month == 8) or (day <= 23 and month == 9)):
sign = 10
elif ((day >= 20 and month == 2) or (day <= 20 and month == 3)):
sign = 11
return ZodiacalSigns[sign].capitalize()
def getDate(_date: str):
if "-" in _date:
withDash = int(_date.split('-')[0])
if withDash > 1000:
resultDate = datetime.strptime(_date, "%Y-%m-%d")
return resultDate
elif withDash <= 31:
resultDate = datetime.strptime(_date, "%d-%m-%Y")
return resultDate
elif "/" in _date:
withSlash = int(_date.split('/')[0])
if withSlash > 1000:
resultDate = datetime.strptime(_date, "%Y/%m/%d")
return resultDate
elif withSlash <= 31:
resultDate = datetime.strptime(_date, "%d/%m/%Y")
return resultDate
def generate_token():
return str(uuid.uuid4()).replace('-', '')
def serverAnswer(status: bool, msg: str, args={}):
_arg = False
if args != {}:
_arg = True
a = {'ok': status, 'msg': msg, 'arg': args}
b = {'ok': status, 'msg': msg}
return a if _arg else b
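# Smoke test for the pure helpers above (values are illustrative; running this
# directly still requires peewee, which the module imports at the top):
if __name__ == "__main__":
    print(getOld("1990-05-15"))                 # age in whole years
    print(getSign(getDate("15/05/1990")))       # -> 'Tauro'
    print(generate_token())                     # 32-char hex token
    print(serverAnswer(True, "ok", {"id": 1}))  # dict including the 'arg' key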
asynapplicationinsights/channel/aiohttpchannel.py | RobertoPrevato/aioapplicationinsights | 2 | 6613780 |
import aiohttp
import asyncio
from typing import Optional, List
from .abstractions import TelemetryChannel
from ..exceptions import OperationFailed
from ..utils.json import friendly_dumps
class AiohttpTelemetryChannel(TelemetryChannel):
    def __init__(self,
                 loop: Optional[asyncio.AbstractEventLoop] = None,
                 client: Optional[aiohttp.ClientSession] = None,
                 endpoint: Optional[str] = None):
super().__init__()
dispose_client = True
if client is None:
if loop is None:
loop = asyncio.get_event_loop()
client = aiohttp.ClientSession(loop=loop)
else:
dispose_client = False
if not endpoint:
endpoint = 'https://dc.services.visualstudio.com/v2/track'
self._dispose_client = dispose_client
self._http_client = client
self._endpoint = endpoint
self._headers = {'Accept': 'application/json', 'Content-Type': 'application/json; charset=utf-8'}
async def send(self, data: List):
body = friendly_dumps(data)
response = await self._http_client.post(self._endpoint,
data=body.encode('utf8'),
headers=self._headers)
if response.status != 200:
text = await response.text()
raise OperationFailed(f'Response status does not indicate success: {response.status}; response body: {text}')
async def dispose(self):
# NB: the client is disposed only if it was instantiated
if self._dispose_client:
await self._http_client.close()
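A minimal usage sketch for the channel above. The telemetry payload here is a placeholder, not the documented Application Insights envelope format:

import asyncio

async def _demo():
    channel = AiohttpTelemetryChannel()  # creates and owns its own ClientSession
    try:
        # hypothetical item; real envelopes follow the Application Insights schema
        await channel.send([{'name': 'ExampleEvent', 'time': '2020-01-01T00:00:00Z'}])
    finally:
        await channel.dispose()

# asyncio.get_event_loop().run_until_complete(_demo())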
py/illuminated/02_count-inversions.py | bmoretz/Daily-Coding-Problem | 1 | 6613781 | '''
This file contains all of the 100,000 integers between 1 and 100,000 (inclusive) in some order, with no integer repeated.
Your task is to compute the number of inversions in the file given, where the i^th row of the file indicates the i^th entry of an array.
Because of the large size of this array, you should implement the fast divide-and-conquer algorithm covered in the video lectures.
The numeric answer for the given input file should be typed in the space below.
So if your answer is 1198233847, then just type 1198233847 in the space provided without any space / commas / any other punctuation marks. You can make up to 5 attempts, and we'll use the best one for grading.
'''
from utility import get_test_file
file_path = get_test_file('count-inversions', 'IntegerArray.txt')
def to_array(s):
    # renamed the parameter so it no longer shadows the built-in str; this helper is unused below
    return [int(c) for c in s]
def read_numbers():
with open(file_path, 'r') as f:
lines = f.read().splitlines()
numbers = [int(line) for line in lines]
return numbers
def count_inversions(arr):
def ci(arr):
n = len(arr)
if n <= 1:
return (0, arr)
mid, inversions = n//2, 0
# parse out results from left call
left_inversions, left_set = ci(arr[:mid])
inversions += left_inversions
# parse results from right call
right_inversions, right_set = ci(arr[mid:])
inversions += right_inversions
# merge results
left_index, right_index = 0, 0
result = []
for index in range(n):
left = left_set[left_index] if left_index < len(left_set) else None
right = right_set[right_index] if right_index < len(right_set) else None
            if right is None or (left is not None and left <= right):
result += [left]
left_index += 1
else:
result += [right]
right_index += 1
                if left is not None:
                    # every remaining left element pairs with this right element as an inversion
                    inversions += len(left_set[left_index:])
return (inversions, result)
    if arr is None:
return (0, None)
inversions, results = ci(arr)
return inversions
arr = read_numbers()
result = count_inversions(arr)
print(result) | '''
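A small sanity check, not part of the assignment input, illustrating the merge-step counting:

# [1, 3, 5, 2, 4, 6] contains exactly three inversions: (3, 2), (5, 2), (5, 4)
assert count_inversions([1, 3, 5, 2, 4, 6]) == 3
assert count_inversions([1, 2, 3]) == 0   # already sorted: no inversions
assert count_inversions([3, 2, 1]) == 3   # fully reversed: n*(n-1)/2 inversions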
This file contains all of the 100,000 integers between 1 and 100,000 (inclusive) in some order, with no integer repeated.
Your task is to compute the number of inversions in the file given, where the i^th row of the file indicates the i^th entry of an array.
Because of the large size of this array, you should implement the fast divide-and-conquer algorithm covered in the video lectures.
The numeric answer for the given input file should be typed in the space below.
So if your answer is 1198233847, then just type 1198233847 in the space provided without any space / commas / any other punctuation marks. You can make up to 5 attempts, and we'll use the best one for grading.
'''
from utility import get_test_file
file_path = get_test_file('count-inversions', 'IntegerArray.txt')
def to_array(str):
return [int(c) for c in str]
def read_numbers():
with open(file_path, 'r') as f:
lines = f.read().splitlines()
numbers = [int(line) for line in lines]
return numbers
def count_inversions(arr):
def ci(arr):
n = len(arr)
if n <= 1:
return (0, arr)
mid, inversions = n//2, 0
# parse out results from left call
left_inversions, left_set = ci(arr[:mid])
inversions += left_inversions
# parse results from right call
right_inversions, right_set = ci(arr[mid:])
inversions += right_inversions
# merge results
left_index, right_index = 0, 0
result = []
for index in range(n):
left = left_set[left_index] if left_index < len(left_set) else None
right = right_set[right_index] if right_index < len(right_set) else None
if not right or (left and left <= right):
result += [left]
left_index += 1
else:
result += [right]
right_index += 1
if left: inversions += len(left_set[left_index:])
return (inversions, result)
if arr == None:
return (0, None)
inversions, results = ci(arr)
return inversions
arr = read_numbers()
result = count_inversions(arr)
print(result) | en | 0.875032 | This file contains all of the 100,000 integers between 1 and 100,000 (inclusive) in some order, with no integer repeated. Your task is to compute the number of inversions in the file given, where the i^th row of the file indicates the i^th entry of an array. Because of the large size of this array, you should implement the fast divide-and-conquer algorithm covered in the video lectures. The numeric answer for the given input file should be typed in the space below. So if your answer is 1198233847, then just type 1198233847 in the space provided without any space / commas / any other punctuation marks. You can make up to 5 attempts, and we'll use the best one for grading. # parse out results from left call # parse results from right call # merge results | 4.02022 | 4 |
datasets/CryptoPunks/CryptoPunks.py | AlgoveraAI/creations | 0 | 6613782 |
"""CryptoPunks Data Set"""
import datasets
_DATA_URL = "https://drive.google.com/file/d/1d01VQ1plsB8ZIO5VF0LKV2MxdNQjvoCW/view?usp=sharing"
class CryptoPunks(datasets.GeneratorBasedBuilder):
"""CryptoPunks Data Set"""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_images",
version=datasets.Version("1.0.0", ""),
description="Plain image import of CryptoPunks Dataset",
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"img": datasets.Array3D(shape=(24, 24, 3), dtype="uint8"),
"label": datasets.features.ClassLabel(
names=[
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
),
}
),
supervised_keys=("img", "label"),
homepage="https://www.cs.toronto.edu/~kriz/cifar.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
archive = dl_manager.download(_DATA_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "train"}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "test"}
),
]
def _generate_examples(self, files, split):
"""This function returns the examples in the raw (text) form."""
if split == "train":
batches = ["data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", "data_batch_5"]
if split == "test":
batches = ["test_batch"]
batches = [f"cifar-10-batches-py/{filename}" for filename in batches]
for path, fo in files:
if path in batches:
dict = pickle.load(fo, encoding="bytes")
labels = dict[b"labels"]
images = dict[b"data"]
for idx, _ in enumerate(images):
img_reshaped = np.transpose(np.reshape(images[idx], (3, 32, 32)), (1, 2, 0))
yield f"{path}_{idx}", {
"img": img_reshaped,
"label": labels[idx],
} | """CryptoPunks Data Set"""
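Note that the class labels, homepage and batch names above are leftovers from the CIFAR-10 loader this script was adapted from, and the declared (24, 24, 3) feature shape does not match the 32x32 reshape in _generate_examples. A hypothetical way to exercise the builder via the datasets library (the local path is an assumption):

# from datasets import load_dataset
# ds = load_dataset('./CryptoPunks.py', 'plain_images', split='train')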
import datasets
_DATA_URL = "https://drive.google.com/file/d/1d01VQ1plsB8ZIO5VF0LKV2MxdNQjvoCW/view?usp=sharing"
class CryptoPunks(datasets.GeneratorBasedBuilder):
"""CryptoPunks Data Set"""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_images",
version=datasets.Version("1.0.0", ""),
description="Plain image import of CryptoPunks Dataset",
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"img": datasets.Array3D(shape=(24, 24, 3), dtype="uint8"),
"label": datasets.features.ClassLabel(
names=[
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
),
}
),
supervised_keys=("img", "label"),
homepage="https://www.cs.toronto.edu/~kriz/cifar.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
archive = dl_manager.download(_DATA_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "train"}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "test"}
),
]
def _generate_examples(self, files, split):
"""This function returns the examples in the raw (text) form."""
if split == "train":
batches = ["data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", "data_batch_5"]
if split == "test":
batches = ["test_batch"]
batches = [f"cifar-10-batches-py/{filename}" for filename in batches]
for path, fo in files:
if path in batches:
dict = pickle.load(fo, encoding="bytes")
labels = dict[b"labels"]
images = dict[b"data"]
for idx, _ in enumerate(images):
img_reshaped = np.transpose(np.reshape(images[idx], (3, 32, 32)), (1, 2, 0))
yield f"{path}_{idx}", {
"img": img_reshaped,
"label": labels[idx],
} | en | 0.316143 | CryptoPunks Data Set CryptoPunks Data Set This function returns the examples in the raw (text) form. | 2.822002 | 3 |
tests/django_restframework_gis_tests/migrations/0003_schema_models.py | ioionu/django-rest-framework-gis | 582 | 6613783 | # Generated by Django 3.0.4 on 2020-04-03 06:52
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_restframework_gis_tests", "0002_nullable"),
]
operations = [
migrations.CreateModel(
name="LineStringModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"points",
django.contrib.gis.db.models.fields.LineStringField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="MultiLineStringModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"points",
django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="MultiPointModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"points",
django.contrib.gis.db.models.fields.MultiPointField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="MultiPolygonModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"polygon",
django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="PointModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
("location", django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={"abstract": False},
),
migrations.CreateModel(
name="PolygonModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"polygon",
django.contrib.gis.db.models.fields.PolygonField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="GeometryModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"points",
django.contrib.gis.db.models.fields.GeometryField(srid=4326),
),
],
options={"abstract": False},
),
]
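For reference, the migration above corresponds to model definitions roughly like this sketch (reconstructed for illustration, not part of the package):

# from django.contrib.gis.db import models as gis_models
# from django.db import models
#
# class PointModel(models.Model):
#     random_field1 = models.CharField(max_length=32)
#     random_field2 = models.IntegerField()
#     location = gis_models.PointField(srid=4326)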
| # Generated by Django 3.0.4 on 2020-04-03 06:52
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_restframework_gis_tests", "0002_nullable"),
]
operations = [
migrations.CreateModel(
name="LineStringModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"points",
django.contrib.gis.db.models.fields.LineStringField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="MultiLineStringModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"points",
django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="MultiPointModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"points",
django.contrib.gis.db.models.fields.MultiPointField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="MultiPolygonModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"polygon",
django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="PointModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
("location", django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={"abstract": False},
),
migrations.CreateModel(
name="PolygonModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"polygon",
django.contrib.gis.db.models.fields.PolygonField(srid=4326),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="GeometryModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("random_field1", models.CharField(max_length=32)),
("random_field2", models.IntegerField()),
(
"points",
django.contrib.gis.db.models.fields.GeometryField(srid=4326),
),
],
options={"abstract": False},
),
]
| en | 0.850008 | # Generated by Django 3.0.4 on 2020-04-03 06:52 | 1.697251 | 2 |
test/test_KewordsExtractor.py | GulnaraSh/Knowledge-mining-python | 0 | 6613784 |
# -*- coding: utf-8 -*-
"""
Unit tests for the knowmine.KeywordsExtractor module.
"""
from knowmine.KeywordsExtractor import ExtractKeywords
def test_ExtractKeywords():
    sentences = ['The European Union established the regulation '
                 + 'registration, evaluation, authorization and restriction of '
                 + 'chemicals (REACH) to protect human health and the '
                 + 'environment from hazards of industrial chemicals.',
                 'Industry has to register all substances produced in or '
                 + 'imported into the EU at rate >=1 t/y.',
                 'Physicochemical, (eco) toxicological and exposure-relevant '
                 + 'information have to be supplied for registration depending '
                 + 'on the production rate.',
                 'For environmental assessment of chemicals to be registered'
                 + ' by 2018 (1-100 t/y) at least the following information '
                 + 'has to be presented: - Acute toxicity to algae, daphnia and '
                 + 'fish (the latter only for substances >10 t/y) - '
                 + 'Octanol/water partition coefficient (log KOW) - '
                 + 'Water solubility (SW) - Biodegradability. '
                 + 'This information is used to decide whether a substance '
                 + 'has to be classified as hazardous to the aquatic '
                 + 'environment and labelled according to the CLP regulation '
                 + '(European Commission, 2009).',
                 'For substances >10 t/y a chemical safety assessment (CSA) '
                 + 'has to be performed, including the derivation of the '
                 + 'predicted environmental concentration (PEC) as well as '
                 + 'the predicted no effect concentration (PNEC) and '
                 + 'the assessment of (very) persistent, '
                 + '(very) bioaccumulative and toxic (PBT/vPvB) properties.']
keywords = ExtractKeywords(sentences)
assert len(keywords) == 5
assert len(keywords[0]) != 0
| # -*- coding: utf-8 -*-
"""
Unit tests for the knowmine.KeywordsExtractor module.
"""
from knowmine.KeywordsExtractor import ExtractKeywords
def test_ExtractKeywords():
sentences = ['The European Union established the regulation'
+ 'registration, evaluation, authorization and restriction of'
+ 'chemicals (REACH) to protect human health and the'
+ 'environment from hazards of industrial chemicals.',
'Industry has to register all substances produced in or'
+ 'imported into the EU at rate �1 t/y.',
'Physicochemical, (eco) toxicological and exposure-relevant'
+ 'information have to be supplied for registration depending'
+ 'on the production rate.',
'For environmental assessment of chemicals to be registered'
+ ' by 2018 (1e100 t/y) at least the following information'
+ 'has to be presented: - Acute toxicity to algae, daphniand'
+ 'fish (the latter only for substances >10 t/y)'
+ 'Octanol/water partition coefficient (log KOW)'
+ 'Water solubility (SW) - Biodegradability '
+ 'This information is used to decide whether substance'
+ 'has to be classified as hazardous to the aquatic'
+ 'environment and labelled according to the CLP regulation'
+ '(European Commission, 2009).',
'For substances >10 t/y chemical safety assessment (CSA)'
+ 'has to be performed, including the derivation of the'
+ 'predicted environmental concentration (PEC) as well as'
+ 'the predicted no effect concentration (PNEC) and'
+ 'the assessment of (very) persistent,'
+ '(very) bioaccumulative and toxic (PBT/vPvB) properties.']
keywords = ExtractKeywords(sentences)
assert len(keywords) == 5
assert len(keywords[0]) != 0 | en | 0.748024 | # -*- coding: utf-8 -*- Unit tests for the knowmine.KeywordsExtractor module. | 3.104813 | 3 |
src/scripts/extract_all_uniprot.py | pmoris/host-pathogen-ppi-fim | 1 | 6613785 |
"""Script to extract all identifiers from a set of PPI files and convert them
to UniProt ACs.
NOTE: the local mapping option cannot discern between reviewed and unreviewed
entries!
"""
import argparse
import pandas as pd
import sys
from pathlib import Path
from phppipy.ppi_tools import id_mapper
from phppipy.ppi_tools import ppi_import
from phppipy.ppi_tools import ppi_filter
parser = argparse.ArgumentParser(
description='Script to extract all identifiers from a set of PPI files and convert them to UniProt ACs',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-i',
'--input',
dest='input',
type=str,
required=True,
help='Directory with PPI interaction files.')
parser.add_argument(
'-m',
'--mapping',
dest='mapping',
type=str,
required=False,
help='Full mapping file from EBI GOA. Omitting this will default to the online UniProt mapping service.')
parser.add_argument(
'-o',
'--output',
dest='output',
type=str,
required=True,
help='Output directory. File is saved as "all_identifiers.txt"')
args = parser.parse_args()
# Import PPI files
input_dir = Path(args.input)
mitab_files = input_dir.glob('*.mitab')
ppi_df_list = [ppi_import.read_mi_tab(i) for i in mitab_files if i.is_file()]
phisto_files = input_dir.glob('phi*.csv')
mi_file = input_dir / 'mi.obo'
ppi_df_list.extend(
[ppi_import.read_mitab_phisto(i, mi_file) for i in phisto_files])
# Merge PPI datasets
for i in ppi_df_list:
i['origin'] = i.name
ppi_df = pd.concat(ppi_df_list, axis=0, join='outer', ignore_index=True, sort=True)
# remap to UniProt AC
id_mapper.check_unique_identifier(ppi_df)
out_mappings_dir = Path(args.output) / 'mapping'
if args.mapping:
full_mapping_path = Path(args.mapping)
else:
full_mapping_path = None
try:
out_mappings_dir.mkdir(parents=True, exist_ok=False)
# omitting args.mapping defaults it to None, which prompts the map2uniprot function to use the online service
id_mapper.map2uniprot(ppi_df, out_mappings_dir, reviewed_only=True, full_mapping_file=full_mapping_path)
except FileExistsError:
print(
'Warning: supplied output directory already contains a "mapping" directory, aborting operation.'
)
sys.exit(1)
# remove multiple mappings
ppi_df = id_mapper.remove_mult(ppi_df)
# TODO: save these somewhere or deal with them in a way
# create unique identifier by combining xrefs
ppi_filter.unique_identifier(ppi_df)
# remove duplicates
ppi_df = ppi_df.drop_duplicates(subset=['xref_partners_sorted'], keep='first')
ppi_df = ppi_df.reset_index(drop=True)
# remove non-UniProt identifiers
ppi_df = ppi_df.loc[(ppi_df.xref_A.str.contains('uniprot'))
& (ppi_df.xref_B.str.contains('uniprot'))]
ppi_df = ppi_df.reset_index(drop=True)
# save protein list for filtering gaf/interpro files
all_identifiers = pd.Series(
pd.unique(ppi_df[['xref_A',
'xref_B']].values.ravel('K'))).str.split(':').str.get(1)
out_identifiers = Path(args.output) / 'all_identifiers.txt'
out_identifiers.parent.mkdir(parents=True, exist_ok=True)
with out_identifiers.open('w') as out:
for i in all_identifiers:
out.write("{}\n".format(i))
print('Saved list of all UniProtACs to {}'.format(out_identifiers))
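A minimal sketch of the xref-based de-duplication idea with plain pandas. The column names follow the script; the separator and the internals of ppi_filter.unique_identifier are assumptions:

import pandas as pd

demo = pd.DataFrame({'xref_A': ['uniprotkb:P12345', 'uniprotkb:Q67890'],
                     'xref_B': ['uniprotkb:Q67890', 'uniprotkb:P12345']})
# a symmetric key: the same pair in either order maps to one identifier
demo['xref_partners_sorted'] = demo.apply(
    lambda r: '%'.join(sorted([r.xref_A, r.xref_B])), axis=1)
demo = demo.drop_duplicates(subset=['xref_partners_sorted'], keep='first')
assert len(demo) == 1  # both rows described the same interaction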
| """Script to extract all identifiers from a set of PPI files and convert them
to UniProt ACs.
NOTE: the local mapping option cannot discern between reviewed and unreviewed
entries!
"""
import argparse
import numpy as np
import pandas as pd
import sys
from pathlib import Path
from phppipy.ppi_tools import id_mapper
from phppipy.ppi_tools import ppi_import
from phppipy.ppi_tools import ppi_filter
parser = argparse.ArgumentParser(
description='Script to extract all identifiers from a set of PPI files and convert them to UniProt ACs',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-i',
'--input',
dest='input',
type=str,
required=True,
help='Directory with PPI interaction files.')
parser.add_argument(
'-m',
'--mapping',
dest='mapping',
type=str,
required=False,
help='Full mapping file from EBI GOA. Omitting this will default to the online UniProt mapping service.')
parser.add_argument(
'-o',
'--output',
dest='output',
type=str,
required=True,
help='Output directory. File is saved as "all_identifiers.txt"')
args = parser.parse_args()
# Import PPI files
input_dir = Path(args.input)
mitab_files = input_dir.glob('*.mitab')
ppi_df_list = [ppi_import.read_mi_tab(i) for i in mitab_files if i.is_file()]
phisto_files = input_dir.glob('phi*.csv')
mi_file = input_dir / 'mi.obo'
ppi_df_list.extend(
[ppi_import.read_mitab_phisto(i, mi_file) for i in phisto_files])
# Merge PPI datasets
for i in ppi_df_list:
i['origin'] = i.name
ppi_df = pd.concat(ppi_df_list, axis=0, join='outer', ignore_index=True, sort=True)
# remap to UniProt AC
id_mapper.check_unique_identifier(ppi_df)
out_mappings_dir = Path(args.output) / 'mapping'
if args.mapping:
full_mapping_path = Path(args.mapping)
else:
full_mapping_path = None
try:
out_mappings_dir.mkdir(parents=True, exist_ok=False)
# omitting args.mapping defaults it to None, which prompts the map2uniprot function to use the online service
id_mapper.map2uniprot(ppi_df, out_mappings_dir, reviewed_only=True, full_mapping_file=full_mapping_path)
except FileExistsError:
print(
'Warning: supplied output directory already contains a "mapping" directory, aborting operation.'
)
sys.exit(1)
# remove multiple mappings
ppi_df = id_mapper.remove_mult(ppi_df)
# TODO: save these somewhere or deal with them in a way
# create unique identifier by combining xrefs
ppi_filter.unique_identifier(ppi_df)
# remove duplicates
ppi_df = ppi_df.drop_duplicates(subset=['xref_partners_sorted'], keep='first')
ppi_df = ppi_df.reset_index(drop=True)
# remove non-UniProt identifiers
ppi_df = ppi_df.loc[(ppi_df.xref_A.str.contains('uniprot'))
& (ppi_df.xref_B.str.contains('uniprot'))]
ppi_df = ppi_df.reset_index(drop=True)
# save protein list for filtering gaf/interpro files
all_identifiers = pd.Series(
pd.unique(ppi_df[['xref_A',
'xref_B']].values.ravel('K'))).str.split(':').str.get(1)
out_identifiers = Path(args.output) / 'all_identifiers.txt'
out_identifiers.parent.mkdir(parents=True, exist_ok=True)
with out_identifiers.open('w') as out:
for i in all_identifiers:
out.write("{}\n".format(i))
print('Saved list of all UniProtACs to {}'.format(out_identifiers)) | en | 0.708408 | Script to extract all identifiers from a set of PPI files and convert them to UniProt ACs. NOTE: the local mapping option cannot discern between reviewed and unreviewed entries! # Import PPI files # Merge PPI datasets # remap to UniProt AC # omitting args.mapping defaults it to None, which prompts the map2uniprot function to use the online service # remove multiple mappings # TODO: save these somewhere or deal with them in a way # create unique identifier by combining xrefs # remove duplicates # remove non-UniProt identifiers # save protein list for filtering gaf/interpro files | 2.802952 | 3 |
hyperspeed/human.py | bovesan/mistika-hyperspeed | 3 | 6613786 |
#!/usr/bin/env python
from datetime import datetime
def size(num, suffix='B'):
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
def reltime(d):
if type(d) != datetime:
d = datetime.fromtimestamp(d)
diff = datetime.now() - d
s = diff.seconds
if diff.days > 7 or diff.days < 0:
return d.strftime('%d %b %y')
elif diff.days == 1:
return '1 day ago'
elif diff.days > 1:
return '%i days ago' % diff.days
elif s <= 1:
return 'just now'
elif s < 60:
return '%i seconds ago' % s
elif s < 120:
return '1 minute ago'
elif s < 3600:
return '%i minutes ago' % (s/60)
elif s < 7200:
return '1 hour ago'
else:
return '%i hours ago' % (s/3600)
def time(d):
return '%s, %s' % (datetime.fromtimestamp(d).strftime("%H:%M"), reltime(d))
def time_of_day(d):
return datetime.fromtimestamp(d).strftime("%H:%M:%S")
def duration(s):
parts = []
units = [
(1, 'second', 'seconds'),
(60, 'minute', 'minutes'),
(60*60, 'hour', 'hours'),
(24*60*60, 'day', 'days'),
]
while len(units) > 0:
unit_size, unit_singular, unit_plural = units.pop()
unit_count = s // unit_size
if unit_size == 1:
parts.append('%3.1f %s' % (s, unit_plural))
elif unit_count == 1:
parts.append('%i %s' % (unit_count, unit_singular))
elif unit_count > 1:
parts.append('%i %s' % (unit_count, unit_plural))
s = s - unit_count * unit_size
return ', '.join(parts) | <filename>hyperspeed/human.py
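Example outputs for the helpers above (values follow from the code as written):

print(size(123456789))  # '117.7MB'
print(size(2048))       # '2.0KB'
print(duration(3723))   # '1 hour, 2 minutes, 3.0 seconds'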
pyleecan/Methods/Slot/SlotUD/set_from_point_list.py | IrakozeFD/pyleecan | 95 | 6613787 |
# -*- coding: utf-8 -*-
from ....Classes.Arc1 import Arc1
from ....Classes.Arc3 import Arc3
from ....Classes.Segment import Segment
def set_from_point_list(self, point_list, is_sym=False):
"""Set the line_list from a point list (connected by Segments)
Parameters
----------
self : SlotUD
A SlotUD object
point_list : [complex]
List of complex coordinates
is_sym : bool
True to duplicate the point by symmetries
"""
# Apply symmetry if needed
Z_list = point_list.copy()
if is_sym:
for point in point_list[::-1]:
Z_list.append(point.conjugate())
# Creation of curve
line_list = list()
for ii in range(len(Z_list) - 1):
line_list.append(Segment(Z_list[ii], Z_list[ii + 1]))
self.line_list = line_list
| # -*- coding: utf-8 -*-
from ....Classes.Arc1 import Arc1
from ....Classes.Arc3 import Arc3
from ....Classes.Segment import Segment
def set_from_point_list(self, point_list, is_sym=False):
"""Set the line_list from a point list (connected by Segments)
Parameters
----------
self : SlotUD
A SlotUD object
point_list : [complex]
List of complex coordinates
is_sym : bool
True to duplicate the point by symmetries
"""
# Apply symmetry if needed
Z_list = point_list.copy()
if is_sym:
for point in point_list[::-1]:
Z_list.append(point.conjugate())
# Creation of curve
line_list = list()
for ii in range(len(Z_list) - 1):
line_list.append(Segment(Z_list[ii], Z_list[ii + 1]))
self.line_list = line_list | en | 0.645422 | # -*- coding: utf-8 -*- Set the line_list from a point list (connected by Segments) Parameters ---------- self : SlotUD A SlotUD object point_list : [complex] List of complex coordinates is_sym : bool True to duplicate the point by symmetries # Apply symmetry if needed # Creation of curve | 3.266722 | 3 |
models/movingMNISTExplorer.py | Micky774/NewRepo | 2 | 6613788 |
import numpy as np
import imageio
import argparse
import torch
from math import ceil
from torch.utils.data import DataLoader, Dataset, random_split
from torchvision import transforms
"""
Loads video data from the observation in the moving MNIST dataset corresponding to given index
Saves the data as an mp4 named by the provided filename
Also constructs a Pytorch Dataset from the moving MNIST data
Splits the dataset into training and testing sets and constructs loaders for them
Uses imageio. Requires installation of ffmpeg.
@author <NAME>
"""
#Parses user arguments
parser = argparse.ArgumentParser(description='Moving MNIST Explorer')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--testSplit', type=float, default=.2, metavar='%',
help='portion of dataset to test on (default: .2)')
parser.add_argument('--index', type=int, default=-1, metavar='i',
help = 'index from 0 to 9999 in moving mnist dataset from which to generate a video')
parser.add_argument('--filename', type=str, default='', metavar='F',
help = 'name of file to which the video should be saved')
parser.add_argument('--source', type=str, default='../data/mnist_test_seq.npy', metavar='S',
help = 'path to moving MNIST dataset (default: \'../data/mnist_test_seq.npy\')')
args = parser.parse_args()
def genLoaders(batch_size=128, no_cuda=False, seed=1, testSplit=.2, index=-1, filename='', source='../data/mnist_test_seq.npy'):
cuda = not no_cuda and torch.cuda.is_available()
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
torch.manual_seed(seed)
#Loads moving MNIST dataset
mnist = np.load(source)
#Saves video as mp4 or raises Exception if invalid index and filename are provided
    if 0 <= index < 10000:
        if filename != '':
            imageio.mimwrite(filename, mnist[:, index], fps=10)
        else:
            raise Exception('filename must be defined')
    elif filename != '':
        raise Exception('valid index between 0 and 9999 must be defined')
mnist = mnist / 255.0
#movingMNISTDataset class
class movingMNISTDataset(Dataset):
"""
Initializes dataset
@param npArray (numpy.array): moving MNIST dataset
@param transform(callable, optional): Optional transform to be applied on a sample.
"""
def __init__(self, npArray, transform=None):
self.npArray = npArray
self.transform = transform
"""
Gets number of observations in dataset
@return number of observations
"""
def __len__(self):
return (self.npArray.shape)[1]
"""
Gets the observation at a given index
@param index (int): index corresponding to observation which is to be returned
@return Tensor observation corresponding to given index
"""
def __getitem__(self, index):
obs = self.npArray[:,index]
if self.transform:
obs = self.transform(obs)
return obs
#Constructs Pytorch Dataset from moving MNIST data
data = movingMNISTDataset(npArray=mnist, transform=transforms.ToTensor())
length = data.__len__()
print(data.__getitem__(1).shape)
#Splits data into training and testing data
    if testSplit <= 0 or testSplit >= 1:
raise Exception('testSplit must be between 0 and 1 (exclusively)')
testSize = ceil(testSplit * length)
trainSize = length - testSize
trainSet, testSet = random_split(data, [trainSize, testSize])
#Constructs DataLoaders for training and testing data
train_loader = DataLoader(trainSet, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(testSet, batch_size=batch_size, shuffle=True, **kwargs)
return train_loader, test_loader
genLoaders(args.batch_size, args.no_cuda, args.seed, args.testSplit, args.index, args.filename, args.source) | <filename>models/movingMNISTExplorer.py
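For programmatic use without the CLI (the argument values are hypothetical):

# train_loader, test_loader = genLoaders(batch_size=32, seed=0, testSplit=0.1,
#                                        source='../data/mnist_test_seq.npy')
# clip_batch = next(iter(train_loader))  # one batch of moving-MNIST clips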
download.py | ryanwhowe/GeoNames | 0 | 6613789 |
import os
from datetime import datetime, timedelta
import wget
from tqdm import tqdm
from GeoNames import GeoNames, dump
def check_create_directory(directory):
if not os.path.isdir(directory):
# todo add some logic to check for write permissions before attempting to create dir
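        # a possible sketch for the TODO above (an assumption, not the author's code):
        #   parent = os.path.dirname(os.path.abspath(directory))
        #   if not os.access(parent, os.W_OK):
        #       raise PermissionError(f'cannot create {directory}: {parent} is not writable')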
os.mkdir(directory)
def pull_file(sourcefile, desitinationfile):
if not os.path.isfile(desitinationfile):
# wget was the fastest http download implementation I was able to find, tried urllib3 & responses
# Suppress the default download progress bar which overwrites the overall progress bar from tqdm
wget.download(sourcefile, desitinationfile, bar=None)
def main():
currentdir = os.path.dirname(os.path.realpath(__file__))
downloads = GeoNames.get_dir(currentdir, 'downloads')
check_create_directory(downloads)
total = len(dump.FILES) + len(dump.INCREMENTAL)
with tqdm(total=total) as pbar:
# first process the static files, if present we skip them and move onto incremental files
for file in dump.FILES:
sourcefilename = dump.URL + file
destinationfilename = downloads + file
pull_file(sourcefilename, destinationfilename)
pbar.update(1)
for file in dump.INCREMENTAL:
            # construct the filename from yesterday's date (the most recent complete daily dump)
sourcefilename = dump.URL + file + (datetime.today() - timedelta(1)).strftime('%Y-%m-%d') + '.txt'
destinationfilename = downloads + file + (datetime.today() - timedelta(1)).strftime('%Y-%m-%d') + '.txt'
pull_file(sourcefilename, destinationfilename)
pbar.update(1)
main()
| import os
from datetime import datetime, timedelta
import wget
from tqdm import tqdm
from GeoNames import GeoNames, dump
def check_create_directory(directory):
if not os.path.isdir(directory):
# todo add some logic to check for write permissions before attempting to create dir
os.mkdir(directory)
def pull_file(sourcefile, desitinationfile):
if not os.path.isfile(desitinationfile):
# wget was the fastest http download implementation I was able to find, tried urllib3 & responses
# Suppress the default download progress bar which overwrites the overall progress bar from tqdm
wget.download(sourcefile, desitinationfile, bar=None)
def main():
currentdir = os.path.dirname(os.path.realpath(__file__))
downloads = GeoNames.get_dir(currentdir, 'downloads')
check_create_directory(downloads)
total = len(dump.FILES) + len(dump.INCREMENTAL)
with tqdm(total=total) as pbar:
# first process the static files, if present we skip them and move onto incremental files
for file in dump.FILES:
sourcefilename = dump.URL + file
destinationfilename = downloads + file
pull_file(sourcefilename, destinationfilename)
pbar.update(1)
for file in dump.INCREMENTAL:
# construct the filename from todays date
sourcefilename = dump.URL + file + (datetime.today() - timedelta(1)).strftime('%Y-%m-%d') + '.txt'
destinationfilename = downloads + file + (datetime.today() - timedelta(1)).strftime('%Y-%m-%d') + '.txt'
pull_file(sourcefilename, destinationfilename)
pbar.update(1)
main() | en | 0.874321 | # todo add some logic to check for write permissions before attempting to create dir # wget was the fastest http download implementation I was able to find, tried urllib3 & responses # Suppress the default download progress bar which overwrites the overall progress bar from tqdm # first process the static files, if present we skip them and move onto incremental files # construct the filename from todays date | 2.927466 | 3 |
web_applications/e_commerce/store/migrations/0001_initial.py | Had96dad/Python | 0 | 6613790 | # Generated by Django 3.2 on 2021-05-02 23:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import media.media
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_complete', models.BooleanField(default=False)),
('note', models.CharField(blank=True, max_length=255, null=True)),
('date_completed', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='ShippingAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(blank=True, max_length=255, null=True)),
('city', models.CharField(blank=True, max_length=100, null=True)),
('state', models.CharField(blank=True, max_length=100, null=True)),
('zipcode', models.PositiveSmallIntegerField(blank=True, null=True)),
('related_order_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.order')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, default='e_commerce/product.png', null=True, upload_to=media.media.image_location_product)),
('name', models.CharField(max_length=100)),
('price', models.FloatField()),
('is_digital', models.BooleanField(blank=True, default=False, null=True)),
('description', models.TextField(blank=True, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('tags', models.ManyToManyField(blank=True, to='store.Tag')),
],
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveSmallIntegerField(blank=True, default=0, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.product')),
('related_order_number', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.order')),
],
),
]
| # Generated by Django 3.2 on 2021-05-02 23:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import media.media
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_complete', models.BooleanField(default=False)),
('note', models.CharField(blank=True, max_length=255, null=True)),
('date_completed', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='ShippingAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(blank=True, max_length=255, null=True)),
('city', models.CharField(blank=True, max_length=100, null=True)),
('state', models.CharField(blank=True, max_length=100, null=True)),
('zipcode', models.PositiveSmallIntegerField(blank=True, null=True)),
('related_order_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.order')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, default='e_commerce/product.png', null=True, upload_to=media.media.image_location_product)),
('name', models.CharField(max_length=100)),
('price', models.FloatField()),
('is_digital', models.BooleanField(blank=True, default=False, null=True)),
('description', models.TextField(blank=True, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('tags', models.ManyToManyField(blank=True, to='store.Tag')),
],
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveSmallIntegerField(blank=True, default=0, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.product')),
('related_order_number', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.order')),
],
),
]
| en | 0.871067 | # Generated by Django 3.2 on 2021-05-02 23:47 | 1.749581 | 2 |
sequana/enrichment.py | ddesvillechabrol/sequana | 0 | 6613791 | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2020 - Sequana Development Team
#
# File author(s):
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
from pathlib import Path
import re
import os
import json
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
from sequana.lazy import numpy as np
from matplotlib_venn import venn2_unweighted, venn3_unweighted
# from sequana.rnadiff import RNADiffResults
from sequana.summary import Summary
import colorlog
logger = colorlog.getLogger(__name__)
try:
    import gseapy
except ImportError:  # optional dependency
    pass
__all__ = ["PantherEnrichment", "KeggPathwayEnrichment", "Mart"]
class PantherEnrichment:
"""
    # This will read your rnadiff results and store the relevant genes into the
    # mygenes_up, mygenes_down, mygenes attributes.
By default, we keep only the genes with a adjusted pvalue <= 0.05. The
fold_change threshold is on a log2 scale and set to 0 (meaning no
filtering). Only one value is requested and
used to filter out positive and negative fold change (on the log2
scale). In other word, a log2 fold change threshold of 2 means that we
filter out values between -2 and 2.
If you prefer to work in natural scale, just set the parameter
    fc_threshold, which overwrites the log2_fc_threshold parameter.
::
pe = PantherEnrichment("input_file.tsv", taxon=10090, log2_fc_threshold=1)
# compute enrichment for genes down and up
pe.compute_enrichment_down()
pe.compute_enrichment_up()
# Results for up case is stored in pe.enrichment
    # then, we plot the most important GO terms
df_up = pe.plot_go_terms("up")
df = pe.plot_go_terms("up", pe.MF)
pe.save_chart(df, "chart_MF_up.png")
# all 3 main ontology
df = pe.plot_go_terms("up")
pe.save_chart(df, "chart_up.png")
    e.stats contains some statistics. One important one is the list of unmapped
    genes. The results from the GO enrichment are stored in the enrichment
    attribute. There, we again have an adjusted p-value and a fold enrichment,
    which can in turn be filtered or not.
You can retrieve the cleaned data using the get_data method.
You can also plot the GO terms that are significantly enriched using::
e.plot_go_terms(['GO:0003674', 'GO:0008150', 'GO:0005575'])
This function returns the dataframe used during the plotting.
If you want to look at the up regulated genes only::
e.compute_enrichment(pe.mygenes_up, 83333)
df = e.plot_go_terms(['GO:0003674', 'GO:0008150', 'GO:0005575'],
log=False, include_negative_enrichment=False,
fontsize=8, sort_by='fold_enrichment',
show_pvalues=True, fdr_threshold=0.05)
    The number of genes is limited to about 3100 (don't ask me why; this seems
    to be a hard-coded limitation on the PantherDB website). In such a case,
    you should add a filter, e.g. on padj or fold change.
"""
def __init__(
self,
gene_lists,
taxon,
requests_per_sec=10,
padj_threshold=0.05,
log2_fc_threshold=0,
fc_threshold=None,
enrichment_fdr=0.05,
max_entries=3000,
annot_col="Name",
):
"""
        rnadiff, if provided, supersedes the input filename. This is useful for
        debugging
"""
self.enrichment_fdr = enrichment_fdr
# users can set the fold change threshold in the log2 scale or normal
# scale.
assert log2_fc_threshold >= 0, "log2 fc_threshold must be >=0"
if fc_threshold is not None:
log2_fc_threshold = pylab.log2(fc_threshold)
from bioservices import panther, quickgo
self.panther = panther.Panther(cache=True)
self.valid_taxons = [
x["taxon_id"] for x in self.panther.get_supported_genomes()
]
self.summary = {}
self._taxon = None
self.taxon = taxon
self.quickgo = quickgo.QuickGO(cache=True)
self.quickgo.requests_per_sec = requests_per_sec
self._ancestors = {"MF": "GO:0003674", "CC": "GO:0005575", "BP": "GO:0008150"}
# self.aspects = {"MF": "molecular_function"}
self.ontologies = [
"GO:0003674",
"GO:0008150",
"GO:0005575",
"ANNOT_TYPE_ID_PANTHER_GO_SLIM_MF",
"ANNOT_TYPE_ID_PANTHER_GO_SLIM_BP",
"ANNOT_TYPE_ID_PANTHER_GO_SLIM_CC",
"ANNOT_TYPE_ID_PANTHER_PC",
"ANNOT_TYPE_ID_PANTHER_PATHWAY",
"ANNOT_TYPE_ID_REACTOME_PATHWAY",
]
self.MF = "GO:0003674"
self.CC = "GO:0005575"
self.BP = "GO:0008150"
self.ontology_aliases = [
"MF",
"BP",
"CC",
"SLIM_MF",
"SLIM_BP",
"SLIM_CC",
"PROTEIN",
"PANTHER_PATHWAY",
"REACTOME_PATHWAY",
]
# PantherDB accepts only ~2000-3000 genes at most. Let us restrict the
# analysis to the first 2000 genes based on their log2 fold change
# (2000 positive and 2000 negative).
self.mygenes = gene_lists["all"]
self.mygenes_down = gene_lists["down"]
self.mygenes_up = gene_lists["up"]
msg = "Ignoring pvalue adjusted > {} and fold change in [{}, {}]".format(
padj_threshold, 1 / (2 ** log2_fc_threshold), 2 ** log2_fc_threshold
)
logger.info(msg)
# used in report module
self.summary["fold_change_range"] = [
1 / (2 ** log2_fc_threshold),
2 ** log2_fc_threshold,
]
self.summary["padj_threshold"] = padj_threshold
fc_threshold = log2_fc_threshold
logger.info(
f"Starting with {len(self.mygenes)} genes ({len(self.mygenes_down)} down; {len(self.mygenes_up)} up)"
)
Ndown = len(self.mygenes_down)
Nup = len(self.mygenes_up)
self.summary["DGE_after_filtering"] = {"up": Nup, "down": Ndown}
logger.info(
"Filtering and keeping {} genes ({} down; {} up)".format(
Ndown + Nup, Ndown, Nup
)
)
self.enrichment = {}
self.stats = {}
self.obsolets = []
def _set_taxon(self, taxon):
if taxon not in self.valid_taxons:
raise ValueError(
"taxon {} ".format(taxon)
+ " not in pantherDB. please check the 'valid_taxons' attribute"
)
self.taxon_info = [
x for x in self.panther.get_supported_genomes() if x["taxon_id"] == taxon
]
self.taxon_info = self.taxon_info[0]
self._taxon_id = taxon
def _get_taxon(self):
return self._taxon_id
taxon = property(_get_taxon, _set_taxon)
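# Illustrative usage (83333 is the E. coli taxon id mentioned elsewhere
# in this module; any id present in ``valid_taxons`` works):
#   pe.taxon = 83333
#   pe.taxon_info  # metadata for the selected genome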
def compute_enrichment(
self,
taxid=None,
ontologies=None,
enrichment_test="FISHER",
correction="FDR",
progress=True,
):
"""
:param enrichment_test: Fisher or Binomial
:param correction: FDR or Bonferroni
The field **number_in_reference** indicates the number of genes in the
reference that have a given ontology term. For instance, 998 genes have
the term. This is stored in **number_in_reference**. If the reference
contains 4391 genes and you provided 49 genes, the **expected** number
of genes that have this ontology term is 49*998/4391, that is 11.1369,
which is stored in **expected**.
Now, if you actually find that 14 out of 49 genes have the term, you
need to compare the numbers 11.1369 and 14. Are they really different?
The ratio 14 / 11.1369 is stored in **fold_enrichment**. The p-value and
FDR are stored as well.
Some genes may be missing: if you provide 50 genes, you may end up with
only 45 being mapped onto the PantherDB database. This may explain some
differences with the expected value.
Fold enrichment is the number_in_list / expected ratio. Another close
metric is the fractional difference: (observed - expected) / expected.
This metric is slightly less than the fold enrichment.
To get the total number of genes from the database, simply take one
output and compute number_in_reference * number_in_list / expected.
The fold enrichment is also called the odds ratio.
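As a sanity check, the numbers above can be reproduced directly
(illustrative values only)::

    number_in_reference = 998    # genes with the term in the reference
    reference_size = 4391        # total genes in the reference
    number_in_list = 14          # genes with the term in your input list
    input_size = 49              # size of your input list

    expected = input_size * number_in_reference / reference_size  # ~11.1369
    fold_enrichment = number_in_list / expected                   # ~1.257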
"""
for gene_list, category in zip(
(self.mygenes_down, self.mygenes_up, self.mygenes), ("down", "up", "all")
):
self.enrichment[category], self.stats[category] = self._compute_enrichment(
gene_list,
taxid=taxid,
ontologies=ontologies,
enrichment_test=enrichment_test,
correction=correction,
progress=progress,
)
def _compute_enrichment(
self,
mygenes,
taxid,
ontologies=None,
enrichment_test="FISHER",
correction="FDR",
progress=True,
):
# taxid=83333 # ecoli
if taxid is None:
taxid = self.taxon
if isinstance(mygenes, list):
mygenes = ",".join(mygenes)
if mygenes.count(",") > 2000:
logger.warning(
"Please reduce the input gene list; the PantherDB request may fail otherwise"
)
if len(mygenes.split(",")) < 2:
logger.error(
f"Fewer than 2 genes found in the gene set: {mygenes}. No enrichment will be computed"
)
return None, None
if ontologies is None:
ontologies = self.ontologies
else:
for x in ontologies:
assert x in self.ontologies
# for each ontology category, we will store one key/value item
enrichment = {}
for ontology in ontologies:
logger.info("Computing enrichment for {}".format(ontology))
results = self.panther.get_enrichment(
mygenes,
taxid,
ontology,
enrichment_test=enrichment_test,
correction=correction,
)
count = 0
while count < 2 and results == 404:
logger.warning("Panther request failed. Trying again")
results = self.panther.get_enrichment(
mygenes,
taxid,
ontology,
enrichment_test=enrichment_test,
correction=correction,
)
count += 1
if results == 404:
logger.warning(
"Invalid output from PantherDB (too many genes?). Skipping {}".format(
ontology
)
)
enrichment[ontology] = None
continue
if isinstance(results["result"], dict): # pragma: no cover
results["result"] = [results["result"]]
pvalues = [x["pValue"] for x in results["result"]]
import statsmodels
import statsmodels.stats.multitest
if correction == "FDR":
fdr = statsmodels.stats.multitest.multipletests(
pvalues, method="fdr_bh"
)[1]
elif correction.lower() == "bonferroni":
fdr = statsmodels.stats.multitest.multipletests(
pvalues, method="bonferroni"
)[1]
for i, pvalue in enumerate(pvalues):
results["result"][i]["fdr2"] = fdr[i]
if enrichment_test.lower() == "binomial":
results["result"][i]["fdr"] = fdr[i]
enrichment[ontology] = results
# note: skipped ontologies are stored as None (see the 404 handling above)
stats = dict([(k, len(v["result"]) if v else 0) for k, v in enrichment.items()])
stats["input_genes"] = len(mygenes.split(","))
try:
unmapped = enrichment[ontologies[0]]["input_list"]["unmapped_id"]
stats["unmapped_genes"] = unmapped
stats["N_unmapped_genes"] = len(unmapped)
except:
stats["unmapped_genes"] = []
stats["N_unmapped_genes"] = 0
# Here, looking at the FDR, it appears that when using Bonferroni,
# all FDRs are set to zero. Moreover, when using Fisher tests and
# FDR (supposedly FDR_BH), the results are noisy compared to a
# test from statsmodels. Also, when using the binomial test, the FDR
# is not computed at all. So, we recompute the FDR ourselves (see the
# statsmodels.stats.multitest.multipletests calls above).
return enrichment, stats
def get_functional_classification(
self, mygenes, taxon
): # pragma: no cover ; too slow
"""Mapping information from pantherDB for the list of genes
We also store the uniprot persistent id
"""
logger.warning("Very slow. Please wait")
if isinstance(mygenes, list):
mygenes = ",".join(mygenes)
res = self.panther.get_mapping(mygenes, taxon)
res = res["mapped"]
N = len(res)
from easydev import Progress
pb = Progress(N)
for i, item in enumerate(res):
accession = item["accession"]
res[i]["persistent_id"] = self._get_name_given_accession(accession)
pb.animate(i + 1)
return res
def _get_name_given_accession(self, accession): # pragma: no cover
from bioservices import UniProt
self.uniprot = UniProt(cache=True)
self.uniprot.requests_per_sec = 10
acc = [x for x in accession.split("|") if x.startswith("UniProtKB")]
acc = acc[0].split("=")[1]
res = self.uniprot.get_df(acc, limit=1)
name = res["Gene names (primary )"][0]
return name
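# Illustrative (hypothetical) accession handled above:
#   "MGI=MGI:97848|UniProtKB=P70340" -> acc == "P70340", then the
#   primary gene name is fetched from UniProt.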
def plot_piechart(self, df):
# Here we show the GO terms that have number_in_list > 0
# Note that it is risky to look only at this picture without
# the reference plot, whose data is not available through the Panther API
labels = []
for this in df.query("number_in_list!=0").label.values:
if len(this) > 50:
# truncate long labels; keep short ones as-is
labels.append(this[0:50] + "...")
else:
labels.append(this)
pylab.pie(df.query("number_in_list!=0").number_in_list, labels=labels)
pylab.tight_layout()
def get_data(
self, category, ontologies, include_negative_enrichment=True, fdr=0.05
):
"""
From all input GO terms that have been found and stored in
enrichment[ONTOLOGY]['result'], we keep those with fdr <= the given
threshold (0.05 by default). We also exclude UNCLASSIFIED entries. The
final dataframe is returned.
::
pe.get_data("up", "GO:0003674")
"""
if isinstance(ontologies, str):
ontologies = [ontologies]
else:
assert isinstance(ontologies, list)
if category not in self.enrichment:
logger.warning("You must call compute_enrichment() first (no results for category '{}')".format(category))
return
# First, we select the required ontologies and build a common data set
all_data = []
for ontology in ontologies:
if self.enrichment[category][ontology] is None:
# this ontology was skipped during enrichment (e.g. a 404 from panther)
continue
data = self.enrichment[category][ontology]["result"]
if isinstance(data, dict):
# there was only one hit; we expect a list
data = [data]
all_data.extend(data)
data = all_data
# remove unclassified GO terms
unclassified = [x for x in data if x["term"]["label"] == "UNCLASSIFIED"]
logger.info("Found {} unclassified".format(len(unclassified)))
data = [x for x in data if x["term"]["label"] != "UNCLASSIFIED"]
df = pd.DataFrame(data)
if len(df) == 0:
return df
else:
logger.info("Found {} GO terms".format(len(df)))
df = df.query("number_in_list!=0").copy()
logger.info(
"Found {} GO terms with at least 1 gene in the input list".format(len(df))
)
# extract the ID and label
df["id"] = [x["id"] for x in df["term"]]
df["label"] = [x["label"] for x in df["term"]]
# some extra information for convenience
df["pct_diff_expr"] = df["number_in_list"] * 100 / df["number_in_reference"]
df["log2_fold_enrichment"] = pylab.log2(df["fold_enrichment"])
df["abs_log2_fold_enrichment"] = abs(pylab.log2(df["fold_enrichment"]))
df["expected"] = [int(x) for x in df.expected]
# Some users may want to include GO terms with a fold enrichment
# significantly below 1; others may not.
if include_negative_enrichment is False:
df = df.query("fold_enrichment>=1").copy()
logger.info(
"Found {} GO terms after keeping only positive enrichment".format(
len(df)
)
)
# filter out FDR>0.05
df = df.query("fdr<=@fdr").copy()
logger.info("Found {} GO terms after keeping only FDR<{}".format(len(df), fdr))
return df
def plot_go_terms(
self,
category,
ontologies=None,
max_features=50,
log=False,
fontsize=9,
minimum_genes=0,
pvalue=0.05,
cmap="summer_r",
sort_by="fold_enrichment",
show_pvalues=False,
include_negative_enrichment=False,
fdr_threshold=0.05,
compute_levels=True,
progress=True,
):
if ontologies is None:
ontologies = ["GO:0003674", "GO:0008150", "GO:0005575"]
assert sort_by in ["pValue", "fold_enrichment", "fdr"]
df = self.get_data(
category,
ontologies,
include_negative_enrichment=include_negative_enrichment,
fdr=fdr_threshold,
)
if df is None or len(df) == 0:
return df
# df stores the entire data set
# subdf will store the subset (max of n_features, and add dummy values)
df = df.query("pValue<=@pvalue")
logger.info("Filtering out pvalue>{}. Kept {} GO terms".format(pvalue, len(df)))
df = df.reset_index(drop=True)
# Select a subset of the data to keep the best max_features in terms of
# pValue
subdf = df.query("number_in_list>@minimum_genes").copy()
logger.info(
"Filtering out GO terms with less than {} genes: Kept {} GO terms".format(
minimum_genes, len(subdf)
)
)
logger.info("Filtering out the 3 parent terms")
subdf = subdf.query("id not in @self.ontologies")
if subdf is None or len(subdf) == 0:
return subdf
# Keeping only a part of the data, sorting by pValue
if sort_by == "pValue":
subdf = subdf.sort_values(by="pValue", ascending=False).iloc[-max_features:]
df = df.sort_values(by="pValue", ascending=False)
elif sort_by == "fold_enrichment":
subdf = subdf.sort_values(
by="abs_log2_fold_enrichment", ascending=True
).iloc[-max_features:]
df = df.sort_values(by="abs_log2_fold_enrichment", ascending=False)
elif sort_by == "fdr":
subdf = subdf.sort_values(by="fdr", ascending=False).iloc[-max_features:]
df = df.sort_values(by="fdr", ascending=False)
subdf = subdf.reset_index(drop=True)
# We get all levels for each go id.
# They are stored by MF, CC or BP
if compute_levels:
paths = self._get_graph(list(subdf["id"].values), progress=progress)
levels = []
keys = list(paths.keys())
goid_levels = paths[keys[0]]
if len(keys) > 1:
for k in keys[1:]:
goid_levels.update(paths[k])
levels = [goid_levels[ID] for ID in subdf["id"].values]
subdf["level"] = levels
else:
subdf["level"] = ""
# now, for the subdf, which is used to plot the results, we add dummy
# rows to make the yticks range scale nicer.
M = 10
datum = subdf.iloc[-1].copy()
datum.fdr = 0
datum.number_in_list = 0
datum.fold_enrichment = 1
datum.label = ""
datum["id"] = ""
datum["level"] = ""
while len(subdf) < M:
subdf = pd.concat([datum.to_frame().T, subdf], axis=0)
self.temp = subdf
N = len(subdf)
size_factor = 10000 / len(subdf)
max_size = subdf.number_in_list.max()
# ignore the dummy values
min_size = min([x for x in subdf.number_in_list.values if x != 0])
# here we define a size for each entry.
# For the dummy entries, the size is zero: int(bool(x)) makes sure
# they are not shown
sizes = [
max(max_size * 0.2, x) * int(bool(x))
for x in size_factor
* subdf.number_in_list.values
/ subdf.number_in_list.max()
]
m1 = min([x for x in sizes if x != 0])
m3 = max(sizes)
m2 = m1 + (m3 - m1) / 2
# The plot itself. We stretch the figure when there are many features
if len(subdf) > 25:
fig = pylab.figure(num=1)
fig.set_figwidth(10)
fig.set_figheight(8)
else:
fig = pylab.figure(num=1)
fig.set_figwidth(10)
fig.set_figheight(6)
pylab.clf()
if log:
pylab.scatter(
[pylab.log2(x) if x else 0 for x in subdf.fold_enrichment],
range(len(subdf)),
c=subdf.fdr,
s=sizes,
cmap=cmap,
alpha=0.8,
ec="k",
vmin=0,
vmax=fdr_threshold,
zorder=10,
)
# pylab.barh(range(N), pylab.log2(subdf.fold_enrichment), color="r",
# label="pvalue>0.05; FDR>0.05")
# pylab.axvline(1, color="gray", ls="--")
# pylab.axvline(-1, color="gray", ls="--")
else:
pylab.scatter(
subdf.fold_enrichment,
range(len(subdf)),
c=subdf.fdr,
cmap=cmap,
s=sizes,
ec="k",
alpha=0.8,
vmin=0,
vmax=fdr_threshold,
zorder=10,
)
# pylab.barh(range(N), subdf.fold_enrichment, color="r",
# label="not significant")
# set color bar height
pylab.grid(zorder=-10)
ax2 = pylab.colorbar(shrink=0.5)
ax2.ax.set_ylabel("FDR")
# define the labels
max_label_length = 45
labels = [
x if len(x) < max_label_length else x[0 : max_label_length - 3] + "..."
for x in list(subdf.label)
]
ticks = []
for level, ID, label in zip(subdf["level"], subdf.id, labels):
if ID:
ticks.append("{} ({}) {}".format(ID, level, "; " + label.title()))
else:
ticks.append("")
# Set the y-tick labels
pylab.yticks(range(N), ticks, fontsize=fontsize, ha="left")
yax = pylab.gca().get_yaxis()
try:
pad = [x.label1.get_window_extent().width for x in yax.majorTicks]
yax.set_tick_params(pad=max(pad))
except:
# fall back to a heuristic padding when tick extents are unavailable
yax.set_tick_params(pad=60 * fontsize * 0.6)
# deal with the x-axis now. what is the range ?
fc_max = subdf.fold_enrichment.max(skipna=True)
fc_min = subdf.fold_enrichment.min(skipna=True)
# go into log2 space
fc_max = pylab.log2(fc_max)
fc_min = pylab.log2(fc_min)
abs_max = max(fc_max, abs(fc_min), 1)
if log:
fc_max = abs_max * 1.5
else:
fc_max = 2 ** abs_max * 1.2
pylab.axvline(0, color="k", lw=2)
if log:
pylab.xlabel("Fold Enrichment (log2)")
else:
pylab.xlabel("Fold Enrichment")
# deal with fold changes below 0.
if include_negative_enrichment:
pylab.xlim([-fc_max, fc_max])
else:
pylab.xlim([0, fc_max])
pylab.tight_layout()
# The pvalues:
if show_pvalues:
ax = pylab.gca().twiny()
# ax.set_xlim([0, max(-pylab.log10(subdf.pValue))*1.2])
pvalues = [-pylab.log10(pv) if pv > 0 else 0 for pv in subdf.pValue]
ax.set_xlim([0, max(pvalues) * 1.2])
ax.set_xlabel("p-values (log10)", fontsize=12)
ax.plot(pvalues, range(len(subdf)), label="pvalue", lw=2, color="k")
ax.axvline(1.33, lw=1, ls="--", color="grey", label="pvalue=0.05")
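# note: -log10(0.05) ~ 1.3, hence the dashed significance line above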
pylab.tight_layout()
pylab.legend(loc="lower right")
# now, let us add a legend
s1 = pylab.scatter([], [], s=m1, marker="o", color="#555555", ec="k")
s2 = pylab.scatter([], [], s=m2, marker="o", color="#555555", ec="k")
s3 = pylab.scatter([], [], s=m3, marker="o", color="#555555", ec="k")
if len(subdf) <= 10:
labelspacing = 1.5 * 2
borderpad = 1.5
handletextpad = 2
elif len(subdf) < 20:
labelspacing = 1.5 * 2
borderpad = 1
handletextpad = 2
else:
labelspacing = 1.5
borderpad = 2
handletextpad = 2
# get back the dataframe without the dummies
subdf = subdf.query("number_in_list>0")
if len(subdf) >= 3:
leg = pylab.legend(
(s1, s2, s3),
(
str(int(min_size)),
str(int(min_size + (max_size - min_size) / 2)),
str(int(max_size)),
),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
elif len(subdf) >= 2:
leg = pylab.legend(
(s1, s3),
(str(int(min_size)), str(int(max_size))),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
else:
leg = pylab.legend(
(s1,),
(str(int(min_size)),),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
frame = leg.get_frame()
frame.set_facecolor("#b4aeae")
frame.set_edgecolor("black")
frame.set_alpha(1)
self.subdf = subdf
self.df = df
return df
def _get_graph(self, go_ids, ontologies=None, progress=True):
# Here we filter the data to keep only the relevant go terms as shown in
# panther pie chart
import networkx as nx
gg = nx.DiGraph()
# assert ontology in ['MF', 'BP', 'CC']
if ontologies is None:
ontologies = ["MF", "BP", "CC"]
elif isinstance(ontologies, str):
ontologies = [ontologies]
ancestors = [self._ancestors[x] for x in ontologies]
levels = []
real_ids = []
obsolets = []
from easydev import Progress
pb = Progress(len(go_ids))
logger.info("Retrieving info for each significant GO term")
annotations = {}
for i, go_id in enumerate(go_ids):
# Some GO terms may be obsolete or renamed. Later lookups
# may not work simply because the ID has changed.
info = self.quickgo.get_go_terms(go_id)
annotations[go_id] = info
if info[0]["id"] != go_id:
_id = info[0]["id"]
logger.warning("changed {} to {}".format(go_id, _id))
annotations[_id] = info
else:
_id = go_id
aspect = info[0]["aspect"]
if info[0]["isObsolete"] is True:
logger.warning("Skipping obsolete GO term: {}".format(go_id))
obsolets.append(go_id)
continue
real_ids.append(_id)
# now figure out the distance to main ancestor
# we can try several times
# if _id != self.ancestors[ontology]:
for ancestor in ancestors:
edges = self.quickgo.get_go_paths(_id, ancestor)
if edges == 400:
logger.warning("Could not retrieve {} to {}".format(_id, ancestor))
continue
if edges["numberOfHits"] == 0:
continue
if len(edges["results"]) >= 1:
for path in edges["results"]:
for edge in path:
gg.add_edge(edge["child"], edge["parent"])
else:
logger.warning("No usable GO path for {}: {}".format(_id, edges["results"]))
if progress is True:
pb.animate(i + 1)
self.obsolets += obsolets
self.annotations = annotations
self.graph = gg
all_paths = {}
for ancestor in ancestors:
if ancestor not in gg:
continue
paths = nx.shortest_path_length(gg, target=ancestor)
for obsolet in obsolets:
paths[obsolet] = 100
all_paths[ancestor] = paths
return all_paths
def save_chart(self, data, filename="chart.png"):
"""
pe = PantherEnrichment("B4052-V1.T1vsT0.complete.xls", fc_threshold=5,
padj_threshold=0.05)
df = pe.plot_go_terms("down", log=True, compute_levels=False)
pe.save_chart(df, "chart.png")
"""
# if dataframe, get 'id' column, otherwise expect a list or string of go
# terms separated by commas
if isinstance(data, list):
goids = ",".join(data)
elif isinstance(data, str):
goids = data
elif "id" in data:
goids = ",".join(list(data["id"].values))
else:
logger.error("Could not save chart: unexpected input type")
return
# remove obsolete GO terms before requesting the chart
try:
goids = [x for x in goids.split(",") if x not in self.obsolets]
except:
logger.error("Could not save chart")
return
goids = ",".join(goids)
try:
res = self.quickgo.get_go_chart(goids)
if res is None:
raise Exception
with open(filename, "wb") as fout:
fout.write(res.content)
except:
import shutil
logger.warning(
"Could not create the GO chart. Maybe too many go IDs ({})".format(
len(goids.split(","))
)
)
from sequana import sequana_data
no_data = sequana_data("no_data.png")
shutil.copy(no_data, filename)
class GSEA:
def __init__(self, species):
pass
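# NOTE: this is a thin wrapper around gseapy.enrichr. The ``gene_sets``
# attribute is not initialised here and is expected to be set by the
# caller before invoking :meth:`enrichment`.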
def enrichment(self, gene_list, verbose=False, background=None):
enr = gseapy.enrichr(
gene_list=gene_list,
gene_sets=self.gene_sets,
verbose=verbose,
background=background,
outdir="test",
no_plot=True,
)
return enr
class KeggPathwayEnrichment:
"""Kegg Pathways enrichment from DGE results
DGE = Differentially Gene Expression
Current input is the output of the RNADiff analysis. This is a file
that can be read by RNADiffResults.
When performing a DGE analysis, feature counts are computed using an input
GFF. Depending on your parameters the gene names may be saved as ensembl
identifiers or gene names. If you have gene names understood by Kegg, you
simply need to use this code::
ke = KeggPathwayEnrichment("rnadiff", "eco") #"eco" for E. coli here
this calls ke.compute_enrichment() that stores the up, down and all results
in the attribute :attr:`enrichment` as a dictionary.
You can now plot the results::
ke.barplot('down')
and save enriched pathways as follows::
up = ke.save_significant_pathways("up")
down = ke.save_significant_pathways("down")
up.to_csv("kegg_pathway_up_regulated.csv")
down.to_csv("kegg_pathway_down_regulated.csv")
This class works like a charm for ecoli with GFF that uses gene names.
For mus musculus, the organism is **mmu** (not **mus**). You will need to
have a mapping of the Ensembl IDs into KEGG IDs (actually gene names).
You can perform the conversion using BioServices/BioMart. We have
implemented a simple function inside Sequana::
from sequana.enrichment import Mart
conv = Mart("mmusculus_gene_ensembl")
df = conv.query()
conv.save(df)
You can then import the dataframe, as follows using the mapper argument::
import pandas as pd
df = pd.read_csv("biomart.csv")
df = df.rename({"external_gene_name":"name", "ensembl_gene_id": "ensembl"},
axis=1)
df.set_index("ensembl", inplace=True)
KeggPathwayEnrichment("path_to_rnadiff", "mmu", mapper=df)
More generally, when starting KeggPathwayEnrichment, we read all pathways.
This may change with time. So, you can save the pathways::
ke.export_pathways_to_json()
And read them back::
ke = KeggPathwayEnrichment("path_to_rnadiff", "mmu", mapper=df,
preload_directory="kegg_pathways/mmu")
df = ke.scatterplot('down')
tight_layout()
savefig("B4052_T1vsT0_KE_scatterplot_down.png")
df = ke.scatterplot('up')
savefig("B4052_T1vsT0_KE_scatterplot_up.png")
"""
def __init__(
self,
gene_lists,
organism,
alpha=0.05,
log2_fc=0,
progress=True,
mapper=None,
background=None,
preload_directory=None,
convert_input_gene_to_upper_case=False,
):
"""
In some cases, the input identifiers are converted into names thanks to
the input mapper (csv file). Yet, if the external names are from one
species and you use another species in KEGG, the KEGG names may be upper
case while your species' names are in lower case. In such situations, you
may convert the input identifiers to upper case by setting the
convert_input_gene_to_upper_case parameter to True.
"""
self.convert_input_gene_to_upper_case = convert_input_gene_to_upper_case
from bioservices import KEGG
self.kegg = KEGG(cache=True)
self.kegg.organism = organism
self.summary = Summary("KeggPathwayEnrichment")
self.summary.add_params(
{
"organism": organism,
"alpha": alpha,
"log2_fc": log2_fc,
"mapper": (True if mapper is not None else False),
"background": background,
}
)
self.gene_lists = gene_lists
if background:
self.background = background
else:
self.background = len(self.kegg.list(self.kegg.organism).strip().split("\n"))
logger.info("Set background number of genes to {}".format(self.background))
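# note: kegg.list(organism) returns one line per gene, so this default
# background is the organism-wide gene count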
self._load_pathways(progress=progress, preload_directory=preload_directory)
if isinstance(mapper, str):
import pandas as pd
df = pd.read_csv(mapper)
df = df.rename(
{"external_gene_name": "name", "ensembl_gene_id": "ensembl"}, axis=1
)
df.set_index("ensembl", inplace=True)
self.mapper = df
else: # the dataframe should already contain the correct columns and index
self.mapper = mapper
try:
self.compute_enrichment()
except Exception as err:
print(err)
logger.critical("An error occurred while computing enrichments.")
def _load_pathways(self, progress=True, preload_directory=None):
# This is just loading all pathways once for all
self.pathways = {}
if preload_directory:
# preload is a directory with all pathways in it
import glob
pathways = glob.glob(preload_directory + "/*json")
for i, name in enumerate(pathways):
# Path(...).stem drops the directory and the ".json" suffix reliably
# (str.strip removes characters from both ends, not a suffix)
key = Path(name).stem
with open(name, "r") as fin:
data = json.load(fin)
self.pathways[key] = data
else:
logger.info("Loading all pathways from KEGG. This may take a while the first time")
from easydev import Progress
pb = Progress(len(self.kegg.pathwayIds))
for i, ID in enumerate(self.kegg.pathwayIds):
self.pathways[ID.replace("path:", "")] = self.kegg.parse(
self.kegg.get(ID)
)
if progress:
pb.animate(i + 1)
# Some cleanup. Note that if we read the JSON files, the data has already
# been cleaned up, but this code does no harm
for ID in self.pathways.keys():
name = self.pathways[ID]["NAME"]
if isinstance(name, list):
name = name[0]
self.pathways[ID]["NAME"] = name.split(" - ", 1)[0]
# save gene sets
self.gene_sets = {}
for ID in self.pathways.keys():
res = self.pathways[ID]
if "GENE" in res.keys():
results = []
# some pathways report genes as a dictionary id: 'gene name; description' (e.g. eco)
# others report genes as a dictionary id: 'description'
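# illustrative shapes (values are hypothetical):
#   {"b0003": "thrB; homoserine kinase"} -> gene name "thrB"
#   {"100036": "some description"}       -> falls back to the id "100036"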
for geneID, description in res["GENE"].items():
if ";" in description:
name = description.split(";")[0]
else:
name = geneID
results.append(name)
self.gene_sets[ID] = results
else:
logger.debug("SKIPPED (no genes) {}: {}".format(ID, res["NAME"]))
# save all pathways info
self.df_pathways = pd.DataFrame(self.pathways).T
del self.df_pathways["ENTRY"]
del self.df_pathways["REFERENCE"]
go = [
x["GO"] if isinstance(x, dict) and "GO" in x.keys() else None
for x in self.df_pathways.DBLINKS
]
self.df_pathways["GO"] = go
del self.df_pathways["DBLINKS"]
def plot_genesets_hist(self, bins=20):
N = len(self.gene_sets.keys())
pylab.clf()
pylab.hist([len(v) for k, v in self.gene_sets.items()], bins=bins, lw=1, ec="k")
pylab.title("{} gene sets".format(N))
pylab.xlabel("Gene set sizes")
pylab.grid(True)
a, b = pylab.xlim()
pylab.xlim([0, b])
def compute_enrichment(self, background=None):
if background is None:
background = self.background
self.summary.data["missing_genes"] = {}
self.summary.data["input_gene_list"] = {}
self.enrichment = {}
self.enrichment["up"] = self._enrichr("up", background=background)
self.enrichment["down"] = self._enrichr("down", background=background)
self.enrichment["all"] = self._enrichr("all", background=background)
if (
len(self.enrichment["up"].results) == 0
and len(self.enrichment["down"].results) == 0
):
logger.warning(
"Enrichment results are empty. Could be real because number of"
" deregulated genes is low or an incompatible set of gene IDs."
" Please use BioMart to convert your IDs into external gene names "
)
def _enrichr(self, category, background=None, verbose=True):
if background is None:
background = self.background
if isinstance(category, list):
gene_list = category
else:
assert category in ["up", "down", "all"]
gene_list = self.gene_lists[category]
logger.info("Input gene list of {} ids".format(len(gene_list)))
self.summary.data["input_gene_list"][category] = len(gene_list)
if self.mapper is not None:
missing = [x for x in gene_list if x not in self.mapper.index]
logger.info("Missing genes from mapper dataframe: {}".format(len(missing)))
self.summary.data["missing_genes"][category] = ",".join(missing)
gene_list = [x for x in gene_list if x in self.mapper.index]
identifiers = self.mapper.loc[gene_list]["name"].drop_duplicates().values
if self.convert_input_gene_to_upper_case:
identifiers = [x.upper() for x in identifiers if isinstance(x, str)]
logger.info("Mapped gene list of {} ids".format(len(identifiers)))
gene_list = list(identifiers)
enr = gseapy.enrichr(
gene_list=gene_list,
gene_sets=self.gene_sets,
verbose=verbose,
background=background,
outdir="test",
no_plot=True,
)
return enr
def _get_final_df(self, df, cutoff=0.05, nmax=10):
# takes the df and populate the name and size of the found pathways
# we also sort by adjusted p-value
# we keep adj p-value <=0.05
if len(df) == 0:
return df
df = df.copy()
df["name"] = [self.pathways[x]["NAME"] for x in df.Term]
df["size"] = [len(x.split(";")) for x in df.Genes]
df = df.sort_values("Adjusted P-value")
df.reset_index(drop=True, inplace=True)
df = df[df["Adjusted P-value"] <= cutoff]
if len(df) < nmax:
nmax = len(df)
df = df.iloc[0:nmax]
df = df.sort_values("Adjusted P-value", ascending=False)
df = df.rename({"Term": "pathway_id"}, axis=1)
return df
def barplot(self, category, cutoff=0.05, nmax=10):
assert category in ["up", "down", "all"]
df = self._get_final_df(
self.enrichment[category].results, cutoff=cutoff, nmax=nmax
)
if len(df) == 0:
return df
pylab.clf()
pylab.barh(range(len(df)), -pylab.log10(df["Adjusted P-value"]))
pylab.yticks(range(len(df)), df.name)
pylab.axvline(1.3, lw=2, ls="--", color="r")
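# 1.3 ~ -log10(0.05): bars beyond this dashed line are significant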
pylab.grid(True)
pylab.xlabel("Adjusted p-value (log10)")
pylab.ylabel("Gene sets")
a, b = pylab.xlim()
pylab.xlim([0, b])
pylab.tight_layout()
return df
def scatterplot(self, category, cutoff=0.05, nmax=10, gene_set_size=[]):
assert category in ["up", "down", "all"]
df = self._get_final_df(
self.enrichment[category].results, cutoff=cutoff, nmax=nmax
)
if len(df) == 0:
return df
pylab.clf()
pylab.scatter(
-pylab.log10(df["Adjusted P-value"]),
range(len(df)),
s=10 * df["size"],
c=df["Adjusted P-value"],
)
pylab.xlabel("Adjusted p-value (log10)")
pylab.ylabel("Gene sets")
pylab.yticks(range(len(df)), df.name)
a, b = pylab.xlim()
pylab.xlim([0, b])
pylab.grid(True)
ax = pylab.gca()
M = max(df["size"])
if M > 100:
l1, l2, l3 = "10", "100", str(M)
else:
l1, l2, l3 = str(round(M / 3)), str(round(M * 2 / 3)), str(M)
handles = [
pylab.Line2D([0], [0], marker="o", markersize=5, label=l1, ls=""),
pylab.Line2D([0], [0], marker="o", markersize=10, label=l2, ls=""),
pylab.Line2D([0], [0], marker="o", markersize=15, label=l3, ls=""),
]
ax.legend(handles=handles, loc="upper left", title="gene-set size")
pylab.axvline(1.3, lw=2, ls="--", color="r")
pylab.tight_layout()
ax = pylab.colorbar(pylab.gci())
return df
# FIXME rnadiff object is not imported anymore. This function is not functional
def _get_summary_pathway(self, pathway_ID, df):
genes = self.df_pathways.loc[pathway_ID]["GENE"]
df_down = df.query("padj<=0.05 and log2FoldChange<0").copy()
df_up = df.query("padj<=0.05 and log2FoldChange>=0").copy()
if "Name" not in df_down.columns:
df_down["Name"] = df_down["ID"]
if "Name" not in df_up.columns:
df_up["Name"] = df_up["ID"]
logger.info("{}".format(pathway_ID))
logger.info("Total down-regulated: {}".format(len(df_down)))
logger.info("Total up-regulated: {}".format(len(df_up)))
mapper = {}
for k, v in genes.items():
mapper[v.split(";")[0]] = k
self.genes = genes
self.df_down = df_down
self.df_up = df_up
summary_names = []
summary_keggids = []
summary_types = []
summary_pvalues = []
summary_fcs = []
if self.mapper is not None:
if "Name" not in df_down.columns:
df_down["Name"] = df_down["ID"]
Names = []
for index in df_down.index:
Names.append(self.mapper.loc[index]["name"][0])
df_down["Name"] = Names
if "Name" not in df_up.columns:
df_up["Name"] = df_up["ID"]
Names = []
for index in df_up.index:
Names.append(self.mapper.loc[index]["name"][0])
df_up["Name"] = Names
#
identifiers = []
new_mapper = {}
for name, kegg_id in mapper.items():
try:
identifier = (
self.mapper.query("name == @name")["name"]
.drop_duplicates()
.index[0]
)
identifiers.append(identifier)
new_mapper[identifier] = kegg_id
except:
logger.warning(
"Skipped {} (kegg ID {}): could not find a mapping".format(
name, kegg_id
)
)
mapper = new_mapper
for name, kegg_id in mapper.items():
summary_names.append(name)
summary_keggids.append(kegg_id)
if name.lower() in [x.lower() for x in df_down.Name]:
padj = -pylab.log10(df_down.query("Name==@name").padj.values[0])
fc = df_down.query("Name==@name").log2FoldChange.values[0]
summary_fcs.append(fc)
summary_pvalues.append(padj)
summary_types.append("-")
elif name.lower() in [x.lower() for x in df_up.Name]:
padj = -pylab.log10(df_up.query("Name==@name").padj.values[0])
summary_pvalues.append(padj)
fc = df_up.query("Name==@name").log2FoldChange.values[0]
summary_fcs.append(fc)
summary_types.append("+")
else:
summary_pvalues.append(None)
summary_fcs.append(None)
summary_types.append("=")
summary = pd.DataFrame(
{
"type": summary_types,
"name": summary_names,
"padj": summary_pvalues,
"fc": summary_fcs,
"keggid": summary_keggids,
}
)
summary["description"] = [
self.pathways[pathway_ID]["GENE"][x] for x in summary.keggid
]
return summary
def _get_colors(self, summary):
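# Hedged reading of the thresholds below: summary["padj"] holds
# -log10(adjusted p-value), so 0 < padj < 5, 5 <= padj < 10 and
# padj >= 10 map to light, medium and dark shades respectively.
# Each value is "<fill color>,<border color>" as consumed by the KEGG
# show_pathway coloring request in :meth:`save_pathway`.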
colors = {}
for index, row in summary.iterrows():
pvalue = row["padj"]
type_ = row["type"]
kegg_id = row["keggid"]
if type_ == "-":
if pvalue > 0 and pvalue < 5:
colors[kegg_id] = "#FF8C00,black"
elif pvalue < 10:
colors[kegg_id] = "#FF0000,black"
else:
colors[kegg_id] = "#B22222,black"
elif type_ == "+":
if pvalue > 0 and pvalue < 5:
colors[kegg_id] = "#9ACD32,black"
elif pvalue < 10:
colors[kegg_id] = "#008000,black"
else:
colors[kegg_id] = "#006400,#000000"
else:
colors[kegg_id] = "grey,black"
return colors
def save_pathway(self, pathway_ID, df, scale=None, show=False, filename=None):
summary = self._get_summary_pathway(pathway_ID, df)
colors = self._get_colors(summary)
logger.info("pathway {} total genes: {}".format(pathway_ID, len(summary)))
count_up = len(summary.query("type == '+'"))
count_down = len(summary.query("type == '-'"))
logger.info("this pathway's down-regulated genes: {}".format(count_down))
logger.info("this pathway up-regulated genes: {}".format(count_up))
url = "https://www.kegg.jp/kegg-bin/show_pathway"
# dcolor = "white" --> does not work with the post requests unlike get
# requests
params = {
"map": pathway_ID,
"multi_query": "\r\n".join(
["{} {}".format(k, v) for k, v in colors.items()]
),
}
self.params = params
import requests
html_page = requests.post(url, data=params)
self.tmp = html_page
html_page = html_page.content.decode()
links_to_png = [
x for x in html_page.split() if "png" in x and x.startswith("src")
]
link_to_png = links_to_png[0].replace("src=", "").replace('"', "")
r = requests.get("https://www.kegg.jp/{}".format(link_to_png))
if filename is None:
filename = "{}.png".format(pathway_ID)
with open(filename, "wb") as fout:
fout.write(r.content)
return summary
def save_all_pathways(self): # pragma: no cover
# This does not do any enrichment. Just save all pathways once for all
# with useful information
for ID in self.pathways.keys():
self.save_pathway(ID)
def save_significant_pathways(
self, category, cutoff=0.05, nmax=20, background=None, tag="", outdir="."
): # pragma: no cover
"""category should be up, down or all"""
if background is None:
background = self.background
# select the relevant pathways
df = self._enrichr(category, background).results
df = self._get_final_df(df, cutoff=cutoff, nmax=nmax)
logger.warning("Found {} pathways to save".format(len(df)))
if len(df) == nmax:
logger.warning("Restricted pathways to {}".format(nmax))
logger.info("saving {} deregulated pathways".format(len(df)))
summaries = {}
for ID in df["pathway_id"]:
summary = self.save_pathway(
ID, filename=(Path(outdir) / f"{ID}_{category}.png")
)
summaries[ID] = summary
return summaries
def find_pathways_by_gene(self, gene_name, match="exact"):
"""Returns pathways that contain the gene name
ke.find_pathways_by_gene("ysgA")
"""
# First let us find the kegg ID
genes = self.kegg.list(self.kegg.organism).strip().split("\n")
keggid = [x.split("\t")[0].strip() for x in genes]
gene_names = [x.split("\t")[1].split(";")[0].strip() for x in genes]
self.keggid = keggid
self.gene_names = gene_names
candidates = []
for x, y in zip(keggid, gene_names):
if match == "exact":
if gene_name == y:
candidates = x.split(":")[1]
break
else:
if gene_name in y:
candidates.append(x)
if match != "exact":
candidates = [x.split(":")[1] for x in candidates]
logger.info("Found {} candidate(s): {}".format(len(candidates), candidates))
else:
logger.info("Found {} in {}".format(gene_name, candidates))
paths = []
for key in self.pathways.keys():
if "GENE" in self.pathways[key]:
if match == "exact":
if candidates in self.pathways[key]["GENE"].keys():
paths.append(key)
else:
for candidate in candidates:
if candidate in self.pathways[key]["GENE"].keys():
paths.append(key)
return list(set(paths))
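# Illustrative usage (gene names depend on the organism):
#   ke.find_pathways_by_gene("ysgA")                  # exact match
#   ke.find_pathways_by_gene("ysg", match="partial")  # any value other
#   # than "exact" triggers substring matching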
def save_project(self, tag, outdir="."):
"""Save tables and visualisations of the complete enrichment analysis."""
outdir = Path(outdir)
outdir.mkdir(parents=True, exist_ok=True)
from pylab import savefig
for category in ["up", "down", "all"]:
common_out = Path(f"{tag}_kegg_gsea_{category}_degs")
results = self.enrichment[category].results
if not results.empty:
# FIXME: For now fixing a nmax to 10000 to be sure to have all results
# (this could be improved by having self.complete_df and self.filtered_df attributes)
self._get_final_df(results, cutoff=1, nmax=10000).to_csv(
outdir / (common_out.name + ".csv")
)
self._get_final_df(results, cutoff=0.05, nmax=10000).to_csv(
outdir / (common_out.name + "_significant.csv")
)
self.barplot(category)
savefig(outdir / (common_out.name + "_barplot.png"), dpi=200)
self.scatterplot(category)
savefig(outdir / (common_out.name + "_scatterplot.png"), dpi=200)
# TODO: Implement significant pathways export here (got an ID
# error before, so commenting)
# self.save_significant_pathways(
# category, tag=tag, outdir=(outdir / "pathways")
# )
# In case of no enrichment results, create empty files stating so
else:
(outdir / (common_out.name + "_NO_RESULTS")).touch()
# FIXME: I think this table is redundant with previous csv export. Is it correct ?
# > So commenting for now
# df.to_csv(outdir / (common_out.name + ".csv"), index=None)
def export_pathways_to_json(self, outdir="kegg_pathways"):
# This is useful to keep an exact track of the pathways that were used.
# They can be loaded back. If so, the KEGG web service is only needed
# by the :meth:`find_pathways_by_gene` method.
outdir = outdir + "/" + self.kegg.organism
from easydev import mkdirs
mkdirs(outdir)
import json
for key, data in self.pathways.items():
with open(f"{outdir}/{key}.json", "w") as fout:
json.dump(data, fout)
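# Illustrative round-trip (paths are examples; a mapper may also be
# required, as explained in the class docstring):
#   ke.export_pathways_to_json("kegg_pathways")
#   ke2 = KeggPathwayEnrichment(gene_lists, "mmu",
#                               preload_directory="kegg_pathways/mmu")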
# not tested here. This is tested through bioservices and takes a long time
class Mart: # pragma: no cover
"""
conv = Mart(dataset="mmusculus_gene_ensembl")
# you could choose hsapiens_gene_ensembl for instance
df = conv.query()
df = df.set_index("ensembl_gene_id")
conv.save(df)
The file can now be loaded in KeggPathwayEnrichment as a mapper of
Ensembl identifiers to external names understood by KEGG.
"""
def __init__(self, dataset, mart="ENSEMBL_MART_ENSEMBL"):
logger.info("Init Mart")
from bioservices import BioMart
self.biomart = BioMart()
self.datasets = self.biomart.get_datasets(mart)
self._dataset = None
try:
self.dataset = dataset
except:
logger.critical("Invalid dataset. Check the 'datasets' attribute")
def _set_dataset(self, dataset):
if dataset not in self.datasets["name"].values:
raise ValueError(
"Invalid dataset {}. Choose amongst {}".format(
dataset, self.datasets
)
)
self._dataset = dataset
self.attributes = self.biomart.attributes(dataset=dataset)
self.filters = self.biomart.filters(dataset=dataset)
def _get_dataset(self):
return self._dataset
dataset = property(_get_dataset, _set_dataset)
def query(
self,
attributes=["ensembl_gene_id", "go_id", "entrezgene_id", "external_gene_name"],
):
logger.info("Please wait. This may take a while depending on your connection")
self.biomart.new_query()
self.biomart.add_dataset_to_xml(self.dataset)
for attribute in attributes:
if attribute not in self.attributes:
logger.error(
"{} not found in the dataset {}".format(attribute, self.dataset)
)
raise ValueError
self.biomart.add_attribute_to_xml(attribute)
xml = self.biomart.get_xml()
results = self.biomart.query(xml)
import pandas as pd
import io
df = pd.read_csv(io.StringIO(results), sep="\t")
df.columns = attributes
# df = df.set_index('ensembl_gene_id')
# name should be the name used by kegg
return df
def save(self, df, filename=None):
"""df is the output of :meth:`~query`. This function saves it, keeping
track of the day/month/year and dataset."""
import time
date = time.localtime()
if filename is None:
filename = "biomart_{}__{}_{}_{}.csv".format(
self.dataset, date.tm_year, date.tm_mon, date.tm_mday
)
logger.info("Saving into {}".format(filename))
df.to_csv(filename, index=False)
| # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2020 - Sequana Development Team
#
# File author(s):
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
from pathlib import Path
import re
import os
import json
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
from sequana.lazy import numpy as np
from matplotlib_venn import venn2_unweighted, venn3_unweighted
# from sequana.rnadiff import RNADiffResults
from sequana.summary import Summary
import colorlog
logger = colorlog.getLogger(__name__)
try:
import gseapy
except:
pass
__all__ = ["PantherEnrichment", "KeggPathwayEnrichment", "Mart"]
class PantherEnrichment:
"""
# This will read your rnadiff results and tstore the rlevants genes into
# mygenes_u, mygenes_down, mygenes attributes.
By default, we keep only the genes with a adjusted pvalue <= 0.05. The
fold_change threshold is on a log2 scale and set to 0 (meaning no
filtering). Only one value is requested and
used to filter out positive and negative fold change (on the log2
scale). In other word, a log2 fold change threshold of 2 means that we
filter out values between -2 and 2.
If you prefer to work in natural scale, just set the parameter
fc_threshold, which overwrite the log2_fc_threshold parameter.
::
pe = PantherEnrichment("input_file.tsv", taxon=10090, log2_fc_threshold=1)
# compute enrichment for genes down and up
pe.compute_enrichment_down()
pe.compute_enrichment_up()
# Results for up case is stored in pe.enrichment
# then, we plot the most mportat go terms
df_up = pe.plot_go_terms("up")
df = pe.plot_go_terms("up", pe.MF)
pe.save_chart(df, "chart_MF_up.png")
# all 3 main ontology
df = pe.plot_go_terms("up")
pe.save_chart(df, "chart_up.png")
e.stats contains some statistics. One important is the list of unmapped
genes. The results from the GO enrichment are stored in the attributes
enrichment. There, we have again adjusted p-value and a fold enrichment,
which can in turn be filtered or not.
You can retrieve the cleaned data using the get_data method.
You can also plot the GO terms that are significantly enriched using::
e.plot_go_terms(['GO:0003674', 'GO:0008150', 'GO:0005575'])
This function returns the dataframe used during the plotting.
If you want to look at the up regulated genes only::
e.compute_enrichment(pe.mygenes_up, 83333)
df = e.plot_go_terms(['GO:0003674', 'GO:0008150', 'GO:0005575'],
log=False, include_negative_enrichment=False,
fontsize=8, sort_by='fold_enrichment',
show_pvalues=True, fdr_threshold=0.05)
The number of genes is limited to about 3100 depending (don't ask me
why, this seem to be a hard-coded limitation on PantherDB website).
In such case, you should add a filter e.g on padj or fold change
"""
def __init__(
self,
gene_lists,
taxon,
requests_per_sec=10,
padj_threshold=0.05,
log2_fc_threshold=0,
fc_threshold=None,
enrichment_fdr=0.05,
max_entries=3000,
annot_col="Name",
):
"""
rnadiff if provided, superseeds the input filename. This is useful for
debugging
"""
self.enrichment_fdr = enrichment_fdr
# users can set the fold change threshold in the log2 scale or normal
# scale.
assert log2_fc_threshold >= 0, "log2 fc_threshold must be >=0"
if fc_threshold is not None:
log2_fc_threshold = pylab.log2(fc_threshold)
from bioservices import panther, quickgo
self.panther = panther.Panther(cache=True)
self.valid_taxons = [
x["taxon_id"] for x in self.panther.get_supported_genomes()
]
self.summary = {}
self._taxon = None
self.taxon = taxon
self.quickgo = quickgo.QuickGO(cache=True)
self.quickgo.requests_per_sec = requests_per_sec
self._ancestors = {"MF": "GO:0003674", "CC": "GO:0005575", "BP": "GO:0008150"}
# self.aspects = {"MF": "molecular_function"}
self.ontologies = [
"GO:0003674",
"GO:0008150",
"GO:0005575",
"ANNOT_TYPE_ID_PANTHER_GO_SLIM_MF",
"ANNOT_TYPE_ID_PANTHER_GO_SLIM_BP",
"ANNOT_TYPE_ID_PANTHER_GO_SLIM_CC",
"ANNOT_TYPE_ID_PANTHER_PC",
"ANNOT_TYPE_ID_PANTHER_PATHWAY",
"ANNOT_TYPE_ID_REACTOME_PATHWAY",
]
self.MF = "GO:0003674"
self.CC = "GO:0005575"
self.BP = "GO:0008150"
self.ontology_aliases = [
"MF",
"BP",
"CC",
"SLIM_MF",
"SLIM_BP",
"SLIM_CC",
"PROTEIN",
"PANTHER_PATHWAY",
"REACTOME_PATHWAY",
]
# panth accepts onyl ~2-3000 genes at max. Let us restrict the analysis
# to the first 2000 genes based on their log2 fold change 2000 + and
# 2000 negatives
self.mygenes = gene_lists["all"]
self.mygenes_down = gene_lists["down"]
self.mygenes_up = gene_lists["up"]
msg = "Ignoring pvalue adjusted > {} and fold change in [{}, {}]".format(
padj_threshold, 1 / (2 ** log2_fc_threshold), 2 ** log2_fc_threshold
)
logger.info(msg)
# used in report module
self.summary["fold_change_range"] = [
1 / (2 ** log2_fc_threshold),
2 ** log2_fc_threshold,
]
self.summary["padj_threshold"] = padj_threshold
fc_threshold = log2_fc_threshold
logger.info(
f"Starting with {len(self.mygenes)} genes ({len(self.mygenes_down)} down; {len(self.mygenes_up)} up)"
)
Ndown = len(self.mygenes_down)
Nup = len(self.mygenes_up)
self.summary["DGE_after_filtering"] = {"up": Nup, "down": Ndown}
logger.info(
"Filtering and keeping {} genes ({} down; {} up)".format(
Ndown + Nup, Ndown, Nup
)
)
self.enrichment = {}
self.stats = {}
self.obsolets = []
def _set_taxon(self, taxon):
if taxon not in self.valid_taxons:
raise ValueError(
"taxon {} ".format(taxon)
+ " not in pantherDB. please check the 'valid_taxons' attribute"
)
self.taxon_info = [
x for x in self.panther.get_supported_genomes() if x["taxon_id"] == taxon
]
self.taxon_info = self.taxon_info[0]
self._taxon_id = taxon
def _get_taxon(self):
return self._taxon_id
taxon = property(_get_taxon, _set_taxon)
def compute_enrichment(
self,
taxid=None,
ontologies=None,
enrichment_test="FISHER",
correction="FDR",
progress=True,
):
"""
:param enrichment_test: Fisher or Binomial
:param correction: FDR or Bonferonni
The field **number_in_reference** indicates from the reference, the number
of genes that have a given ontolgy term. For instance, 998 genes have
the term. This is stored in **number_in_reference**. If the reference
contains 4391 genes, and you provided 49
genes , the **expected** number of genes that have this ontology term is
49*998/4391 that is 11.1369, which is stored in **"expected**.
Now, if you actually find that 14 out of 49
genes have the term, you need to compare the numbers 11.1369 and 14. Are
they really different ? The ratio 14 / 11.1369 is stored in
**fold_enrichment**. The pvalue and FDR are stored as well.
Some genes may be missing If you provide 50 genes, you may end up with
only 45 being mapped onto panther db database. This may explain some
differences with the expected value.
Fold enrichment is the number_in_list / expected ratio. Another close metric is the
fractional difference: (observed - expected) / expected. This metric is
slighlty less than the fold enrichment
To get the number f genes from the database, simply take one output, and
compute number_in_reference * number_in_list / expected
The fold enrichment is also called odd-ratio.
"""
for gene_list, category in zip(
(self.mygenes_down, self.mygenes_up, self.mygenes), ("down", "up", "all")
):
self.enrichment[category], self.stats[category] = self._compute_enrichment(
gene_list,
taxid=taxid,
ontologies=ontologies,
enrichment_test=enrichment_test,
correction=correction,
progress=progress,
)
def _compute_enrichment(
self,
mygenes,
taxid,
ontologies=None,
enrichment_test="FISHER",
correction="FDR",
progress=True,
):
# taxid=83333 # ecoli
if taxid is None:
taxid = self.taxon
if isinstance(mygenes, list):
mygenes = ",".join(mygenes)
if mygenes.count(",") > 2000:
logger.warning(
"Please reduce the list input genes. may fail on pantherb otherwise"
)
if len(mygenes) <= 2:
logger.error(
f"Less than 2 genes are found for in the gene set: {mygenes}. No enrichment will be computed"
)
return None, None
if ontologies is None:
ontologies = self.ontologies
else:
for x in ontologies:
assert x in self.ontologies
# for each ontology categorym we will store one key/value item
enrichment = {}
for ontology in ontologies:
logger.info("Computing enrichment for {}".format(ontology))
results = self.panther.get_enrichment(
mygenes,
taxid,
ontology,
enrichment_test=enrichment_test,
correction=correction,
)
count = 0
while count < 2 and results == 404:
logger.warning("Panther request failed Trying again")
results = self.panther.get_enrichment(
mygenes,
taxid,
ontology,
enrichment_test=enrichment_test,
correction=correction,
)
count += 1
if results == 404:
logger.warning(
"Invalid output from pantherdb (too many genes ?). skipping {}".format(
ontology
)
)
enrichment[ontology] = None
continue
if isinstance(results["result"], dict): # pragma: no cover
results["result"] = [results["result"]]
pvalues = [x["pValue"] for x in results["result"]]
import statsmodels
import statsmodels.stats.multitest
if correction == "FDR":
fdr = statsmodels.stats.multitest.multipletests(
pvalues, method="fdr_bh"
)[1]
elif correction.lower() == "bonferroni":
fdr = statsmodels.stats.multitest.multipletests(
pvalues, method="bonferroni"
)[1]
for i, pvalue in enumerate(pvalues):
results["result"][i]["fdr2"] = fdr[i]
if enrichment_test.lower() == "binomial":
results["result"][i]["fdr"] = fdr[i]
enrichment[ontology] = results
stats = dict([(k, len(v["result"])) for k, v in enrichment.items()])
stats["input_genes"] = len(mygenes.split(","))
try:
unmapped = enrichment[ontologies[0]]["input_list"]["unmapped_id"]
stats["unmapped_genes"] = unmapped
stats["N_unmapped_genes"] = len(unmapped)
except:
stats["unmapped_genes"] = []
stats["N_unmapped_genes"] = 0
# Here, looking at the FDr, it appears that when using bonferroni,
# all FDR are set to zeros. Moreover, when using Fisher tests and
# FDR (supposibly a FDR_BH, the results are noisy as compare to a
# test from statsmodels. Moreover, when using binomial test, the FDR
# is not computed... So, we will recompute the FDR ourself
return enrichment, stats
def get_functional_classification(
self, mygenes, taxon
): # pragma: no cover ; too slow
"""Mapping information from pantherDB for the lisf of genes
We also store uniprot persistent id
"""
logger.warning("Very slow. Please wait")
if isinstance(mygenes, list):
mygenes = ",".join(mygenes)
res = self.panther.get_mapping(mygenes, taxon)
res = res["mapped"]
N = len(res)
from easydev import Progress
pb = Progress(N)
for i, item in enumerate(res):
accession = item["accession"]
res[i]["persistent_id"] = self._get_name_given_accession(accession)
pb.animate(i + 1)
return res
def _get_name_given_accession(self, accession): # pragma: no cover
from bioservices import UniProt
self.uniprot = UniProt(cache=True)
self.uniprot.requests_per_sec = 10
acc = [x for x in accession.split("|") if x.startswith("UniProtKB")]
acc = acc[0].split("=")[1]
res = self.uniprot.get_df(acc, limit=1)
name = res["Gene names (primary )"][0]
return name
def plot_piechart(self, df):
# Here we show the GO terms that have number in list > 0
# Note, that this is dangerous to look only at this picture without
# the reference plot, which data is not available thourg the pathner API
labels = []
for this in df.query("number_in_list!=0").label.values:
if len(this) > 50:
labels.append(this)
else:
labels.append(this[0:50] + "...")
pylab.pie(df.query("number_in_list!=0").number_in_list, labels=labels)
pylab.tight_layout()
def get_data(
self, category, ontologies, include_negative_enrichment=True, fdr=0.05
):
"""
From all input GO term that have been found and stored in
enrichment[ONTOLOGY]['result'], we keep those with fdr<0.05. We also
exclude UNCLASSIFIED entries. The final dataframe is returned
::
pe.get_data("GO:0003674")
"""
if isinstance(ontologies, str):
ontologies = [ontologies]
else:
assert isinstance(ontologies, list)
if category not in self.enrichment:
logger.warning("You must call compute_enrichment_{}".format(category))
return
# First, we select the required ontologies and build a common data set
all_data = []
for ontology in ontologies:
data = self.enrichment[category][ontology]["result"]
if isinstance(data, dict):
# there was only one hit, we expect:
data = [data]
all_data.extend(data)
data = all_data
# remove unclassified GO terms
unclassified = [x for x in data if x["term"]["label"] == "UNCLASSIFIED"]
logger.info("Found {} unclassified".format(len(unclassified)))
data = [x for x in data if x["term"]["label"] != "UNCLASSIFIED"]
df = pd.DataFrame(data)
if len(df) == 0:
return df
else:
logger.info("Found {} GO terms".format(len(df)))
df = df.query("number_in_list!=0").copy()
logger.info(
"Found {} GO terms with at least 1 gene in reference".format(len(df))
)
# extract the ID and label
df["id"] = [x["id"] for x in df["term"]]
df["label"] = [x["label"] for x in df["term"]]
# some extra information for convenience
df["pct_diff_expr"] = df["number_in_list"] * 100 / df["number_in_reference"]
df["log2_fold_enrichment"] = pylab.log2(df["fold_enrichment"])
df["abs_log2_fold_enrichment"] = abs(pylab.log2(df["fold_enrichment"]))
df["expected"] = [int(x) for x in df.expected]
# Some user may want to include GO terms with fold enrichment
# significanyly below 1 or not.
if include_negative_enrichment is False:
df = df.query("fold_enrichment>=1").copy()
logger.info(
"Found {} GO terms after keeping only positive enrichment".format(
len(df)
)
)
# filter out FDR>0.05
df = df.query("fdr<=@fdr").copy()
logger.info("Found {} GO terms after keeping only FDR<{}".format(len(df), fdr))
return df
def plot_go_terms(
self,
category,
ontologies=None,
max_features=50,
log=False,
fontsize=9,
minimum_genes=0,
pvalue=0.05,
cmap="summer_r",
sort_by="fold_enrichment",
show_pvalues=False,
include_negative_enrichment=False,
fdr_threshold=0.05,
compute_levels=True,
progress=True,
):
if ontologies is None:
ontologies = ["GO:0003674", "GO:0008150", "GO:0005575"]
assert sort_by in ["pValue", "fold_enrichment", "fdr"]
df = self.get_data(
category,
ontologies,
include_negative_enrichment=include_negative_enrichment,
fdr=fdr_threshold,
)
if df is None or len(df) == 0:
return df
# df stores the entire data set
# subdf will store the subset (max of n_features, and add dummy values)
df = df.query("pValue<=@pvalue")
logger.info("Filtering out pvalue>{}. Kept {} GO terms".format(pvalue, len(df)))
df = df.reset_index(drop=True)
# Select a subset of the data to keep the best max_features in terms of
# pValue
subdf = df.query("number_in_list>@minimum_genes").copy()
logger.info(
"Filtering out GO terms with less than {} genes: Kept {} GO terms".format(
minimum_genes, len(subdf)
)
)
logger.info("Filtering out the 3 parent terms")
subdf = subdf.query("id not in @self.ontologies")
if subdf is None or len(subdf) == 0:
return subdf
# Keeping only a part of the data, sorting by pValue
if sort_by == "pValue":
subdf = subdf.sort_values(by="pValue", ascending=False).iloc[-max_features:]
df = df.sort_values(by="pValue", ascending=False)
elif sort_by == "fold_enrichment":
subdf = subdf.sort_values(
by="abs_log2_fold_enrichment", ascending=True
).iloc[-max_features:]
df = df.sort_values(by="abs_log2_fold_enrichment", ascending=False)
elif sort_by == "fdr":
subdf = subdf.sort_values(by="fdr", ascending=False).iloc[-max_features:]
df = df.sort_values(by="fdr", ascending=False)
subdf = subdf.reset_index(drop=True)
# We get all levels for each go id.
# They are stored by MF, CC or BP
if compute_levels:
paths = self._get_graph(list(subdf["id"].values), progress=progress)
levels = []
keys = list(paths.keys())
goid_levels = paths[keys[0]]
if len(keys) > 1:
for k in keys[1:]:
goid_levels.update(paths[k])
levels = [goid_levels[ID] for ID in subdf["id"].values]
subdf["level"] = levels
else:
subdf["level"] = ""
# now, for the subdf, which is used to plot the results, we add dummy
# rows to make the yticks range scale nicer.
M = 10
datum = subdf.iloc[-1].copy()
datum.fdr = 0
datum.number_in_list = 0
datum.fold_enrichment = 1
datum.label = ""
datum["id"] = ""
datum["level"] = ""
while len(subdf) < 10:
subdf = pd.concat([datum.to_frame().T, subdf], axis=0)
self.temp = subdf
N = len(subdf)
size_factor = 10000 / len(subdf)
max_size = subdf.number_in_list.max()
# ignore the dummy values
min_size = min([x for x in subdf.number_in_list.values if x != 0])
# here we define a size for each entry.
# For the dummy entries, size is null (int(bool(x))) makes sure
# it is not shown
sizes = [
max(max_size * 0.2, x) * int(bool(x))
for x in size_factor
* subdf.number_in_list.values
/ subdf.number_in_list.max()
]
m1 = min([x for x in sizes if x != 0])
m3 = max(sizes)
m2 = m1 + (m3 - m1) / 2
# The plot itself. we stretch wheen there is lots of features
if len(subdf) > 25:
fig = pylab.figure(num=1)
fig.set_figwidth(10)
fig.set_figheight(8)
else:
fig = pylab.figure(num=1)
fig.set_figwidth(10)
fig.set_figheight(6)
pylab.clf()
if log:
pylab.scatter(
[pylab.log2(x) if x else 0 for x in subdf.fold_enrichment],
range(len(subdf)),
c=subdf.fdr,
s=sizes,
cmap=cmap,
alpha=0.8,
ec="k",
vmin=0,
vmax=fdr_threshold,
zorder=10,
)
# pylab.barh(range(N), pylab.log2(subdf.fold_enrichment), color="r",
# label="pvalue>0.05; FDR>0.05")
# pylab.axvline(1, color="gray", ls="--")
# pylab.axvline(-1, color="gray", ls="--")
else:
pylab.scatter(
subdf.fold_enrichment,
range(len(subdf)),
c=subdf.fdr,
cmap=cmap,
s=sizes,
ec="k",
alpha=0.8,
vmin=0,
vmax=fdr_threshold,
zorder=10,
)
# pylab.barh(range(N), subdf.fold_enrichment, color="r",
# label="not significant")
# set color bar height
pylab.grid(zorder=-10)
ax2 = pylab.colorbar(shrink=0.5)
ax2.ax.set_ylabel("FDR")
# define the labels
max_label_length = 45
labels = [
x if len(x) < max_label_length else x[0 : max_label_length - 3] + "..."
for x in list(subdf.label)
]
ticks = []
for level, ID, label in zip(subdf["level"], subdf.id, labels):
if ID:
ticks.append("{} ({}) {}".format(ID, level, "; " + label.title()))
else:
ticks.append("")
# Set the y-tick labels; the fontsize could be refined here when there
# are only a few rows.
pylab.yticks(range(N), ticks, fontsize=fontsize, ha="left")
yax = pylab.gca().get_yaxis()
try:
pad = [x.label1.get_window_extent().width for x in yax.majorTicks]
yax.set_tick_params(pad=max(pad))
except Exception:
# fall back to a heuristic padding when tick extents are unavailable
yax.set_tick_params(pad=60 * fontsize * 0.6)
# deal with the x-axis now. what is the range ?
fc_max = subdf.fold_enrichment.max(skipna=True)
fc_min = subdf.fold_enrichment.min(skipna=True)
# go into log2 space
fc_max = pylab.log2(fc_max)
fc_min = pylab.log2(fc_min)
abs_max = max(fc_max, abs(fc_min), 1)
if log:
fc_max = abs_max * 1.5
else:
fc_max = 2 ** abs_max * 1.2
pylab.axvline(0, color="k", lw=2)
if log:
pylab.xlabel("Fold Enrichment (log2)")
else:
pylab.xlabel("Fold Enrichment")
# deal with fold changes below 0.
if include_negative_enrichment:
pylab.xlim([-fc_max, fc_max])
else:
pylab.xlim([0, fc_max])
pylab.tight_layout()
# The pvalues:
if show_pvalues:
ax = pylab.gca().twiny()
# ax.set_xlim([0, max(-pylab.log10(subdf.pValue))*1.2])
pvalues = [-pylab.log10(pv) if pv > 0 else 0 for pv in subdf.pValue]
ax.set_xlim([0, max(pvalues) * 1.2])
ax.set_xlabel("p-values (log10)", fontsize=12)
ax.plot(pvalues, range(len(subdf)), label="pvalue", lw=2, color="k")
ax.axvline(1.33, lw=1, ls="--", color="grey", label="pvalue=0.05")
pylab.tight_layout()
pylab.legend(loc="lower right")
# now, let us add a legend
s1 = pylab.scatter([], [], s=m1, marker="o", color="#555555", ec="k")
s2 = pylab.scatter([], [], s=m2, marker="o", color="#555555", ec="k")
s3 = pylab.scatter([], [], s=m3, marker="o", color="#555555", ec="k")
if len(subdf) <= 10:
labelspacing = 1.5 * 2
borderpad = 1.5
handletextpad = 2
elif len(subdf) < 20:
labelspacing = 1.5 * 2
borderpad = 1
handletextpad = 2
else:
labelspacing = 1.5
borderpad = 2
handletextpad = 2
# get back the dataframe without the dummies
subdf = subdf.query("number_in_list>0")
if len(subdf) >= 3:
leg = pylab.legend(
(s1, s2, s3),
(
str(int(min_size)),
str(int(min_size + (max_size - min_size) / 2)),
str(int(max_size)),
),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
elif len(subdf) >= 2:
leg = pylab.legend(
(s1, s3),
(str(int(min_size)), str(int(max_size))),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
else:
leg = pylab.legend(
(s1,),
(str(int(min_size)),),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
frame = leg.get_frame()
frame.set_facecolor("#b4aeae")
frame.set_edgecolor("black")
frame.set_alpha(1)
self.subdf = subdf
self.df = df
return df
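# Sketch of a typical call chain ending in this plot (file names and the
# taxon are placeholders):
#
#   pe = PantherEnrichment("deseq2_results.tsv", taxon=83333)
#   df = pe.plot_go_terms("up", log=True, show_pvalues=True)
#   pylab.savefig("go_terms_up.png", dpi=200)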
def _get_graph(self, go_ids, ontologies=None, progress=True):
# Here we filter the data to keep only the relevant go terms as shown in
# panther pie chart
import networkx as nx
gg = nx.DiGraph()
# assert ontology in ['MF', 'BP', 'CC']
if ontologies is None:
ontologies = ["MF", "BP", "CC"]
elif isinstance(ontologies, str):
ontologies = [ontologies]
ancestors = [self._ancestors[x] for x in ontologies]
levels = []
real_ids = []
obsolets = []
from easydev import Progress
pb = Progress(len(go_ids))
logger.info("Retrieving info for each significant go terms")
annotations = {}
for i, go_id in enumerate(go_ids):
# Some GO terms may be obsolete or renamed; looking them up by the old
# ID may fail simply because the ID has changed.
info = self.quickgo.get_go_terms(go_id)
annotations[go_id] = info
if info[0]["id"] != go_id:
_id = info[0]["id"]
logger.warning("changed {} to {}".format(go_id, _id))
annotations[_id] = info
else:
_id = go_id
aspect = info[0]["aspect"]
if info[0]["isObsolete"] is True:
logger.warning("Skipping obsolet go terms: {}".format(go_id))
obsolets.append(go_id)
continue
real_ids.append(_id)
# now figure out the distance to main ancestor
# we can try several times
# if _id != self.ancestors[ontology]:
for ancestor in ancestors:
edges = self.quickgo.get_go_paths(_id, ancestor)
if edges == 400:
logger.warning("Could not retrieve {} to {}".format(_id, ancestor))
continue
if edges["numberOfHits"] == 0:
continue
if len(edges["results"]) >= 1:
for path in edges["results"]:
for edge in path:
gg.add_edge(edge["child"], edge["parent"])
else:
print(_id, edges["results"])
if progress is True:
pb.animate(i + 1)
self.obsolets += obsolets
self.annotations = annotations
self.graph = gg
all_paths = {}
for ancestor in ancestors:
if ancestor not in gg:
continue
paths = nx.shortest_path_length(gg, target=ancestor)
for obsolet in obsolets:
paths[obsolet] = 100
all_paths[ancestor] = paths
return all_paths
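# Minimal, self-contained illustration of the level computation above;
# the edges are invented for the example:
#
#   import networkx as nx
#   g = nx.DiGraph()
#   g.add_edge("GO:0006412", "GO:0008150")  # child -> parent
#   g.add_edge("GO:0099999", "GO:0006412")
#   nx.shortest_path_length(g, target="GO:0008150")
#   # {'GO:0008150': 0, 'GO:0006412': 1, 'GO:0099999': 2}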
def save_chart(self, data, filename="chart.png"):
"""
pe = PantherEnrichment("B4052-V1.T1vsT0.complete.xls", fc_threshold=5,
padj_threshold=0.05)
df = pe.plot_go_terms("down", log=True, compute_levels=False)
pe.save_chart(df, "chart.png")
"""
# if dataframe, get 'id' column, otherwise expect a list or string of go
# terms separated by commas
if isinstance(data, list):
goids = ",".join(data)
elif isinstance(data, str):
goids = data
elif "id" in data:
goids = ",".join(list(data["id"].values))
try:
goids = [x for x in goids.split(",") if x not in self.obsolets]
except:
logger.error("Could not save chart")
goids = ",".join(goids)
# remove obsolets
try:
res = self.quickgo.get_go_chart(goids)
if res is None:
raise Exception
with open(filename, "wb") as fout:
fout.write(res.content)
except:
import shutil
logger.warning(
"Could not create the GO chart. Maybe too many go IDs ({})".format(
len(goids.split(","))
)
)
from sequana import sequana_data
no_data = sequana_data("no_data.png")
shutil.copy(no_data, filename)
class GSEA:
def __init__(self, species):
# placeholder: species is currently unused and gene_sets must be
# assigned by the caller before calling :meth:`enrichment`
self.species = species
self.gene_sets = {}
def enrichment(self, gene_list, verbose=False, background=None):
enr = gseapy.enrichr(
gene_list=gene_list,
gene_sets=self.gene_sets,
verbose=verbose,
background=background,
outdir="test",
no_plot=True,
)
return enr
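# Sketch of the dictionary-based gene_sets layout that gseapy.enrichr
# accepts (set names and gene symbols are invented; gene_sets must be
# assigned before calling enrichment):
#
#   gsea = GSEA("ecoli")
#   gsea.gene_sets = {"setA": ["geneA", "geneB"], "setB": ["geneC"]}
#   enr = gsea.enrichment(["geneA", "geneC"], background=4000)
#   enr.results.head()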
class KeggPathwayEnrichment:
"""Kegg Pathways enrichment from DGE results
DGE = Differential Gene Expression
Current input is the output of the RNADiff analysis. This is a file
that can be read by RNADiffResults.
When performing a DGE analysis, feature counts are computed using an input
GFF. Depending on your parameters the gene names may be saved as ensembl
identifiers or gene names. If you have gene names understood by Kegg, you
simply need to use this code::
ke = KeggPathwayEnrichment("rnadiff", "eco") #"eco" for E. coli here
this calls ke.compute_enrichment() that stores the up, down and all results
in the attribute :attr:`enrichment` as a dictionary.
You can now plot the results::
ke.barplot('down')
and save enriched pathways as follows::
up = ke.save_significant_pathways("up")
down = ke.save_significant_pathways("down")
up.to_csv("kegg_pathway_up_regulated.csv")
down.to_csv("kegg_pathway_down_regulated.csv")
This class works like a charm for ecoli with GFF that uses gene names.
For mus musculus, the organism is **mmu** (not **mus**). You will need to have
a mapping of the Ensembl ID into Kegg IDs (actually gene name).
You can perform the conversion using BioServices/BioMart. We have
implemented a simple function inside Sequana::
from sequana.enrichment import Mart
conv = Mart("mmusculus_gene_ensembl")
df = conv.query()
conv.save(df)
You can then import the dataframe, as follows using the mapper argument::
import pandas as pd
df = pd.read_csv("biomart.csv")
df = df.rename({"external_gene_name":"name", "ensembl_gene_id": "ensembl"},
axis=1)
df = df.set_index("ensembl", inplace=True)
KeggPathwayEnrichment("path_to_rnadiff", "mmu", mapper=df)
More generally, when starting KeggPathwayEnrichment, we read all pathways.
This may change with time. So, you can save the pathways::
ke.export_pathways_to_json()
And read them back::
ke = KeggPathwayEnrichment("path_to_rnadiff", "mmu", mapper=df,
preload_directory="kegg_pathways/mmu")
df = ke.scatterplot('down')
tight_layout()
savefig("B4052_T1vsT0_KE_scatterplot_down.png")
df = ke.scatterplot('up')
savefig("B4052_T1vsT0_KE_scatterplot_up.png")
"""
def __init__(
self,
gene_lists,
organism,
alpha=0.05,
log2_fc=0,
progress=True,
mapper=None,
background=None,
preload_directory=None,
convert_input_gene_to_upper_case=False,
):
"""
In some cases, the input identifiers are converted into names thanks to
the input mapper (CSV file). Yet, if the external names come from one
species and you use another species in KEGG, the KEGG names may be upper
case while your species' names are lower case. In such situations, you
can force the input identifiers to upper case by setting the
convert_input_gene_to_upper_case parameter to True.
"""
self.convert_input_gene_to_upper_case = convert_input_gene_to_upper_case
from bioservices import KEGG
self.kegg = KEGG(cache=True)
self.kegg.organism = organism
self.summary = Summary("KeggPathwayEnrichment")
self.summary.add_params(
{
"organism": organism,
"alpha": alpha,
"log2_fc": log2_fc,
"mapper": (True if mapper is not None else False),
"background": background,
}
)
self.gene_lists = gene_lists
if background:
self.background = background
else:
self.background = len(self.kegg.list(self.kegg.organism).split("\n"))
logger.info("Set number of genes to {}".format(self.background))
self._load_pathways(progress=progress, preload_directory=preload_directory)
if isinstance(mapper, str):
import pandas as pd
df = pd.read_csv(mapper)
df = df.rename(
{"external_gene_name": "name", "ensembl_gene_id": "ensembl"}, axis=1
)
df.set_index("ensembl", inplace=True)
self.mapper = df
else: # the dataframe should already contain the correct columns and index
self.mapper = mapper
try:
self.compute_enrichment()
except Exception as err:
print(err)
logger.critical("An error occured while computing enrichments. ")
def _load_pathways(self, progress=True, preload_directory=None):
# This is just loading all pathways once for all
self.pathways = {}
if preload_directory:
# preload is a directory with all pathways in it
import glob
pathways = glob.glob(preload_directory + "/*json")
for i, name in enumerate(pathways):
key = name.strip(".json").split("/")[-1]
with open(name, "r") as fin:
data = json.load(fin)
self.pathways[key] = data
else:
logger.info("loading all pathways from KEGG. may take time the first time")
from easydev import Progress
pb = Progress(len(self.kegg.pathwayIds))
for i, ID in enumerate(self.kegg.pathwayIds):
self.pathways[ID.replace("path:", "")] = self.kegg.parse(
self.kegg.get(ID)
)
if progress:
pb.animate(i + 1)
# Some cleanup. Note that if we read the json file, this is different
# since already cleanup but this code does no harm
for ID in self.pathways.keys():
name = self.pathways[ID]["NAME"]
if isinstance(name, list):
name = name[0]
self.pathways[ID]["NAME"] = name.split(" - ", 1)[0]
# save gene sets
self.gene_sets = {}
for ID in self.pathways.keys():
res = self.pathways[ID]
if "GENE" in res.keys():
results = []
# some pathways report genes as a dictionary id:'gene name; description' (e.g. eco);
# others report genes as a dictionary id:'description'
for geneID, description in res["GENE"].items():
if ";" in description:
name = description.split(";")[0]
else:
name = geneID
results.append(name)
self.gene_sets[ID] = results
else:
logger.debug("SKIPPED (no genes) {}: {}".format(ID, res["NAME"]))
# save all pathways info
self.df_pathways = pd.DataFrame(self.pathways).T
del self.df_pathways["ENTRY"]
del self.df_pathways["REFERENCE"]
go = [
x["GO"] if isinstance(x, dict) and "GO" in x.keys() else None
for x in self.df_pathways.DBLINKS
]
self.df_pathways["GO"] = go
del self.df_pathways["DBLINKS"]
def plot_genesets_hist(self, bins=20):
N = len(self.gene_sets.keys())
pylab.clf()
pylab.hist([len(v) for k, v in self.gene_sets.items()], bins=bins, lw=1, ec="k")
pylab.title("{} gene sets".format(N))
pylab.xlabel("Gene set sizes")
pylab.grid(True)
a, b = pylab.xlim()
pylab.xlim([0, b])
def compute_enrichment(self, background=None):
if background is None:
background = self.background
self.summary.data["missing_genes"] = {}
self.summary.data["input_gene_list"] = {}
self.enrichment = {}
self.enrichment["up"] = self._enrichr("up", background=background)
self.enrichment["down"] = self._enrichr("down", background=background)
self.enrichment["all"] = self._enrichr("all", background=background)
if (
len(self.enrichment["up"].results) == 0
and len(self.enrichment["up"].results) == 0
):
logger.warning(
"Enrichment results are empty. Could be real because number of"
" deregulated genes is low or an incompatible set of gene IDs."
" Please use BioMart to convert your IDs into external gene names "
)
def _enrichr(self, category, background=None, verbose=True):
if background is None:
background = self.background
if isinstance(category, list):
gene_list = category
else:
assert category in ["up", "down", "all"]
gene_list = self.gene_lists[category]
logger.info("Input gene list of {} ids".format(len(gene_list)))
self.summary.data["input_gene_list"][category] = len(gene_list)
if self.mapper is not None:
missing = [x for x in gene_list if x not in self.mapper.index]
logger.info("Missing genes from mapper dataframe: {}".format(len(missing)))
self.summary.data["missing_genes"][category] = ",".join(missing)
gene_list = [x for x in gene_list if x in self.mapper.index]
identifiers = self.mapper.loc[gene_list]["name"].drop_duplicates().values
if self.convert_input_gene_to_upper_case:
identifiers = [x.upper() for x in identifiers if isinstance(x, str)]
logger.info("Mapped gene list of {} ids".format(len(identifiers)))
gene_list = list(identifiers)
enr = gseapy.enrichr(
gene_list=gene_list,
gene_sets=self.gene_sets,
verbose=verbose,
background=background,
outdir="test",
no_plot=True,
)
return enr
def _get_final_df(self, df, cutoff=0.05, nmax=10):
# takes the df and populate the name and size of the found pathways
# we also sort by adjusted p-value
# we keep adj p-value <=0.05
if len(df) == 0:
return df
df = df.copy()
df["name"] = [self.pathways[x]["NAME"] for x in df.Term]
df["size"] = [len(x.split(";")) for x in df.Genes]
df = df.sort_values("Adjusted P-value")
df.reset_index(drop=True, inplace=True)
df = df[df["Adjusted P-value"] <= cutoff]
if len(df) < nmax:
nmax = len(df)
df = df.iloc[0:nmax]
df = df.sort_values("Adjusted P-value", ascending=False)
df = df.rename({"Term": "pathway_id"}, axis=1)
return df
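# The enrichr results consumed here are expected to carry at least the
# columns used above; an illustrative row:
#
#   Term        Genes           Adjusted P-value
#   eco00010    glk;pgi;pfkA    0.003
#
# which _get_final_df turns into pathway_id / name / size columns.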
def barplot(self, category, cutoff=0.05, nmax=10):
assert category in ["up", "down", "all"]
df = self._get_final_df(
self.enrichment[category].results, cutoff=cutoff, nmax=nmax
)
if len(df) == 0:
return df
pylab.clf()
pylab.barh(range(len(df)), -pylab.log10(df["Adjusted P-value"]))
pylab.yticks(range(len(df)), df.name)
pylab.axvline(1.3, lw=2, ls="--", color="r")
pylab.grid(True)
pylab.xlabel("Adjusted p-value (log10)")
pylab.ylabel("Gene sets")
a, b = pylab.xlim()
pylab.xlim([0, b])
pylab.tight_layout()
return df
def scatterplot(self, category, cutoff=0.05, nmax=10, gene_set_size=[]):
assert category in ["up", "down", "all"]
df = self._get_final_df(
self.enrichment[category].results, cutoff=cutoff, nmax=nmax
)
if len(df) == 0:
return df
pylab.clf()
pylab.scatter(
-pylab.log10(df["Adjusted P-value"]),
range(len(df)),
s=10 * df["size"],
c=df["Adjusted P-value"],
)
pylab.xlabel("Odd ratio")
pylab.ylabel("Gene sets")
pylab.yticks(range(len(df)), df.name)
a, b = pylab.xlim()
pylab.xlim([0, b])
pylab.grid(True)
ax = pylab.gca()
M = max(df["size"])
if M > 100:
l1, l2, l3 = "10", "100", str(M)
else:
l1, l2, l3 = str(round(M / 3)), str(round(M * 2 / 3)), str(M)
handles = [
pylab.Line2D([0], [0], marker="o", markersize=5, label=l1, ls=""),
pylab.Line2D([0], [0], marker="o", markersize=10, label=l2, ls=""),
pylab.Line2D([0], [0], marker="o", markersize=15, label=l3, ls=""),
]
ax.legend(handles=handles, loc="upper left", title="gene-set size")
pylab.axvline(1.3, lw=2, ls="--", color="r")
pylab.tight_layout()
ax = pylab.colorbar(pylab.gci())
return df
# FIXME rnadiff object is not imported anymore. This function is not functional
def _get_summary_pathway(self, pathway_ID, df):
genes = self.df_pathways.loc[pathway_ID]["GENE"]
df_down = df.query("padj<=0.05 and log2FoldChange<0").copy()
df_up = df.query("padj<=0.05 and log2FoldChange>=0").copy()
if "Name" not in df_down.columns:
df_down["Name"] = df_down["ID"]
if "Name" not in df_up.columns:
df_up["Name"] = df_up["ID"]
logger.info("{}".format(pathway_ID))
logger.info("Total down-regulated: {}".format(len(df_down)))
logger.info("Total up-regulated: {}".format(len(df_up)))
mapper = {}
for k, v in genes.items():
mapper[v.split(";")[0]] = k
self.genes = genes
self.df_down = df_down
self.df_up = df_up
summary_names = []
summary_keggids = []
summary_types = []
summary_pvalues = []
summary_fcs = []
if self.mapper is not None:
if "Name" not in df_down.columns:
df_down["Name"] = df_down["ID"]
Names = []
for index in df_down.index:
Names.append(self.mapper.loc[index]["name"][0])
df_down["Name"] = Names
if "Name" not in df_up.columns:
df_up["Name"] = df_up["ID"]
Names = []
for index in df_up.index:
Names.append(self.mapper.loc[index]["name"][0])
df_up["Name"] = Names
#
identifiers = []
new_mapper = {}
for name, kegg_id in mapper.items():
try:
identifier = (
self.mapper.query("name == @name")["name"]
.drop_duplicates()
.index[0]
)
identifiers.append(identifier)
new_mapper[identifier] = kegg_id
except:
logger.warning(
"Skipped {}(kegg ID {}). could not find mapping".format(
name, kegg_id
)
)
mapper = new_mapper
for name, kegg_id in mapper.items():
summary_names.append(name)
summary_keggids.append(kegg_id)
if name.lower() in [x.lower() for x in df_down.Name]:
padj = -pylab.log10(df_down.query("Name==@name").padj.values[0])
fc = df_down.query("Name==@name").log2FoldChange.values[0]
summary_fcs.append(fc)
summary_pvalues.append(padj)
summary_types.append("-")
elif name.lower() in [x.lower() for x in df_up.Name]:
padj = -pylab.log10(df_up.query("Name==@name").padj.values[0])
summary_pvalues.append(padj)
fc = df_up.query("Name==@name").log2FoldChange.values[0]
summary_fcs.append(fc)
summary_types.append("+")
else:
summary_pvalues.append(None)
summary_fcs.append(None)
summary_types.append("=")
summary = pd.DataFrame(
{
"type": summary_types,
"name": summary_names,
"padj": summary_pvalues,
"fc": summary_fcs,
"keggid": summary_keggids,
}
)
summary["description"] = [
self.pathways[pathway_ID]["GENE"][x] for x in summary.keggid
]
return summary
def _get_colors(self, summary):
colors = {}
for index, row in summary.iterrows():
pvalue = row["padj"]
type_ = row["type"]
kegg_id = row["keggid"]
if type_ == "-":
if pvalue > 0 and pvalue < 5:
colors[kegg_id] = "#FF8C00,black"
elif pvalue < 10:
colors[kegg_id] = "#FF0000,black"
else:
colors[kegg_id] = "#B22222%2Cblack"
elif type_ == "+":
if pvalue > 0 and pvalue < 5:
colors[kegg_id] = "#9ACD32,black"
elif pvalue < 10:
colors[kegg_id] = "#008000,black"
else:
colors[kegg_id] = "#006400,#000000"
else:
colors[kegg_id] = "grey,black"
return colors
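# Each mapping produced here becomes one "<gene_id> <bgcolor>,<fgcolor>"
# line of the multi_query field POSTed to KEGG in save_pathway; for
# example (gene IDs invented):
#
#   b1234 #FF8C00,black
#   b5678 grey,black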
def save_pathway(self, pathway_ID, df, scale=None, show=False, filename=None):
summary = self._get_summary_pathway(pathway_ID, df)
colors = self._get_colors(summary)
logger.info("pathway {} total genes: {}".format(pathway_ID, len(summary)))
count_up = len(summary.query("type == '+'"))
count_down = len(summary.query("type == '-'"))
logger.info("this pathway down-regulared genes: {}".format(count_down))
logger.info("this pathway up-regulated genes: {}".format(count_up))
url = "https://www.kegg.jp/kegg-bin/show_pathway"
# dcolor = "white" --> does not work with the post requests unlike get
# requests
params = {
"map": pathway_ID,
"multi_query": "\r\n".join(
["{} {}".format(k, v) for k, v in colors.items()]
),
}
self.params = params
import requests
html_page = requests.post(url, data=params)
self.tmp = html_page
html_page = html_page.content.decode()
links_to_png = [
x for x in html_page.split() if "png" in x and x.startswith("src")
]
link_to_png = links_to_png[0].replace("src=", "").replace('"', "")
r = requests.get("https://www.kegg.jp/{}".format(link_to_png))
if filename is None:
filename = "{}.png".format(pathway_ID)
with open(filename, "wb") as fout:
fout.write(r.content)
return summary
def save_all_pathways(self): # pragma: no cover
# This does not do any enrichment. Just save all pathways once for all
# with useful information
for ID in self.pathways.keys():
# note: save_pathway also expects the DGE dataframe as second argument
self.save_pathway(ID)
def save_significant_pathways(
self, category, cutoff=0.05, nmax=20, background=None, tag="", outdir="."
): # pragma: no cover
"""category should be up, down or all"""
if background is None:
background = self.background
# select the relevant pathways
df = self._enrichr(category, background).results
df = self._get_final_df(df, cutoff=cutoff, nmax=nmax)
logger.warning("Found {} pathways to save".format(len(df)))
if len(df) == nmax:
logger.warning("Restricted pathways to {}".format(nmax))
logger.info("saving {} deregulated pathways".format(len(df)))
summaries = {}
for ID in df["pathway_id"]:
summary = self.save_pathway(
ID, filename=(Path(outdir) / f"{ID}_{category}.png")
)
summaries[ID] = summary
return summaries
def find_pathways_by_gene(self, gene_name, match="exact"):
"""Returns pathways that contain the gene name
ke.find_pathways_by_gene("ysgA")
"""
# First let us find the kegg ID
genes = self.kegg.list(self.kegg.organism).strip().split("\n")
keggid = [x.split("\t")[0].strip() for x in genes]
gene_names = [x.split("\t")[1].split(";")[0].strip() for x in genes]
self.keggid = keggid
self.gene_names = gene_names
candidates = []
for x, y in zip(keggid, gene_names):
if match == "exact":
if gene_name == y:
candidates = x.split(":")[1]
break
else:
if gene_name in y:
candidates.append(x)
if match != "exact":
candidates = [x.split(":")[1] for x in candidates]
logger.info("Found {} candidate(s): {}".format(len(candidates), candidates))
else:
logger.info("Found {} in {}".format(gene_name, candidates))
paths = []
for key in self.pathways.keys():
if "GENE" in self.pathways[key]:
if match == "exact":
if candidates in self.pathways[key]["GENE"].keys():
paths.append(key)
else:
for candidate in candidates:
if candidate in self.pathways[key]["GENE"].keys():
paths.append(key)
return list(set(paths))
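# Usage sketch (any match value other than "exact" switches to substring
# matching):
#
#   ke.find_pathways_by_gene("ysgA")                   # exact gene name
#   ke.find_pathways_by_gene("ysg", match="contains")  # substring match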
def save_project(self, tag, outdir="."):
"""Save tables and visualisations of the complete enrichmment analysis."""
outdir = Path(outdir)
outdir.mkdir(parents=True, exist_ok=True)
from pylab import savefig
for category in ["up", "down", "all"]:
common_out = Path(f"{tag}_kegg_gsea_{category}_degs")
results = self.enrichment[category].results
if not results.empty:
# FIXME: For now fixing a nmax to 10000 to be sure to have all results
# (this could be improved by having self.complete_df and self.filtered_df attributes)
self._get_final_df(results, cutoff=1, nmax=10000).to_csv(
outdir / (common_out.name + ".csv")
)
self._get_final_df(results, cutoff=0.05, nmax=10000).to_csv(
outdir / (common_out.name + "_significant.csv")
)
self.barplot(category)
savefig(outdir / (common_out.name + "_barplot.png"), dpi=200)
self.scatterplot(category)
savefig(outdir / (common_out.name + "_scatterplot.png"), dpi=200)
# TODO: Implement significant pathways export here (got an ID
# error before, so commenting)
# self.save_significant_pathways(
# category, tag=tag, outdir=(outdir / "pathways")
# )
# In case of no enrichment results, create empty files stating so
else:
(outdir / (common_out.name + "_NO_RESULTS")).touch()
# FIXME: I think this table is redundant with previous csv export. Is it correct ?
# > So commenting for now
# df.to_csv(outdir / (common_out.name + ".csv"), index=None)
def export_pathways_to_json(self, outdir="kegg_pathways"):
# This is useful to keep an exact track of the pathways that were used.
# They can be loaded back; if so, the KEGG service is only used in the
# :meth:`find_pathways_by_gene` method.
outdir = outdir + "/" + self.kegg.organism
from easydev import mkdirs
mkdirs(outdir)
import json
for key, data in self.pathways.items():
with open(f"{outdir}/{key}.json", "w") as fout:
json.dump(data, fout)
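# The exported directory can be fed back through preload_directory to
# skip the slow KEGG download on the next run, e.g.:
#
#   ke.export_pathways_to_json()  # writes kegg_pathways/<organism>/*.json
#   ke = KeggPathwayEnrichment("path_to_rnadiff", "eco",
#                              preload_directory="kegg_pathways/eco")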
# not tested here. This is tested through bioservices and takes a long time
class Mart: # pragma: no cover
"""
conv = Mart(dataset="mmusculus_gene_ensembl")
# you could choose hsapiens_gene_ensembl for instance
df = conv.query()
df.set_index("ensembl_gene_id")
conv.save(df)
The file can now be loaded in KeggPathwayEnrichment as a mapper of the
ensemble identifier to external names understood by Kegg.
"""
def __init__(self, dataset, mart="ENSEMBL_MART_ENSEMBL"):
logger.info("Init Mart")
from bioservices import BioMart
self.biomart = BioMart()
self.datasets = self.biomart.get_datasets(mart)
self._dataset = None
try:
self.dataset = dataset
except Exception:
logger.critical("Invalid dataset. Check the datasets attribute")
def _set_dataset(self, dataset):
if dataset not in self.datasets["name"].values:
raise ValueError(
"Invalid dataset {}. Check the Choose amongst {}".format(
dataset, self.datasets
)
)
self._dataset = dataset
self.attributes = self.biomart.attributes(dataset=dataset)
self.filters = self.biomart.filters(dataset=dataset)
def _get_dataset(self):
return self._dataset
dataset = property(_get_dataset, _set_dataset)
def query(
self,
attributes=["ensembl_gene_id", "go_id", "entrezgene_id", "external_gene_name"],
):
logger.info("Please wait. This may take a while depending on your connection")
self.biomart.new_query()
self.biomart.add_dataset_to_xml(self.dataset)
for attribute in attributes:
if attribute not in self.attributes:
logger.error(
"{} not found in the dataset {}".format(attribute, self.dataset)
)
raise ValueError
self.biomart.add_attribute_to_xml(attribute)
xml = self.biomart.get_xml()
results = self.biomart.query(xml)
import pandas as pd
import io
df = pd.read_csv(io.StringIO(results), sep="\t")
df.columns = attributes
# df = df.set_index('ensembl_gene_id')
# name should be the name used by kegg
return df
def save(self, df, filename=None):
"""df is the output of :meth:`~query`. This function save it keeping
track of day/month/year and dataset."""
import time
date = time.localtime()
if filename is None:
filename = "biomart_{}__{}_{}_{}.csv".format(
self.dataset, date.tm_year, date.tm_mon, date.tm_mday
)
logger.info("Saving into {}".format(filename))
df.to_csv(filename, index=False)
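# End-to-end sketch tying Mart to KeggPathwayEnrichment (dataset and file
# names are placeholders):
#
#   conv = Mart("mmusculus_gene_ensembl")
#   df = conv.query()
#   conv.save(df)
#   mapper = pd.read_csv("biomart.csv").rename(
#       {"external_gene_name": "name", "ensembl_gene_id": "ensembl"}, axis=1
#   ).set_index("ensembl")
#   ke = KeggPathwayEnrichment(gene_lists, "mmu", mapper=mapper)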
layint_runtime_api/models/__init__.py | LayeredInsight/layint_runtime_api_python | 1 | 6613792 |
# coding: utf-8
"""
Layered Witness & Control
LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.9.7
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .alert_events import AlertEvents
from .config import Config
from .configs import Configs
from .container import Container
from .container_log import ContainerLog
from .container_logs import ContainerLogs
from .containers import Containers
from .dossier import Dossier
from .dossier_template_response import DossierTemplateResponse
from .image import Image
from .images import Images
from .inline_response_200 import InlineResponse200
from .limit import Limit
from .policies import Policies
from .policy import Policy
from .policy_rule import PolicyRule
from .process_limit import ProcessLimit
from .registries import Registries
from .registry import Registry
from .resource_not_found_error import ResourceNotFoundError
from .syscall_limit import SyscallLimit
from .unauthorized_error import UnauthorizedError
from .update_config import UpdateConfig
Python/ladder593.py | JWang169/LintCodeJava | 1 | 6613793 | import sys
class Solution:
"""
@param A: An integer array
@return: An integer
"""
def stoneGame2(self, A):
# the circular arrangement is what makes this tricky
# duplicate the array and append it to itself
# then take the minimum merge cost over every window of length len(A)
n = len(A)
if n <= 1:
return 0
B = A + A # double size list
dp = [[0] * 2 * n for _ in range(2 * n) ]
# still go through all length
for length in range(2, n + 1):
for i in range(2 * n - length + 1): # starting point
j = i + length - 1
dp[i][j] = sys.maxsize
score = self.getScore(i, j, B)
for k in range(i, j):
dp[i][j] = min(dp[i][j], dp[i][k] + dp[k + 1][j] + score)
res = sys.maxsize
for i in range(n):
res = min(dp[i][i+n-1], res)
return res
def getScore(self, i, j, B):
return sum(B[i : j + 1])
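# Quick self-check; the input is illustrative and 18 is the minimum cost
# of merging the circle [4, 1, 1, 4] down to one pile:
if __name__ == "__main__":
print(Solution().stoneGame2([4, 1, 1, 4])) # expected: 18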
src/data/reader.py | bbchond/user-activity-generator | 5 | 6613794 | from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Tuple, Type, Union
from pandas import DataFrame, read_csv
from ..util.integrity import recursive_sha256
from .filetype import FileType
class Reader(ABC):
def __init__(self, path: Path):
if not path.is_file():
raise ValueError(f"Unsupported path (Not a file): {path}")
if not path.exists():
raise ValueError(f"Path does not exist: {path}")
self.path = path
@abstractmethod
def read(self, columns: Dict[str, "DataType"]) -> DataFrame:
pass
@property
def hash(self) -> str:
return recursive_sha256(self.path)
class DataType(Enum):
INT64 = "int64"
FLOAT64 = "float64"
BOOL = "bool"
DATETIME64 = "datetime64"
OBJECT = "object"
CATEGORY = "category"
TIMEDELTA = "timedelta"
class CsvReader(Reader):
def __init__(self, path: Path):
Reader.__init__(self, path)
def read(self, columns: Dict[str, DataType]) -> DataFrame:
pandas_columns = {name: type_enum.value for name, type_enum in columns.items()}
df = read_csv(self.path, dtype=pandas_columns)
for key, datatype in columns.items():
if key not in df:
if datatype in {DataType.FLOAT64, DataType.INT64}:
df[key] = 0
# string-like columns default to "" (OBJECT assumed alongside CATEGORY)
elif datatype in {DataType.CATEGORY, DataType.OBJECT}:
df[key] = ""
elif datatype == DataType.BOOL:
df[key] = False
return df.astype(pandas_columns)
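# Minimal self-check sketch; the column names are invented and not part
# of the project's schema:
if __name__ == "__main__":
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as f:
f.write("user_id,activity\n1,walk\n2,run\n")
df = CsvReader(Path(f.name)).read(
{"user_id": DataType.INT64, "activity": DataType.CATEGORY}
)
print(df.dtypes)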
corehq/motech/repeaters/migrations/0001_adjust_auth_field_format.py | dannyroberts/commcare-hq | 0 | 6613795 |
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import migrations
from corehq.motech.repeaters.models import Repeater
from corehq.motech.repeaters.utils import migrate_repeater
from corehq.util.couch import iter_update, DocUpdate
def migrate_auth_field(apps, schema_editor):
repeater_ids = [row['id'] for row in Repeater.view(
'repeaters/repeaters',
include_docs=False,
reduce=False,
wrap_doc=False
)]
iter_update(
db=Repeater.get_db(),
fn=migrate_repeater,
ids=repeater_ids,
)
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.RunPython(migrate_auth_field),
]
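# For reference, iter_update streams each fetched doc through the
# callback and applies the returned DocUpdate; a hypothetical callback
# could look like this (the auth-splitting logic is invented, not the
# actual migrate_repeater):
#
#   def migrate_repeater(doc):
#       auth = doc.pop('auth', None)
#       if auth:
#           doc['username'], doc['password'] = auth
#           return DocUpdate(doc)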
tests/client/test_utils.py | pardo/python-scrapinghub | 163 | 6613796 | import os
import pytest
from codecs import encode
import mock
from scrapinghub.client.utils import parse_auth, parse_job_key
def test_parse_auth_none():
with pytest.raises(RuntimeError):
parse_auth(None)
@mock.patch.dict(os.environ, {'SH_APIKEY': 'testkey'})
def test_parse_auth_none_with_env():
assert parse_auth(None) == ('testkey', '')
@mock.patch.dict(os.environ, {'SH_APIKEY': 'testkey', 'SHUB_JOBAUTH': 'jwt'})
def test_parse_auth_none_with_multiple_env():
assert parse_auth(None) == ('testkey', '')
def test_parse_auth_tuple():
assert parse_auth(('test', 'test')) == ('test', 'test')
assert parse_auth(('apikey', '')) == ('apikey', '')
with pytest.raises(ValueError):
parse_auth(('user', 'pass', 'bad-param'))
with pytest.raises(ValueError):
parse_auth((None, None))
with pytest.raises(ValueError):
parse_auth((1234, ''))
def test_parse_auth_not_string():
with pytest.raises(ValueError):
parse_auth(12345)
def test_parse_auth_simple():
assert parse_auth('user:pass') == ('user', 'pass')
def test_parse_auth_apikey():
apikey = 'c3a3c298c2b8c3a6c291c284c3a9'
assert parse_auth(apikey) == (apikey, '')
def test_parse_auth_jwt_token():
test_job, test_token = '1/2/3', 'some.jwt.token'
raw_token = (test_job + ':' + test_token).encode('utf8')
encoded_token = encode(raw_token, 'hex_codec').decode('ascii')
assert parse_auth(encoded_token) == (test_job, test_token)
def test_parse_auth_jwt_token_with_jwt_token_env():
dummy_test_job, dummy_test_token = '1/2/3', 'some.dummy.jwt.token'
raw_token = (dummy_test_job + ':' + dummy_test_token).encode('utf8')
dummy_encoded_token = encode(raw_token, 'hex_codec').decode('ascii')
test_job, test_token = '1/2/3', 'some.jwt.token'
raw_token = (test_job + ':' + test_token).encode('utf8')
encoded_token = encode(raw_token, 'hex_codec').decode('ascii')
with mock.patch.dict(os.environ, {'SHUB_JOBAUTH': dummy_encoded_token}):
assert parse_auth(encoded_token) == (test_job, test_token)
def test_parse_auth_none_with_jwt_token_env():
test_job, test_token = '1/2/3', 'some.jwt.token'
raw_token = (test_job + ':' + test_token).encode('utf8')
encoded_token = encode(raw_token, 'hex_codec').decode('ascii')
with mock.patch.dict(os.environ, {'SHUB_JOBAUTH': encoded_token}):
assert parse_auth(None) == (test_job, test_token)
def test_parse_job_key():
job_key = parse_job_key('123/10/11')
assert job_key.project_id == '123'
assert job_key.spider_id == '10'
assert job_key.job_id == '11'
def test_parse_job_key_non_numeric():
with pytest.raises(ValueError):
parse_job_key('123/a/6')
def test_parse_job_key_incorrect_length():
with pytest.raises(ValueError):
parse_job_key('123/1')
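# For reference, the hex-encoded jobauth strings exercised above can be
# reproduced like this:
#
#   from codecs import encode
#   encode(b"1/2/3:some.jwt.token", "hex_codec").decode("ascii")
#   # '312f322f333a736f6d652e6a77742e746f6b656e'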
galaxy/api/v2/tests/test_collection_import_views.py | bmclaughlin/galaxy | 904 | 6613797 |
# (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.contrib.auth import get_user_model
from pulpcore import constants as pulp_const
from pulpcore.app import models as pulp_models
from rest_framework.test import APITestCase
from rest_framework import status as http_codes
from galaxy.main import models
UserModel = get_user_model()
class TestCollectionImportView(APITestCase):
url = 'http://testserver/api/v2/collection-imports/{id}/'
def setUp(self):
super().setUp()
self.user = UserModel.objects.create_user(
username='testuser', password='<PASSWORD>')
self.namespace = models.Namespace.objects.create(
pk=12, name='mynamespace')
self.namespace.owners.set([self.user])
self.client.login(username=self.user.username, password='<PASSWORD>')
def test_collection_import_waiting(self):
pulp_task = pulp_models.Task.objects.create(
pk=24,
job_id='0c978c4e-7aba-4a22-be39-de3a433fb687',
state=pulp_const.TASK_STATES.WAITING,
)
models.CollectionImport.objects.create(
pk=42,
namespace=self.namespace,
name='mycollection',
version='1.2.3',
pulp_task=pulp_task,
)
response = self.client.get(self.url.format(id=42))
assert response.status_code == http_codes.HTTP_200_OK
assert response.json() == {
'id': 42,
# 'href': '/api/v1/collection-imports/42/',
'job_id': '0c978c4e-7aba-4a22-be39-de3a433fb687',
'error': None,
'started_at': None,
'finished_at': None,
'state': 'waiting',
'namespace': {
'id': 12,
'href': 'http://testserver/api/v1/namespaces/12/',
'name': 'mynamespace',
},
'name': 'mycollection',
'version': '1.2.3',
'messages': [],
'lint_records': [],
'imported_version': None,
}
def test_collection_import_complete(self):
pulp_task = pulp_models.Task.objects.create(
pk=24,
job_id='0c978c4e-7aba-4a22-be39-de3a433fb687',
state=pulp_const.TASK_STATES.COMPLETED,
started_at='2019-04-09T09:58:02-04:00',
finished_at='2019-04-09T09:58:59-04:00',
)
collection = models.Collection.objects.create(
pk=25,
namespace=self.namespace,
name='mycollection',
)
version = models.CollectionVersion.objects.create(
pk=26,
collection=collection,
version='1.2.3',
)
models.CollectionImport.objects.create(
pk=42,
namespace=self.namespace,
name='mycollection',
version='1.2.3',
pulp_task=pulp_task,
imported_version=version,
messages=[
{
'level': 'INFO',
'message': 'Task started',
'time': 1554818284.0956235,
},
{
'level': 'INFO',
'message': 'Task finished',
'time': 1554818305.9033494,
}
],
lint_records=[
{
'code': 'TEST0001',
'type': 'test',
'message': 'Test lint record',
'severity': 4,
'score_type': 'test'
},
]
)
response = self.client.get(self.url.format(id=42))
assert response.status_code == http_codes.HTTP_200_OK
assert response.json() == {
'id': 42,
# 'href': '/api/v1/collection-imports/42/',
'job_id': '0c978c4e-7aba-4a22-be39-de3a433fb687',
'error': None,
'started_at': '2019-04-09T09:58:02-04:00',
'finished_at': '2019-04-09T09:58:59-04:00',
'state': 'completed',
'namespace': {
'id': 12,
'href': 'http://testserver/api/v1/namespaces/12/',
'name': 'mynamespace',
},
'name': 'mycollection',
'version': '1.2.3',
'lint_records': [
{
'code': 'TEST0001',
'message': 'Test lint record',
'score_type': 'test',
'severity': 4,
'type': 'test',
}
],
'messages': [
{
'level': 'INFO',
'message': 'Task started',
'time': '2019-04-09T09:58:04.095623-04:00',
},
{
'level': 'INFO',
'message': 'Task finished',
'time': '2019-04-09T09:58:25.903349-04:00',
}
],
'imported_version': {
'href': 'http://testserver/api/v2/collection-versions/26/',
'id': 26,
},
}
def test_fail_method_not_allowed(self):
for method in ['POST', 'PUT', 'PATCH', 'DELETE']:
response = self.client.generic(method, self.url.format(id=145))
assert (response.status_code
== http_codes.HTTP_405_METHOD_NOT_ALLOWED)
| en | 0.843238 | 1.972265 | 2 |
src/easy_exchange_rates/eer.py | oddaspa/easy-exchange-rates | 2 | 6613798 | import requests
from datetime import datetime
import pandas as pd
class API():
def __init__(self):
self.base_url = 'https://api.exchangerate.host/timeseries?'
def get_exchange_rates(self, base_currency="EUR", start_date="2020-01-01", end_date="2020-01-04",targets=[]):
date_format = "%Y-%m-%d"
a = datetime.strptime(start_date, date_format)
b = datetime.strptime(end_date, date_format)
delta = b - a
if delta.days > 366:
print("Can not retieve more than 366 days of data")
return False
query = self.base_url + f'start_date={start_date}&end_date={end_date}&base={base_currency}'
if targets:
targets = ','.join(targets)
query += f'&symbols={targets}'
response = requests.get(query).json()
if response["success"]:
return response["rates"]
else:
return response
def to_dataframe(self,ts):
return pd.DataFrame(ts).T
def rolling_average(self, df, window=7):
roll = df.rolling(window).mean()
roll.columns = [f"roll_{window}_{tag}" for tag in list(roll.columns)]
return df.merge(roll,right_index=True, left_index=True)
def rolling_max(self, df, window=7):
roll = df.rolling(window).max()
roll.columns = [f"roll_{window}_{tag}" for tag in list(roll.columns)]
        return df.merge(roll,right_index=True, left_index=True) | none | 1 | 3.230995 | 3 |
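# A minimal usage sketch for the API class in the eer.py record above; it
# assumes the exchangerate.host endpoint is reachable and that the package is
# importable as easy_exchange_rates.eer. Dates and symbols are arbitrary.
from easy_exchange_rates.eer import API

api = API()
rates = api.get_exchange_rates(base_currency="USD",
                               start_date="2021-01-01",
                               end_date="2021-03-31",
                               targets=["EUR", "NOK"])
if rates:
    df = api.to_dataframe(rates)            # dates as rows, one column per symbol
    df = api.rolling_average(df, window=7)  # adds roll_7_<symbol> columns
    print(df.tail())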
run_keras_server.py | graba4/simple-rest-api | 0 | 6613799 | <filename>run_keras_server.py
# USAGE
# Start the server:
# python run_keras_server.py
# Submit a request via cURL:
# curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# Submita a request via Python:
# python simple_request.py
# import the necessary packages
#import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications import imagenet_utils
from PIL import Image
import numpy as np
import flask
import io
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
model = None
def load_model():
# load the pre-trained Keras model (here we are using a model
# pre-trained on ImageNet and provided by Keras, but you can
# substitute in your own networks just as easily)
global model
model = ResNet50(weights="imagenet")
def prepare_image(image, target):
# if the image mode is not RGB, convert it
if image.mode != "RGB":
image = image.convert("RGB")
# resize the input image and preprocess it
image = image.resize(target)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)
# return the processed image
return image
# endpoint that builds the JSON response returned to the client
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
data = {"success": False}
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST":
if flask.request.files.get("image"):
# read the image in PIL format
image = flask.request.files["image"].read()
image = Image.open(io.BytesIO(image))
# preprocess the image and prepare it for classification
image = prepare_image(image, target=(224, 224))
# classify the input image and then initialize the list
# of predictions to return to the client
preds = model.predict(image)
results = imagenet_utils.decode_predictions(preds)
data["predictions"] = []
# loop over the results and add them to the list of
# returned predictions
for (imagenetID, label, prob) in results[0]:
r = {"label": label, "probability": float(prob)}
data["predictions"].append(r)
# indicate that the request was a success
data["success"] = True
# return the data dictionary as a JSON response
return flask.jsonify(data)
# besides the main task, a query example
@app.route('/query-example')
def query_example():
# if key doesn't exist, returns None
language = flask.request.args.get('language')
# if key doesn't exist, returns a 400, bad request error
#framework = flask.request.args['framework']
# if key doesn't exist, returns None
framework = flask.request.args.get('framework')
# if key doesn't exist, returns None
website = flask.request.args.get('website')
return '''
<h1>The language value is: {}</h1>
<h1>The framework value is: {}</h1>
	<h1>The website value is: {}</h1>'''.format(language, framework, website)
# allow both GET and POST requests
@app.route('/form-example', methods=['GET', 'POST'])
def form_example():
# handle the POST request
if flask.request.method == 'POST':
language = flask.request.form.get('language')
framework = flask.request.form.get('framework')
return '''
<h1>The language value is: {}</h1>
<h1>The framework value is: {}</h1>'''.format(language, framework)
return '''
<form method="POST">
<div><label>Language: <input type="text" name="language"></label></div>
<div><label>Framework: <input type="text" name="framework"></label></div>
<input type="submit" value="Submit">
</form>'''
# GET requests will be blocked
# messages can be sent via cURL
@app.route('/json-example', methods=['POST'])
def json_example():
request_data = flask.request.get_json()
language = None
framework = None
python_version = None
example = None
boolean_test = None
if request_data:
if 'language' in request_data:
language = request_data['language']
if 'framework' in request_data:
framework = request_data['framework']
if 'version_info' in request_data:
if 'python' in request_data['version_info']:
python_version = request_data['version_info']['python']
if 'examples' in request_data:
if (type(request_data['examples']) == list) and (len(request_data['examples']) > 0):
example = request_data['examples'][0]
if 'boolean_test' in request_data:
boolean_test = request_data['boolean_test']
return '''
The language value is: {}
The framework value is: {}
The Python version is: {}
The item at index 0 in the example list is: {}
The boolean value is: {}'''.format(language, framework, python_version, example, boolean_test)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
print(("* Loading Keras model and Flask starting server..."
"please wait until server has fully started"))
load_model()
    app.run() | en | 0.726794 | 3.533336 | 4 |
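# A minimal Python client for the /predict endpoint defined above, mirroring
# the documented cURL call. It assumes the server is running on localhost:5000
# and that dog.jpg exists locally; both are illustrative.
import requests

with open("dog.jpg", "rb") as f:
    resp = requests.post("http://localhost:5000/predict", files={"image": f})
payload = resp.json()
if payload["success"]:
    for pred in payload["predictions"]:
        print(pred["label"], pred["probability"])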
tests/dataloader/test_single_turn_dialog.py | silverriver/contk | 1 | 6613800 | import copy
import pytest
from contk.dataloader import SingleTurnDialog, OpenSubtitles
from contk.metric import MetricBase
class TestSingleTurnDialog():
def base_test_init(self, dl):
assert isinstance(dl, SingleTurnDialog)
assert isinstance(dl.ext_vocab, list)
assert dl.ext_vocab[:4] == ["<pad>", "<unk>", "<go>", "<eos>"]
assert [dl.pad_id, dl.unk_id, dl.go_id, dl.eos_id] == [0, 1, 2, 3]
assert isinstance(dl.key_name, list)
assert dl.key_name
for word in dl.key_name:
assert isinstance(word, str)
assert isinstance(dl.vocab_list, list)
assert dl.vocab_list[:len(dl.ext_vocab)] == dl.ext_vocab
assert isinstance(dl.word2id, dict)
assert len(dl.word2id) == len(dl.vocab_list)
for i, word in enumerate(dl.vocab_list):
assert isinstance(word, str)
assert dl.word2id[word] == i
assert dl.vocab_size == len(dl.vocab_list)
post = dl.data["train"]['post']
resp = dl.data["train"]['resp']
assert len(post) == len(resp)
assert isinstance(post[0], list)
assert isinstance(resp[0], list)
assert post[0][0] == dl.go_id
assert post[0][-1] == dl.eos_id
assert resp[0][0] == dl.go_id
assert resp[0][-1] == dl.eos_id
def base_test_restart(self, dl):
with pytest.raises(ValueError):
dl.restart("unknown set")
with pytest.raises(ValueError):
dl.restart("train")
record_index = copy.copy(dl.index["train"])
dl.restart("train", batch_size=3, shuffle=False)
assert record_index == dl.index['train']
assert dl.batch_id["train"] == 0
assert dl.batch_size["train"] == 3
dl.restart("train", shuffle=True)
assert dl.batch_id["train"] == 0
record_index = copy.copy(dl.index["train"])
dl.restart("train", shuffle=False)
assert record_index == dl.index['train']
assert dl.batch_id["train"] == 0
def base_test_get_batch(self, dl):
assert len(dl.index["train"]) >= 2
batch = dl.get_batch("train", [0, 1])
assert len(batch["post_length"]) == 2
assert len(batch["resp_length"]) == 2
assert batch["post"].shape[0] == 2
assert batch["resp"].shape[0] == 2
def base_test_get_next_batch(self, dl):
with pytest.raises(ValueError):
dl.get_next_batch("unknown set")
with pytest.raises(RuntimeError):
dl.get_next_batch("train")
dl.restart("train", 7)
sample_num = 0
while True:
batch = dl.get_next_batch("train", ignore_left_samples=True)
if not batch:
break
assert batch["post"].shape[0] == 7
sample_num += batch["post"].shape[0]
assert sample_num + 7 >= len(dl.data["train"]["post"])
dl.restart("train", 7)
sample_num = 0
while True:
batch = dl.get_next_batch("train")
if not batch:
break
sample_num += batch["post"].shape[0]
assert sample_num == len(dl.data["train"]["post"])
def base_test_convert(self, dl):
sent_id = [0, 1, 2]
sent = ["<pad>", "<unk>", "<go>"]
assert sent == dl.index_to_sen(sent_id)
assert sent_id == dl.sen_to_index(sent)
sent = ["<unk>", "<go>", "<pad>", "<unkownword>", "<pad>", "<go>"]
sent_id = [1, 2, 0, 1, 0, 2]
assert sent_id == dl.sen_to_index(sent)
sent_id = [0, 1, 2, 0, 0, 3, 1, 0, 0]
sent = ["<pad>", "<unk>", "<go>", "<pad>", "<pad>", "<eos>", "<unk>", "<pad>", "<pad>"]
assert sent == dl.index_to_sen(sent_id, trim=False)
sent = ["<pad>", "<unk>", "<go>"]
assert sent == dl.index_to_sen(sent_id)
def base_test_teacher_forcing_metric(self, dl):
assert isinstance(dl.get_teacher_forcing_metric(), MetricBase)
def base_test_teacher_inference_metric(self, dl):
assert isinstance(dl.get_inference_metric(), MetricBase)
def base_test_multi_runs(self, dl_list):
assert all(x.vocab_list == dl_list[0].vocab_list for x in dl_list)
@pytest.fixture
def load_opensubtitles():
def _load_opensubtitles():
return OpenSubtitles("./tests/dataloader/dummy_opensubtitles")
return _load_opensubtitles
class TestOpenSubtitles(TestSingleTurnDialog):
def test_init(self, load_opensubtitles):
super().base_test_init(load_opensubtitles())
def test_restart(self, load_opensubtitles):
super().base_test_restart(load_opensubtitles())
def test_get_batch(self, load_opensubtitles):
super().base_test_get_batch(load_opensubtitles())
def test_get_next_batch(self, load_opensubtitles):
super().base_test_get_next_batch(load_opensubtitles())
def test_convert(self, load_opensubtitles):
super().base_test_convert(load_opensubtitles())
def test_init_multi_runs(self, load_opensubtitles):
super().base_test_multi_runs([load_opensubtitles() for i in range(3)])
| none | 1 | 2.109286 | 2 |
scripts/tests/test_mem.py | mfkiwl/pifive-cpu | 6 | 6613801 | from wbdbgbus import DebugBus
import time
from tqdm import tqdm
import random
from serial import Serial
DBG_PORT = "/dev/tty.usbserial-120001"
DBG_BAUD = 115200
MEM_RD_BASE = 0xA000_0000
MEM_WR_BASE = 0xC000_0000
MEM_SIZE = 8 * 1024 * 1024 # In bytes
READ_WR_SIDE = True
GAPS = range(1, 20+1)
GAPS_BYTE = [4*x for x in GAPS]
def regen_hash():
global HASH_PARAM
    HASH_PARAM = hex(random.randint(10**9, 10**10))  # randint needs ints, not floats
regen_hash()
with DebugBus(DBG_PORT, DBG_BAUD, fifo_size=1, timeout=0) as fpga:
fpga.reset()
print("Test gapped access")
for gap in tqdm(GAPS_BYTE):
for i in range(20):
addr = i * gap
if addr >= MEM_SIZE:
break
val = hash(HASH_PARAM + str(i)) & 0xFFFF_FFFF
fpga.write(MEM_WR_BASE+addr, val)
#assert fpga.read(MEM_RD_BASE+addr)[0] == val
if READ_WR_SIDE:
assert fpga.read(MEM_WR_BASE+addr)[0] == val
r = list(range(20))
for i in r:
addr = i * gap
if addr >= MEM_SIZE:
break
val = hash(HASH_PARAM + str(i)) & 0xFFFF_FFFF
assert fpga.read(MEM_RD_BASE+addr)[0] == val
if READ_WR_SIDE:
assert fpga.read(MEM_WR_BASE+addr)[0] == val
random.shuffle(r)
for i in r:
addr = i * gap
if addr >= MEM_SIZE:
break
val = hash(HASH_PARAM + str(i)) & 0xFFFF_FFFF
assert fpga.read(MEM_RD_BASE+addr)[0] == val
if READ_WR_SIDE:
assert fpga.read(MEM_WR_BASE+addr)[0] == val
| en | 0.423022 | 2.316638 | 2 |
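# A hardware-free sketch of the verification idea used in the test above:
# expected memory words are derived deterministically from a per-run seed
# string, so reads can be checked in any order. The seed is an arbitrary
# example; Python 3 salts hash() per process, hence re-checking within one run.
import random

seed = hex(random.randint(10**9, 10**10))
expected = {i: hash(seed + str(i)) & 0xFFFF_FFFF for i in range(20)}
order = list(expected)
random.shuffle(order)                        # order-independent verification
for i in order:
    assert (hash(seed + str(i)) & 0xFFFF_FFFF) == expected[i]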
tests/test_syntax_checkers.py | ehatton/sending | 0 | 6613802 | import os
import pytest
import subprocess
from pathlib import Path
from unittest import mock
from sending.syntax_checkers import LogFileChecker, FlatFileChecker, SyntaxChecker
@pytest.fixture
def logfile_checker():
logfile_checker = LogFileChecker("tests/fixtures/files/test_logfile.log")
return logfile_checker
@pytest.fixture
def flatfile_checker():
flatfile_checker = FlatFileChecker("tests/fixtures/files/test_tremblfile_1.new")
return flatfile_checker
def test_logfile_checker_ok(logfile_checker):
assert logfile_checker.ok is False
def test_flatfile_checker_ok(flatfile_checker):
assert flatfile_checker.ok is False
def test_logfile_checker_error_file(logfile_checker):
expected_error_file = "synerr.tmp"
assert logfile_checker._error_file == expected_error_file
def test_flatfile_checker_error_file(flatfile_checker):
expected_error_file = "tests/fixtures/files/test_tremblfile_1.new.log"
assert flatfile_checker._error_file == expected_error_file
@mock.patch("subprocess.run")
def test_syntax_checker_start_process(mock_run):
syntax_checker = SyntaxChecker("tests/fixtures/files/logfiles/logfile.log")
test_cmd = ["echo", "hello"]
syntax_checker._cmd = test_cmd
syntax_checker._start_process()
mock_run.assert_called_once_with(
test_cmd, capture_output=True, check=True, text=True
)
def test_logfile_checker_cmd(logfile_checker):
expected_cmd = ["runplug", logfile_checker.file_to_check, "-check"]
assert logfile_checker._cmd == expected_cmd
def test_flatfile_checker_cmd(flatfile_checker):
spsyntax_path = os.path.join(os.environ["BINPROT"], "spsyntax.pl")
expected_cmd = ["perl", spsyntax_path, "-c", "-a", flatfile_checker.file_to_check]
assert flatfile_checker._cmd == expected_cmd
def test_logfile_checker_ok_with_error(logfile_checker):
synerr = Path("tests/fixtures/files/syntax_errors/synerr_with_error.tmp")
logfile_checker.error_report = synerr.read_text()
assert logfile_checker.ok is False
def test_logfile_checker_ok_without_error(logfile_checker):
synerr = Path("tests/fixtures/files/syntax_errors/synerr_without_error.tmp")
logfile_checker.error_report = synerr.read_text()
assert logfile_checker.ok is True
def test_flatfile_checker_ok_with_error(flatfile_checker):
error_file = Path(
"tests/fixtures/files/syntax_errors/flatfile_check_with_errors.new.log"
)
flatfile_checker.error_report = error_file.read_text()
assert flatfile_checker.ok is False
def test_flatfile_checker_ok_without_error(flatfile_checker):
error_file = Path(
"tests/fixtures/files/syntax_errors/flatfile_check_without_errors.new.log"
)
flatfile_checker.error_report = error_file.read_text()
assert flatfile_checker.ok is True
| none | 1 | 2.260923 | 2 |
node/alphabet.py | muddyfish/PYKE | 24 | 6613803 | #!/usr/bin/env python
from node.generic_variable import Variable
class Alphabet(Variable):
char = "G"
contents = "abcdefghijklmnopqrstuvwxyz" | #!/usr/bin/env python
from node.generic_variable import Variable
class Alphabet(Variable):
char = "G"
contents = "abcdefghijklmnopqrstuvwxyz" | ru | 0.26433 | #!/usr/bin/env python | 2.42015 | 2 |
chaospy/distributions/collection/trunc_exponential.py | krystophny/chaospy | 1 | 6613804 | <filename>chaospy/distributions/collection/trunc_exponential.py
"""Truncated exponential distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
from .deprecate import deprecation_warning
class truncexpon(Dist):
"""Truncated exponential distribution."""
def __init__(self, b):
Dist.__init__(self, b=b)
def _pdf(self, x, b):
return numpy.exp(-x)/(1-numpy.exp(-b))
def _cdf(self, x, b):
return (1.0-numpy.exp(-x))/(1-numpy.exp(-b))
def _ppf(self, q, b):
return -numpy.log(1-q+q*numpy.exp(-b))
def _bnd(self, x, b):
return 0.0, b
class TruncExponential(Add):
"""
Truncated exponential distribution.
Args:
upper (float, Dist) : Location of upper threshold
scale (float, Dist) : Scaling parameter in the exponential distribution
shift (float, Dist) : Location parameter
Examples:
>>> distribution = chaospy.TruncExponential(2, 4)
>>> print(distribution)
TruncExponential(scale=4, shift=0, upper=2)
>>> q = numpy.linspace(0, 1, 5)
>>> print(numpy.around(distribution.inv(q), 4))
[0. 0.4142 0.8763 1.3988 2. ]
>>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))
[0. 0.25 0.5 0.75 1. ]
>>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))
[0.6354 0.5729 0.5104 0.4479 0.3854]
>>> print(numpy.around(distribution.sample(4), 4))
[1.1891 0.1852 1.873 0.8415]
>>> print(numpy.around(distribution.mom(1), 4))
0.917
>>> print(numpy.around(distribution.ttr([1, 2, 3]), 4))
[[1.0163 1.0024 1.0008]
[0.3292 0.2671 0.2572]]
"""
def __init__(self, upper=1, scale=1, shift=0):
self._repr = {"upper": upper, "scale": scale, "shift": shift}
Add.__init__(
self, left=truncexpon((upper-shift)*1./scale)*scale, right=shift)
Truncexpon = deprecation_warning(TruncExponential, "Truncexpon")
| en | 0.430098 | 2.420948 | 2 |
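# A quick standalone check (independent of chaospy) that the _ppf above is the
# closed-form inverse of _cdf: q = (1 - exp(-x)) / (1 - exp(-b)) solved for x
# gives x = -log(1 - q + q*exp(-b)). The b and q values below are arbitrary.
import numpy

b = 2.0
q = numpy.linspace(0.01, 0.99, 5)
x = -numpy.log(1 - q + q * numpy.exp(-b))            # mirrors truncexpon._ppf
cdf = (1.0 - numpy.exp(-x)) / (1 - numpy.exp(-b))    # mirrors truncexpon._cdf
assert numpy.allclose(cdf, q)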
pytorch_lightning/utilities/parsing.py | girishponkiya/pytorch-lightning | 1 | 6613805 | <filename>pytorch_lightning/utilities/parsing.py
from argparse import Namespace
def strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
    Copied from the Python implementation distutils.util.strtobool
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
>>> strtobool('YES')
1
>>> strtobool('FALSE')
0
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError(f'invalid truth value {val}')
def clean_namespace(hparams):
"""
Removes all functions from hparams so we can pickle
:param hparams:
:return:
"""
if isinstance(hparams, Namespace):
del_attrs = []
for k in hparams.__dict__:
if callable(getattr(hparams, k)):
del_attrs.append(k)
for k in del_attrs:
delattr(hparams, k)
elif isinstance(hparams, dict):
del_attrs = []
for k, v in hparams.items():
if callable(v):
del_attrs.append(k)
for k in del_attrs:
del hparams[k]
| en | 0.259833 | 3.246315 | 3 |
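# A short usage sketch for the two helpers above, assuming they are importable
# from pytorch_lightning.utilities.parsing; the Namespace contents are invented.
from argparse import Namespace
from pytorch_lightning.utilities.parsing import strtobool, clean_namespace

assert strtobool("on") == 1 and strtobool("No") == 0

hparams = Namespace(lr=0.01, act=lambda x: x)
clean_namespace(hparams)   # drops the callable 'act' so hparams can be pickled
assert not hasattr(hparams, "act") and hparams.lr == 0.01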
Part 1/Chapter 4/example 1.2.py | MineSelf2016/PythonInEconomicManagement | 0 | 6613806 | age = int(input("Please enter an integer for your age: "))
# age = "21"
# print(type(age))
if age >= 18:
print("你要对自己负责了")
else:
print("你还未成年,请在父母的庇护下慢慢成熟!")
# print(age)
| en | 0.800295 | 3.975041 | 4 |
ckan/tests/lib/test_auth_tkt.py | ziveo/ckan | 1 | 6613807 | # encoding: utf-8
"""
Test the added methods used by this subclass of
repoze.who.plugins.auth_tkt.AuthTktCookiePlugin
Subclassing FunctionalTestBase ensures the original config is restored
after each test.
"""
import pytest
from ckan.tests import helpers
from ckan.lib.repoze_plugins.auth_tkt import make_plugin
@pytest.mark.ckan_config("who.httponly", True)
def test_httponly_expected_cookies_with_config_httponly_true():
"""
The returned cookies are in the format we expect, with HttpOnly flag.
"""
plugin = make_plugin(secret="sosecret")
cookies = plugin._get_cookies(
environ={"SERVER_NAME": "0.0.0.0"}, value="HELLO"
)
expected_cookies = [
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; HttpOnly'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=0.0.0.0; HttpOnly'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=.0.0.0.0; HttpOnly'),
]
assert cookies == expected_cookies
@pytest.mark.usefixtures("clean_db")
@pytest.mark.ckan_config("who.httponly", False)
def test_httponly_expected_cookies_with_config_httponly_false():
"""
The returned cookies are in the format we expect, without HttpOnly
flag.
"""
plugin = make_plugin(secret="sosecret")
cookies = plugin._get_cookies(
environ={"SERVER_NAME": "0.0.0.0"}, value="HELLO"
)
expected_cookies = [
("Set-Cookie", 'auth_tkt="HELLO"; Path=/'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=0.0.0.0'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=.0.0.0.0'),
]
assert cookies == expected_cookies
def test_httponly_expected_cookies_without_config_httponly():
"""
The returned cookies are in the format we expect, with HttpOnly flag.
"""
plugin = make_plugin(secret="sosecret")
cookies = plugin._get_cookies(
environ={"SERVER_NAME": "0.0.0.0"}, value="HELLO"
)
expected_cookies = [
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; HttpOnly'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=0.0.0.0; HttpOnly'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=.0.0.0.0; HttpOnly'),
]
assert cookies == expected_cookies
@pytest.mark.ckan_config("who.secure", True)
def test_secure_expected_cookies_with_config_secure_true():
"""
The returned cookies are in the format we expect, with secure flag.
"""
plugin = make_plugin(secret="sosecret")
cookies = plugin._get_cookies(
environ={"SERVER_NAME": "0.0.0.0"}, value="HELLO"
)
expected_cookies = [
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; secure; HttpOnly'),
(
"Set-Cookie",
'auth_tkt="HELLO"; Path=/; Domain=0.0.0.0; secure; HttpOnly',
),
(
"Set-Cookie",
'auth_tkt="HELLO"; Path=/; Domain=.0.0.0.0; secure; HttpOnly',
),
]
assert cookies == expected_cookies
@pytest.mark.ckan_config("who.secure", False)
def test_secure_expected_cookies_with_config_secure_false():
"""
The returned cookies are in the format we expect, without secure
flag.
"""
plugin = make_plugin(secret="sosecret")
cookies = plugin._get_cookies(
environ={"SERVER_NAME": "0.0.0.0"}, value="HELLO"
)
expected_cookies = [
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; HttpOnly'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=0.0.0.0; HttpOnly'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=.0.0.0.0; HttpOnly'),
]
assert cookies == expected_cookies
def test_secure_expected_cookies_without_config_secure():
"""
The returned cookies are in the format we expect, without secure flag.
"""
plugin = make_plugin(secret="sosecret")
cookies = plugin._get_cookies(
environ={"SERVER_NAME": "0.0.0.0"}, value="HELLO"
)
expected_cookies = [
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; HttpOnly'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=0.0.0.0; HttpOnly'),
("Set-Cookie", 'auth_tkt="HELLO"; Path=/; Domain=.0.0.0.0; HttpOnly'),
]
assert cookies == expected_cookies
def test_timeout_not_set_in_config():
"""
Creating a CkanAuthTktCookiePlugin instance without setting timeout in
config sets correct values in CkanAuthTktCookiePlugin instance.
"""
plugin = make_plugin(secret="sosecret")
assert plugin.timeout is None
assert plugin.reissue_time is None
@pytest.mark.ckan_config("who.timeout", 9000)
def test_timeout_set_in_config():
"""
Setting who.timeout in config sets correct values in
CkanAuthTktCookiePlugin instance.
"""
plugin = make_plugin(secret="sosecret")
assert plugin.timeout == 9000
assert plugin.reissue_time == 900
@pytest.mark.ckan_config("who.timeout", 9000)
@pytest.mark.ckan_config("who.reissue_time", 200)
def test_reissue_set_in_config():
"""
Setting who.reissue in config sets correct values in
CkanAuthTktCookiePlugin instance.
"""
plugin = make_plugin(secret="sosecret")
assert plugin.timeout == 9000
assert plugin.reissue_time == 200
| en | 0.802959 | 1.816151 | 2 |
unified/apis/hpc.py | LoganCook/reporting-unified | 0 | 6613808 | <filename>unified/apis/hpc.py
from . import app, configure, request, instance_method
from . import get_or_create, commit
from . import QueryResource, BaseIngestResource, RangeQuery
from ..models.hpc import Queue, Host, Owner, Allocation, Job
class QueueResource(QueryResource):
"""HPC Queue"""
query_class = Queue
class HostResource(QueryResource):
"""HPC Host"""
query_class = Host
class HostSummary(RangeQuery):
def _get(self, id='', **kwargs):
return instance_method(Host, 'summarise', id,
default={},
start_ts=kwargs['start'],
end_ts=kwargs['end'])
class OwnerResource(QueryResource):
"""HPC Job Owner"""
query_class = Owner
class OwnerSummary(RangeQuery):
def _get(self, id='', **kwargs):
return instance_method(Owner, 'summarise', id,
start_ts=kwargs['start'],
end_ts=kwargs['end'])
class AllocationResource(QueryResource):
"""HPC Job-Host Allocation"""
query_class = Allocation
class AllocationSummary(RangeQuery):
def _get(self, **kwargs):
return Allocation.summarise(start_ts=kwargs['start'], end_ts=kwargs['end'])
class AllocationRuntimeSummary(RangeQuery):
"""Gets job run statistics finished between start_ts and end_ts.
Similar to AllocationSummary but includes run time. Grouped by host
"""
def _get(self, **kwargs):
return Allocation.summarise_runtime(start_ts=kwargs['start'], end_ts=kwargs['end'])
class JobResource(QueryResource):
"""HPC Job"""
query_class = Job
class JobList(RangeQuery):
def _get(self, **kwargs):
return Job.list(start_ts=kwargs['start'], end_ts=kwargs['end'])
class JobSummary(RangeQuery):
def _get(self, **kwargs):
return Job.summarise(start_ts=kwargs['start'], end_ts=kwargs['end'])
class IngestResource(BaseIngestResource):
def ingest(self):
"""Ingest jobs."""
messages = [message
for message in request.get_json(force=True)
if message["data"].get("state") == "exited"]
for message in messages:
data = message["data"]
queue = get_or_create(Queue, name=data["queue"])
owner = get_or_create(Owner, name=data["owner"])
job = get_or_create(Job, job_id=data["jobid"])
job.name = data["jobname"]
job.queue = queue
job.owner = owner
job.start = data["start"]
job.end = data["end"]
total_cores = 0
for hostname, slots in data["exec_host"].items():
host = get_or_create(Host, name=hostname)
get_or_create(Allocation, job=job, host=host, cores=len(slots))
total_cores += len(slots)
job.cores = total_cores
job.cpu_seconds = total_cores * (data["end"] - data["start"])
commit()
return "", 204
def setup():
"""Let's roll."""
resources = {
"/host": HostResource,
"/host/<id>/summary": HostSummary,
"/queue": QueueResource,
"/owner": OwnerResource,
"/owner/<id>/summary": OwnerSummary,
"/job": JobResource,
"/job/list": JobList,
"/job/summary": JobSummary,
"/allocation": AllocationResource,
"/allocation/summary": AllocationSummary,
"/allocation/runtime/summary": AllocationRuntimeSummary,
"/ingest": IngestResource
}
configure(resources)
setup()
| en | 0.952273 | HPC Queue HPC Host HPC Job Owner HPC Job-Host Allocation Gets job run statistics finished between start_ts and end_ts. Similar to AllocationSummary but includes run time. Grouped by host HPC Job Ingest jobs. Let's roll. | 2.2409 | 2 |
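A client-side sketch of the range endpoints registered above (hypothetical host and timestamps; the start/end query-parameter names are assumed from the RangeQuery handlers):

import requests  # assumption: the service is deployed and reachable over HTTP

BASE = "http://localhost:5000"                       # hypothetical address
params = {"start": 1546300800, "end": 1546387200}    # one day, epoch seconds
job_summary = requests.get(BASE + "/job/summary", params=params).json()
host_summary = requests.get(BASE + "/host/some-host-id/summary", params=params).json()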
helper/calcuateAccuracy.py | byubrg/CAMDA_CMAP_Challenge | 2 | 6613809 | from math import sqrt
def getAccuracy(truePositives, trueNegatives, falsePositives, falseNegatives):
    return((truePositives + trueNegatives) / float(truePositives + trueNegatives + falseNegatives + falsePositives))
def getSensitivity(truePositives, falseNegatives):
    return(truePositives/float(truePositives + falseNegatives))
def getSpecificity(trueNegatives, falsePositives):
    return(trueNegatives/float(trueNegatives + falsePositives))
def getMCC(truePositives, trueNegatives, falsePositives, falseNegatives):
    return((truePositives * trueNegatives - falsePositives * falseNegatives) / sqrt((truePositives + falsePositives) * (truePositives + falseNegatives) * (trueNegatives + falsePositives) * (trueNegatives + falseNegatives)))
def printConfusionCalculations(TP, TN, FP, FN) :
print("accuracy: " + str(getAccuracy(TP, TN, FP, FN)))
print("sensitivity: " + str(getSensitivity(TP, FN)))
print("specificity: " + str(getSpecificity(TN, FP)))
print("MCC: " + str(getMCC(TP, TN, FP, FN)))
def getConfusionInformation(TP, TN, FP, FN) :
return getAccuracy(TP, TN, FP, FN), getSensitivity(TP, FN), getSpecificity(TN, FP), getMCC(TP, TN, FP, FN)
| none | 1 | 2.966809 | 3 | |
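A usage sketch with hypothetical confusion-matrix counts:

TP, TN, FP, FN = 50, 40, 5, 5   # hypothetical values
printConfusionCalculations(TP, TN, FP, FN)
accuracy, sensitivity, specificity, mcc = getConfusionInformation(TP, TN, FP, FN)
# Caveat: getMCC divides by sqrt(...) and raises ZeroDivisionError when any
# row or column of the confusion matrix is empty; guard the call if needed.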
MessagePushQueue.py | violas-core/violas-push-server | 0 | 6613810 |
import logging
from Singleton import Singleton
class MessagePushQueue(Singleton):
def __init__(self):
self.pushQueue = []
def AddMessage(self, version):
self.pushQueue.append(version)
logging.debug(f"Add new item: {version}, Queue size: {len(self.pushQueue)}")
def PopMessage(self):
try:
item = self.pushQueue.pop(0)
logging.debug(f"Pop item: {item}, Queue size: {len(self.pushQueue)}")
except IndexError:
# logging.debug(f"Queue is empty.")
return None
return item
| en | 0.211611 | # logging.debug(f"Queue is empty.") | 3.061261 | 3 |
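A usage sketch (assumes the Singleton base class makes construction return one shared instance):

q1 = MessagePushQueue()
q2 = MessagePushQueue()          # same instance via Singleton (assumed behavior)
q1.AddMessage("version-1")       # hypothetical payload
assert q2.PopMessage() == "version-1"
assert q2.PopMessage() is None   # empty queue yields None
# Note: list.pop(0) is O(n); collections.deque.popleft() would be O(1)
# if throughput ever matters.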
setup.py | tomography/mpiarray | 1 | 6613811 | from distutils.core import setup
setup(name='mpiarray',
version='1.1',
description='Numpy Arrays Distributed over MPI',
author='<NAME>',
author_email='<EMAIL>',
py_modules=['mpiarray'],
      ) | none | 1 | 0.95859 | 1 |
python/cendalytics/feedback/core/svc/generate_meta_sentiment.py | jiportilla/ontology | 0 | 6613812 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class GenerateMetaSentiment(BaseObject):
""" Retrieve Source Records for Feedback Sentiment Processing """
def __init__(self,
df_summary: DataFrame,
is_debug: bool = False):
"""
Created:
16-Jan-2020
<EMAIL>
* the refactoring of a notebook from
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1746
:param df_summary:
DataFrame of this Report:
'feedback_tag_<DATE>-summary-<TS>.csv'
e.g.,
'feedback_tag_20191202-summary-1575690754.csv'
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._df_summary = df_summary
def _summarize(self) -> DataFrame:
record_ids = sorted(self._df_summary['RecordID'].unique())
master = []
for record_id in record_ids:
df2 = self._df_summary[self._df_summary['RecordID'] == record_id]
def _region():
country = df2['Country'].unique()[0]
region = df2['Region'].unique()[0]
if region.lower() == 'africa':
return "mea"
if region.lower() == 'middle east':
return "mea"
if region.lower() == 'asia':
return "ap"
if country.lower() in ["australia", "new zealand", "sri lanka", "india"]:
return "ap"
if country.lower() in ["china", "hong kong", "taiwan"]:
return "gcg"
return region
cons = len(df2[df2['Category'] == 'Cons'])
pros = len(df2[df2['Category'] == 'Pros'])
suggestions = len(df2[df2['Category'] == 'Suggestions'])
def adjudicate():
if cons >= pros - 1 and cons > suggestions:
return "Cons"
if pros > cons and pros > suggestions + 1:
return "Pros"
return "Suggestions"
for i in range(0, 10):
master.append({
"Category": adjudicate(),
"Country": df2['Country'].unique()[0],
"Leadership": df2['Leadership'].unique()[0],
"RecordID": df2['RecordID'].unique()[0],
"Region": _region(),
"Schema": df2['Schema'].unique()[0],
"Tag": df2['Tag'].unique()[0],
"Tenure": df2['Tenure'].unique()[0]})
df_output = pd.DataFrame(master)
return df_output
def process(self) -> DataFrame:
return self._summarize()
| en | 0.404508 | #!/usr/bin/env python # -*- coding: UTF-8 -*- Retrieve Source Records for Feedback Sentiment Processing Created: 16-Jan-2020 <EMAIL> * the refactoring of a notebook from https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1746 :param df_summary: DataFrame of this Report: 'feedback_tag_<DATE>-summary-<TS>.csv' e.g., 'feedback_tag_20191202-summary-1575690754.csv' :param is_debug: | 2.572932 | 3 |
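A minimal instantiation sketch (hypothetical row values; assumes the internal BaseObject dependency is importable, so this is not standalone-runnable):

import pandas as pd

rows = [{"RecordID": 1, "Category": "Pros", "Country": "Australia",
         "Region": "Asia", "Leadership": "L1", "Schema": "S",
         "Tag": "T", "Tenure": "5+"}]        # hypothetical summary row
df = pd.DataFrame(rows)
svc = GenerateMetaSentiment(df_summary=df, is_debug=True)
df_out = svc.process()   # ten weighted rows per RecordID; Region 'Asia' maps to 'ap'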
tests/test_module.py | cvpines/pysamplespace | 0 | 6613813 | import toml
import samplespace
import docs.source.conf
PROJECT_FILE = '../pyproject.toml'
def test_version():
with open(PROJECT_FILE, 'r') as f:
config = toml.load(f)
version = config['tool']['poetry']['version']
assert samplespace.__version__ == version
assert docs.source.conf.release == version
| none | 1 | 2.022324 | 2 | |
2018/aoc2018_5b.py | ByteCommander/AdventOfCode | 2 | 6613814 |
# Advent Of Code 2018, day 5, part 2
# http://adventofcode.com/2018/day/5
# solution by ByteCommander, 2018-12-05
from collections import deque
from string import ascii_lowercase
with open("inputs/aoc2018_5.txt") as file:
whole_molecule = file.read().strip()
shortest = len(whole_molecule)
for blocker in ascii_lowercase:
molecule = deque(whole_molecule.replace(blocker, "").replace(blocker.upper(), ""))
result = []
while molecule:
if not result:
result.append(molecule.popleft())
unit = molecule.popleft() if molecule else ""
if unit.swapcase() == result[-1]:
result.pop()
else:
result.append(unit)
shortest = min(shortest, len(result))
print(f"The shortest fully reacted molecule after removing one type is {shortest} units long.")
| en | 0.642523 | # Advent Of Code 2018, day 5, part 2 # http://adventofcode.com/2018/day/5 # solution by ByteCommander, 2018-12-05 | 3.070715 | 3 |
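An equivalent explicit-stack sketch of the reaction loop (it also avoids the empty-string sentinel the deque version can push when the molecule empties mid-iteration):

def react(polymer: str) -> int:
    stack = []
    for unit in polymer:
        if stack and stack[-1] == unit.swapcase():
            stack.pop()          # adjacent opposite-polarity pair annihilates
        else:
            stack.append(unit)
    return len(stack)

shortest = min(react(whole_molecule.replace(c, "").replace(c.upper(), ""))
               for c in ascii_lowercase)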
base/models.py | saurabhpetkar/club_portal | 2 | 6613815 |
import os
from django.core.exceptions import ValidationError
from django.db import models, IntegrityError, transaction
from django.contrib.auth.models import User
# Create your models here.
from django.db.models import signals, ProtectedError
from django.dispatch import receiver
def club_name_validator(name):
if '-' in name:
raise ValidationError('The name can not contain \'-\'')
def club_logo_upload(instance, filename):
return os.path.join('clubs', instance.name.replace(' ', '_'), filename)
class Club(models.Model):
name = models.CharField(max_length=100, validators=[club_name_validator], db_index=True)
full_name = models.CharField(max_length=100)
date_formed = models.DateField(auto_now_add=True)
email = models.EmailField(null=True, blank=True)
about = models.TextField(help_text="Say a few lines about your club", null=True, blank=True, max_length=500)
is_active = models.BooleanField(default=True)
is_supported = models.BooleanField(default=True)
# num_users = models.IntegerField(default=0)
back_img = models.ImageField(upload_to=club_logo_upload, blank=True, null=True)
def __str__(self):
return self.name
@property
def slug(self):
return self.name.replace(' ', '-')
def calc_users(self):
self.num_users = self.clubmember_set.all().count()
self.save()
class Meta:
ordering = ['name']
class ClubSettings(models.Model):
club = models.OneToOneField(to=Club, on_delete=models.CASCADE)
class ClubMentor(models.Model):
user = models.ForeignKey(to=User, on_delete=models.PROTECT)
club = models.ForeignKey(Club, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('user', 'club')
def __str__(self):
return self.club.name + "-" + self.user.username
class ClubPresident(models.Model):
user = models.ForeignKey(User, on_delete=models.PROTECT)
# We can't allow user to be deleted unless new president is set
club = models.OneToOneField(Club, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.club.name + "-" + self.user.username
class Meta:
unique_together = ('user', 'club')
class ClubModerator(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
club = models.ForeignKey(Club, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('user', 'club')
def __str__(self):
return self.club.name + "-" + self.user.username
class ClubMember(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
club = models.ForeignKey(Club, on_delete=models.CASCADE)
is_approved = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.username + '-' + self.club.name
class Meta:
unique_together = ('user', 'club')
class Notification(models.Model):
sender = models.ForeignKey(
User, on_delete=models.CASCADE, related_name="sentNotifications")
club = models.ForeignKey(Club, on_delete=models.CASCADE, null=True, blank=True)
# is_read = models.BooleanField(default=False, null=False)
receivers = models.ManyToManyField(
User, related_name="receivedNotifications", blank=True)
title = models.TextField()
message = models.TextField()
sent_at = models.DateTimeField(auto_now_add=True)
class EmailProvider(models.Model):
name = models.TextField()
current = models.IntegerField(default=0)
limit = models.IntegerField(null=False)
# in days, when to reset the current count
reset = models.IntegerField()
last_reset = models.DateTimeField(auto_now_add=True)
# @receiver(signals.post_save, sender=Club)
# def random_back_image(sender, instance, created, **kwargs):
# if created:
#
# if not hasattr(instance.back_img,'url'):
# pass
@receiver(signals.post_save, sender=ClubMember)
def member_follow_club(sender, instance, created, **kwargs):
# If a user is made a moderator, also make him a member of the club
try:
instance.user.userprofile.following_clubs.add(instance.club)
except IntegrityError:
# User is already a member. Do Nothing
pass
@receiver(signals.post_save, sender=ClubModerator)
def moderator_add_member(sender, instance, created, **kwargs):
# If a user is made a moderator, also make him a member of the club
try:
with transaction.atomic(): # We need to make atomic as code may throw integrity error
ClubMember.objects.create(user=instance.user, club=instance.club, is_approved=True)
except IntegrityError:
# User is already a member. Do Nothing
pass
@receiver(signals.post_save, sender=ClubPresident)
def president_add_moderator_member(sender, instance, created, **kwargs):
# If a user is made a president, also make him a member and moderator of the club
try:
with transaction.atomic(): # We need to make atomic as code may throw integrity error
ClubModerator.objects.create(user=instance.user, club=instance.club)
# Just making moderator is enough, as while making moderator, the above signal will be called, which will make him a member
except IntegrityError:
# User is already a moderator. Do Nothing
pass
@receiver(signals.post_save, sender=ClubMentor)
def mentor_add_moderator(sender, instance, created, **kwargs):
try:
with transaction.atomic():
ClubModerator.objects.create(user=instance.user, club=instance.club)
except IntegrityError:
# User is already a moderator. Do Nothing
pass
@receiver(signals.pre_delete, sender=ClubModerator)
def moderator_delete_protect_president(sender, instance, **kwargs):
# If a moderator is deleted, and if user is also president, prevent delete of moderator
if instance.club.clubpresident.user == instance.user:
raise ProtectedError("Can not delete moderator who is also president", instance)
@receiver(signals.pre_delete, sender=ClubMember)
def member_delete_moderator(sender, instance, **kwargs):
# If a member is deleted, also delete moderator
mod_instance = ClubModerator.objects.filter(user=instance.user, club=instance.club)
if mod_instance.exists():
mod_instance.delete() | en | 0.956109 | # Create your models here. # num_users = models.IntegerField(default=0) # We can't allow user to be deleted unless new president is set # is_read = models.BooleanField(default=False, null=False) # in days, when to reset the current count # @receiver(signals.post_save, sender=Club) # def random_back_image(sender, instance, created, **kwargs): # if created: # # if not hasattr(instance.back_img,'url'): # pass # If a user is made a moderator, also make him a member of the club # User is already a member. Do Nothing # If a user is made a moderator, also make him a member of the club # We need to make atomic as code may throw integrity error # User is already a member. Do Nothing # If a user is made a president, also make him a member and moderator of the club # We need to make atomic as code may throw integrity error # Just making moderator is enough, as while making moderator, the above signal will be called, which will make him a member # User is already a moderator. Do Nothing # User is already a moderator. Do Nothing # If a moderator is deleted, and if user is also president, prevent delete of moderator # If a member is deleted, also delete moderator | 2.364765 | 2 |
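A sketch of the signal cascade defined above (assumes already-saved User and Club instances named user and club):

president = ClubPresident.objects.create(user=user, club=club)
# post_save cascade: ClubPresident -> ClubModerator -> ClubMember(is_approved=True)
assert ClubModerator.objects.filter(user=user, club=club).exists()
assert ClubMember.objects.filter(user=user, club=club, is_approved=True).exists()
# Deleting the moderator row for a sitting president raises ProtectedError:
ClubModerator.objects.get(user=user, club=club).delete()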
lepidoptera/migrations/0019_first_populate_cached_binomial.py | BelgianBiodiversityPlatform/catalogue-lepidoptera-belgium-webapp | 2 | 6613816 | # Generated by Django 2.0.13 on 2019-04-23 08:51
from django.db import migrations
def noop(apps, schema_editor):
pass
def first_populate_cached_binomial(apps, schema_editor):
# The Genus model was recently added a new binomial_name denormalized field. We
# should make sure it's initially populated
Species = apps.get_model('lepidoptera', 'Species')
for s in Species.objects.all():
s.binomial_name = '{genus} {specific_epithet}'.format(genus=s.genus_name, specific_epithet=s.name)
s.save()
class Migration(migrations.Migration):
dependencies = [
('lepidoptera', '0018_species_binomial_name'),
]
operations = [
migrations.RunPython(first_populate_cached_binomial, noop),
]
| en | 0.973137 | # Generated by Django 2.0.13 on 2019-04-23 08:51 # The Genus model was recently added a new binomial_name denormalized field. We # should make sure it's initially populated | 2.08984 | 2 |
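On large tables the per-row save() above issues one UPDATE per species. A batched sketch for comparison (bulk_update requires Django >= 2.2, so it would not apply to this 2.0.x migration as-is):

def first_populate_cached_binomial(apps, schema_editor):
    Species = apps.get_model('lepidoptera', 'Species')
    to_update = []
    for s in Species.objects.all().iterator():
        s.binomial_name = '{} {}'.format(s.genus_name, s.name)
        to_update.append(s)
    Species.objects.bulk_update(to_update, ['binomial_name'], batch_size=500)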
antarest/study/storage/variantstudy/model/command/update_config.py | AntaresSimulatorTeam/antaREST | 2 | 6613817 |
from typing import Dict, Any, Union, List, Optional
from antarest.core.custom_types import JSON, SUB_JSON
from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy
from antarest.study.storage.rawstudy.model.filesystem.ini_file_node import (
IniFileNode,
)
from antarest.study.storage.variantstudy.model.model import CommandDTO
from antarest.study.storage.variantstudy.model.command.common import (
CommandOutput,
CommandName,
)
from antarest.study.storage.variantstudy.model.command.icommand import (
ICommand,
MATCH_SIGNATURE_SEPARATOR,
)
class UpdateConfig(ICommand):
target: str
data: Union[str, int, float, bool, JSON]
def __init__(self, **data: Any) -> None:
super().__init__(
command_name=CommandName.UPDATE_CONFIG, version=1, **data
)
def _apply(self, study_data: FileStudy) -> CommandOutput:
url = self.target.split("/")
tree_node = study_data.tree.get_node(url)
if not isinstance(tree_node, IniFileNode):
return CommandOutput(
status=False,
message=f"Study node at path {self.target} is invalid",
)
study_data.tree.save(self.data, url)
return CommandOutput(status=True, message="ok")
def to_dto(self) -> CommandDTO:
return CommandDTO(
action=CommandName.UPDATE_CONFIG.value,
args={
"target": self.target,
"data": self.data,
},
)
def match_signature(self) -> str:
return str(
self.command_name.value + MATCH_SIGNATURE_SEPARATOR + self.target
)
def match(self, other: ICommand, equal: bool = False) -> bool:
if not isinstance(other, UpdateConfig):
return False
simple_match = self.target == other.target
if not equal:
return simple_match
return simple_match and self.data == other.data
def revert(
self, history: List["ICommand"], base: Optional[FileStudy] = None
) -> List["ICommand"]:
for command in reversed(history):
if (
isinstance(command, UpdateConfig)
and command.target == self.target
):
return [command]
if base is not None:
from antarest.study.storage.variantstudy.model.command.utils_extractor import (
CommandExtraction,
)
return [
(
self.command_context.command_extractor
or CommandExtraction(self.command_context.matrix_service)
).generate_update_config(base.tree, self.target.split("/"))
]
return []
def _create_diff(self, other: "ICommand") -> List["ICommand"]:
return [other]
def get_inner_matrices(self) -> List[str]:
return []
| none | 1 | 2.218308 | 2 |
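A DTO round-trip sketch for the command above (target/data values hypothetical; assumes a prepared command_context, since ICommand instances appear to require one):

cmd = UpdateConfig(target="settings/general/mode", data="economy",
                   command_context=ctx)      # ctx: assumed CommandContext
dto = cmd.to_dto()
assert dto.action == CommandName.UPDATE_CONFIG.value
# match() compares targets only unless equal=True is passed:
other = UpdateConfig(target="settings/general/mode", data="other",
                     command_context=ctx)
assert cmd.match(other) and not cmd.match(other, equal=True)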
src/vgg19/theano_model/preliminary_model.py | dkdanielkost/Theano-Style-Transfer | 0 | 6613818 | from vgg19_model import VGG19
import re
import theano
import theano.tensor as T
def RMSprop(cost, params, lr=1, rho=0.9, epsilon=1e-6):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = rho * acc + (1 - rho) * g ** 2
gradient_scaling = T.sqrt(acc_new + epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - lr * g))
return updates
def generate_img(content, style, white_noise, layer_weights, bias_weights):
# puts params in [Weights, bias] format for gradient/update step, might turn into function later
'''
params=[]
for key in layer_weights.keys():
ln = key.split('_')
for b_key in bias_weights.keys():
bn = b_key.split('_')
if (ln[0]==bn[0]) and (ln[1]==bn[1]):
params += [theano.shared(value=layer_weights[key], borrow=True),
theano.shared(value=bias_weights[b_key], borrow=True)]
'''
wn = theano.shared(value=white_noise.reshape(224,224,3), name='wn', borrow=True)
vgg19 = VGG19(input_image_shape=(1,3,224,224))
#cont_vgg = VGG19(input_image_shape=content.shape)
#style_vgg = VGG19(input_image_shape=style.shape)
#wn_vgg = VGG19(input_image_shape=white_noise.shape)
get_content_layer = theano.function(inputs=[vgg19.input],
outputs=vgg19.conv4_2.output)
get_style_layers = theano.function(inputs=[vgg19.input],
outputs=[vgg19.conv1_1.output, vgg19.conv2_1.output,
vgg19.conv3_1.output, vgg19.conv4_1.output,
vgg19.conv5_1.output])
#get_params = theano.function(inputs=[vgg19.input], outputs = [vgg19.conv1_1.params])
######### Stuff above this only needs to be run once ##########
#sum(np.mean(np.square(L1[i]+L2[i])) for i in range(len(L1)))
#might be just wn
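    # NOTE: get_content_layer/get_style_layers are *compiled* functions, so the
    # expressions below are built from numeric arrays. The resulting `cost` is
    # not symbolically connected to `wn`, and T.grad(cost, wn) will raise a
    # DisconnectedInputError. A fully symbolic sketch follows this file.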
contents = T.mean(T.sqr(get_content_layer(wn.get_value())-get_content_layer(content)))
wn_style = get_style_layers(wn.get_value())
pnt_style = get_style_layers(style)
# might need to edit #
styles = (T.mean(T.sqr(wn_style[0]-pnt_style[0])) + T.mean(T.sqr(wn_style[1]-pnt_style[1])) +
T.mean(T.sqr(wn_style[2]-pnt_style[2])) + T.mean(T.sqr(wn_style[3]-pnt_style[3])) +
T.mean(T.sqr(wn_style[4]-pnt_style[4])))
cost = contents + styles
#updates = RMSprop(cost, params)
img_grad = T.grad(cost, wn)
train_model = theano.function([], cost, updates=[(wn,wn-img_grad)])
train_model()
#def train_wn(train_func):
#train_func()
'''
import theano.tensor as T
ima = T.dtensor3('ima')
sty = T.dtensor3('sty')
wn = theano.shared(value=white_noise.reshape(224,224,3), name='wn', borrow=True)
params = [wn]
cost = T.mean(T.sqr(wn-ima))+T.mean(T.sqr(wn-sty))
updates = RMSprop(cost, params)
train_model = theano.function([ima, sty], cost, updates=updates)
for i in range(1000):
train_model(tubingen, starry_night)
plt.imshow(wn.get_value())
''' | en | 0.545512 | # puts params in [Weights, bias] format for gradient/update step, might turn into function later params=[] for key in layer_weights.keys(): ln = key.split('_') for b_key in bias_weights.keys(): bn = b_key.split('_') if (ln[0]==bn[0]) and (ln[1]==bn[1]): params += [theano.shared(value=layer_weights[key], borrow=True), theano.shared(value=bias_weights[b_key], borrow=True)] #cont_vgg = VGG19(input_image_shape=content.shape) #style_vgg = VGG19(input_image_shape=style.shape) #wn_vgg = VGG19(input_image_shape=white_noise.shape) #get_params = theano.function(inputs=[vgg19.input], outputs = [vgg19.conv1_1.params]) ######### Stuff above this only needs to be run once ########## #sum(np.mean(np.square(L1[i]+L2[i])) for i in range(len(L1))) #might be just wn # might need to edit # #updates = RMSprop(cost, params) #def train_wn(train_func): #train_func() import theano.tensor as T ima = T.dtensor3('ima') sty = T.dtensor3('sty') wn = theano.shared(value=white_noise.reshape(224,224,3), name='wn', borrow=True) params = [wn] cost = T.mean(T.sqr(wn-ima))+T.mean(T.sqr(wn-sty)) updates = RMSprop(cost, params) train_model = theano.function([ima, sty], cost, updates=updates) for i in range(1000): train_model(tubingen, starry_night) plt.imshow(wn.get_value()) | 2.078978 | 2 |
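A minimal fully-symbolic sketch of the content loss, so the gradient w.r.t. the white-noise image is well-defined (variable names hypothetical; uses theano.clone to rewire the network input):

wn4 = wn.dimshuffle('x', 2, 0, 1)                    # (224,224,3) -> (1,3,224,224)
content_feat = theano.clone(vgg19.conv4_2.output,
                            replace={vgg19.input: wn4})
target_feat = get_content_layer(content)             # numeric target is fine here
content_loss = T.mean(T.sqr(content_feat - target_feat))
grad = T.grad(content_loss, wn)                      # now symbolically connected
step = theano.function([], content_loss, updates=[(wn, wn - 0.1 * grad)])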
gdtoolkit/linter/types.py | dzil123/godot-gdscript-toolkit | 361 | 6613819 |
from dataclasses import dataclass
@dataclass
class Range:
begin: int
end: int | none | 1 | 1.901465 | 2 | |
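A one-line usage sketch of the generated dataclass API:

r = Range(begin=0, end=10)   # dataclass supplies __init__, __repr__, __eq__
assert r == Range(0, 10) and r.begin < r.end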
shot_detector/features/norms/base_norm.py | w495/shot_detector | 18 | 6613820 | # -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
class BaseNorm(object):
"""
...
"""
def norm(self, vector, **kwargs):
"""
:param vector:
:param kwargs:
:return:
"""
length = self.__class__.length(vector, **kwargs)
return length
@classmethod
def length(cls, vector, **kwargs):
"""
:param vector:
:param kwargs:
:return:
"""
raise NotImplementedError('length')
| en | 0.726302 | # -*- coding: utf8 -*- This is part of shot detector. Produced by w495 at 2017.05.04 04:18:27 ... :param vector: :param kwargs: :return: :param vector: :param kwargs: :return: | 2.505796 | 3 |
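A sketch of a concrete subclass showing the template-method pattern BaseNorm expects (Euclidean length; class name hypothetical):

import math

class L2Norm(BaseNorm):
    @classmethod
    def length(cls, vector, **kwargs):
        # Euclidean (L2) length of an iterable of numbers.
        return math.sqrt(sum(x * x for x in vector))

norm_value = L2Norm().norm([3, 4])   # -> 5.0, dispatched via BaseNorm.norm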
apps/agents/migrations/0001_initial.py | l3l3l/vulnman | 0 | 6613821 |
# Generated by Django 3.2.9 on 2021-12-15 08:01
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Agent',
fields=[
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('key', models.CharField(max_length=128, primary_key=True, serialize=False)),
('name', models.CharField(max_length=28)),
],
options={
'verbose_name': 'Token',
'verbose_name_plural': 'Tokens',
'abstract': False,
},
),
migrations.CreateModel(
name='AgentQueue',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('command', models.TextField()),
('exit_code', models.IntegerField(blank=True, null=True)),
('output', models.TextField(blank=True, null=True)),
('in_progress', models.BooleanField(default=False)),
('execution_started', models.DateTimeField(blank=True, null=True)),
('execution_ended', models.DateTimeField(blank=True, null=True)),
('agent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='agents.agent')),
],
options={
'ordering': ['-date_updated'],
},
),
] | en | 0.908322 | # Generated by Django 3.2.9 on 2021-12-15 08:01 | 1.753247 | 2 |
hacktivizm.py | hawk-0fcx/port | 1 | 6613822 | from tkinter import*
import socket
pen = Tk()
pen.geometry("330x500")
pen.title("HACKTİVİZM - H4WK OFCX")
def tarama():
s1=str(enturl.get())
liste=[21,22,23,25,80,139,443,445,3389]
try:
for port in liste:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result=sock.connect_ex((s1,port))
if result== 0 :
listsonuc.insert(1, "Port{} -> Açık".format(port))
else:
listsonuc.insert(1, "Port{} -> Kapalı ".format(port))
sock.close()
except sock.error:
print("Bilgisayar Yok")
lburl = Label(pen, text="URL veya İP Adresi", font="Verdana 12 ", fg="black",bg="white")
lburl.place(x=60, y=20)
listsonuc = Listbox(pen, font="Verdana 12 bold", width="25", height="17",fg="white", bg="black")
listsonuc.place(x=27, y=140)
enturl = Entry(pen, font="Verdana 12 bold", fg="blue")
enturl.place(x=50 ,y=50)
btntara=Button(pen, text="Portları Tara ", font="Verdana 12 bold",fg="white" ,bg="black", command=tarama)
btntara.place(x=80, y=90)
pen.mainloop()
| none | 1 | 3.218194 | 3 | |
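A hardened variant of the scan loop for reference: adding a connect timeout keeps unreachable hosts from blocking the GUI thread (0.5 s is an arbitrary choice; function name hypothetical):

def tara(host, ports, timeout=0.5):
    acik = []
    for port in ports:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(timeout)
            if sock.connect_ex((host, port)) == 0:
                acik.append(port)   # port accepted the TCP handshake
    return acik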
suites/Operations/CommitteeMember/CommitteeFrozenBalanceDeposit.py | echoprotocol/pytests | 1 | 6613823 |
# -*- coding: utf-8 -*-
from common.base_test import BaseTest
from project import INIT0_PK
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to
SUITE = {
"description": "Operation 'committee_frozen_balance_deposit'"
}
@lcc.prop("main", "type")
@lcc.tags("operations", "committee_member_operations", "committee_frozen_balance_deposit")
@lcc.suite("Check work of operation 'committee_frozen_balance_deposit'", rank=1)
class CommitteeFrozenBalanceDeposit(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.init0 = None
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
lcc.log_info("API identifiers are: database='{}'".format(self.__database_api_identifier))
self.committee_members_info = self.get_active_committee_members_info(self.__database_api_identifier)
self.init0 = self.committee_members_info[0]["account_id"]
lcc.log_info("Echo initial accounts: {}".format(self.init0))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of operation 'committee_frozen_balance_deposit'")
def method_main_check(self, get_random_integer):
amount_to_freeze = get_random_integer + 10
lcc.set_step("Get first active committee member id and account id")
committee_member_id = self.committee_members_info[0]["committee_id"]
lcc.log_info("Committee member id: '{}' and account id: '{}'".format(committee_member_id, self.init0))
lcc.set_step("Get active committee member balance")
params = [self.init0, [self.echo_asset]]
response_id = self.send_request(
self.get_request("get_account_balances", params), self.__database_api_identifier
)
current_balance = self.get_response(response_id)["result"][0]["amount"]
lcc.log_info("{} account id balance: {}".format(self.init0, current_balance))
lcc.set_step("Check active committee member frozen balance")
response_id = self.send_request(
self.get_request("get_committee_frozen_balance", [committee_member_id]), self.__database_api_identifier
)
current_frozen_balance = self.get_response(response_id)["result"]["amount"]
lcc.log_info("{} account id frozen balance: {}".format(self.init0, current_frozen_balance))
lcc.set_step("Freeze part of active committee member balance")
operation = self.echo_ops.get_committee_frozen_balance_deposit_operation(
echo=self.echo,
committee_member=committee_member_id,
committee_member_account=self.init0,
amount=amount_to_freeze,
asset_id=self.echo_asset,
signer=INIT0_PK
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
if not self.is_operation_completed(broadcast_result, expected_static_variant=0):
raise Exception("Balance is not freezed")
lcc.log_info("Freeze {} assets".format(amount_to_freeze))
lcc.set_step("Get required fee for 'committee_frozen_balance_deposit' operation")
response_id = self.send_request(
self.get_request("get_required_fees", [[operation[:-1]], self.echo_asset]), self.__database_api_identifier
)
fee_amount = self.get_response(response_id)["result"][0]["amount"]
lcc.log_info("Required fee: '{}'".format(fee_amount))
lcc.set_step("Check active committee member balance after performing operation")
params = [self.init0, [self.echo_asset]]
response_id = self.send_request(
self.get_request("get_account_balances", params), self.__database_api_identifier
)
balance_after_freeze = self.get_response(response_id)["result"][0]["amount"]
check_that(
"balance reduced",
int(current_balance) - int(balance_after_freeze),
equal_to(amount_to_freeze + fee_amount),
quiet=True
)
lcc.set_step("Check active committee member frozen balance")
response_id = self.send_request(
self.get_request("get_committee_frozen_balance", [committee_member_id]), self.__database_api_identifier
)
response = self.get_response(response_id)
frozen_balance = response["result"]["amount"]
check_that("frozen balance", frozen_balance, equal_to(current_frozen_balance + amount_to_freeze), quiet=True)
| # -*- coding: utf-8 -*-
from common.base_test import BaseTest
from project import INIT0_PK
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to
SUITE = {
"description": "Operation 'committee_frozen_balance_deposit'"
}
@lcc.prop("main", "type")
@lcc.tags("operations", "committee_member_operations", "committee_frozen_balance_deposit")
@lcc.suite("Check work of operation 'committee_frozen_balance_deposit'", rank=1)
class CommitteeFrozenBalanceDeposit(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.init0 = None
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
lcc.log_info("API identifiers are: database='{}'".format(self.__database_api_identifier))
self.committee_members_info = self.get_active_committee_members_info(self.__database_api_identifier)
self.init0 = self.committee_members_info[0]["account_id"]
lcc.log_info("Echo initial accounts: {}".format(self.init0))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of operation 'committee_frozen_balance_deposit'")
def method_main_check(self, get_random_integer):
amount_to_freeze = get_random_integer + 10
lcc.set_step("Get first active committee member id and account id")
committee_member_id = self.committee_members_info[0]["committee_id"]
lcc.log_info("Committee member id: '{}' and account id: '{}'".format(committee_member_id, self.init0))
lcc.set_step("Get active committee member balance")
params = [self.init0, [self.echo_asset]]
response_id = self.send_request(
self.get_request("get_account_balances", params), self.__database_api_identifier
)
current_balance = self.get_response(response_id)["result"][0]["amount"]
lcc.log_info("{} account id balance: {}".format(self.init0, current_balance))
lcc.set_step("Check active committee member frozen balance")
response_id = self.send_request(
self.get_request("get_committee_frozen_balance", [committee_member_id]), self.__database_api_identifier
)
current_frozen_balance = self.get_response(response_id)["result"]["amount"]
lcc.log_info("{} account id frozen balance: {}".format(self.init0, current_frozen_balance))
lcc.set_step("Freeze part of active committee member balance")
operation = self.echo_ops.get_committee_frozen_balance_deposit_operation(
echo=self.echo,
committee_member=committee_member_id,
committee_member_account=self.init0,
amount=amount_to_freeze,
asset_id=self.echo_asset,
signer=INIT0_PK
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
if not self.is_operation_completed(broadcast_result, expected_static_variant=0):
            raise Exception("Balance was not frozen")
lcc.log_info("Freeze {} assets".format(amount_to_freeze))
lcc.set_step("Get required fee for 'committee_frozen_balance_deposit' operation")
response_id = self.send_request(
self.get_request("get_required_fees", [[operation[:-1]], self.echo_asset]), self.__database_api_identifier
)
fee_amount = self.get_response(response_id)["result"][0]["amount"]
lcc.log_info("Required fee: '{}'".format(fee_amount))
lcc.set_step("Check active committee member balance after performing operation")
params = [self.init0, [self.echo_asset]]
response_id = self.send_request(
self.get_request("get_account_balances", params), self.__database_api_identifier
)
balance_after_freeze = self.get_response(response_id)["result"][0]["amount"]
check_that(
"balance reduced",
int(current_balance) - int(balance_after_freeze),
equal_to(amount_to_freeze + fee_amount),
quiet=True
)
lcc.set_step("Check active committee member frozen balance")
response_id = self.send_request(
self.get_request("get_committee_frozen_balance", [committee_member_id]), self.__database_api_identifier
)
response = self.get_response(response_id)
frozen_balance = response["result"]["amount"]
check_that("frozen balance", frozen_balance, equal_to(current_frozen_balance + amount_to_freeze), quiet=True) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.258602 | 2 |
conference/schedule/views.py | djangocon/2017.djangocon.eu | 7 | 6613824 | <filename>conference/schedule/views.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.views.generic import DetailView, ListView
from conference.schedule.models import Slot
class SlotList(ListView):
model = Slot
context_object_name = 'talks'
def get_context_data(self, **kwargs):
context = super(SlotList, self).get_context_data(**kwargs)
context['talks'] = super(SlotList, self).get_queryset().filter(sprint_days=False)
context['workshops'] = super(SlotList, self).get_queryset().filter(sprint_days=True)
return context
class SlotDetail(DetailView):
model = Slot
context_object_name = 'talk'
def get_context_data(self, **kwargs):
context = super(SlotDetail, self).get_context_data(**kwargs)
context['meta'] = self.get_object().as_meta(self.request)
return context
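
# --- Illustrative sketch (not part of the original file) ----------------------
# These class-based views still need a URLconf entry before Django can route
# requests to them. The patterns below are an assumption (Django >= 2.0 `path`
# syntax and made-up route names), shown only to illustrate the typical wiring;
# in the real project they would live in the app's urls.py.
from django.urls import path

example_urlpatterns = [
    path('schedule/', SlotList.as_view(), name='slot-list'),
    path('schedule/<int:pk>/', SlotDetail.as_view(), name='slot-detail'),
]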
| <filename>conference/schedule/views.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.views.generic import DetailView, ListView
from conference.schedule.models import Slot
class SlotList(ListView):
model = Slot
context_object_name = 'talks'
def get_context_data(self, **kwargs):
context = super(SlotList, self).get_context_data(**kwargs)
context['talks'] = super(SlotList, self).get_queryset().filter(sprint_days=False)
context['workshops'] = super(SlotList, self).get_queryset().filter(sprint_days=True)
return context
class SlotDetail(DetailView):
model = Slot
context_object_name = 'talk'
def get_context_data(self, **kwargs):
context = super(SlotDetail, self).get_context_data(**kwargs)
context['meta'] = self.get_object().as_meta(self.request)
return context
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.957877 | 2 |
nysa/cbuilder/device_manager.py | CospanDesign/nysa | 15 | 6613825 | <gh_stars>10-100
'''
Copyright (c) 2015 <NAME> (<EMAIL>)
This file is part of Nysa.
(http://wiki.cospandesign.com/index.php?title=Nysa.org)
Nysa is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
Nysa is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Nysa; If not, see <http://www.gnu.org/licenses/>.
'''
'''
Functions to manage devices
'''
import os
import sys
import json
from sdb_component import SDBError
from collections import OrderedDict as odict
__author__ = "<EMAIL> (<NAME>)"
LOCAL_DEVICE_LIST = os.path.join(os.path.dirname(__file__), os.pardir, "data", "local_devices", "devices.json")
LOCAL_DEVICE_LIST = os.path.abspath(LOCAL_DEVICE_LIST)
def get_device_list():
"""Return a list of device names where the index corresponds to the device
identification number
Args:
Nothing
Returns:
(list): List of devices
(index corresponds to devices identification number)
Raises:
Nothing
"""
dev_tags = {}
dev_list = []
index = 0
length = 0
try:
f = open(LOCAL_DEVICE_LIST, "r")
sdb_tags = json.load(f, object_pairs_hook = odict)
    except (TypeError, ValueError) as err:
        # json.load raises ValueError on malformed JSON, so catch it too
        print "JSON Error: %s" % str(err)
        raise SDBError("DRT Error: %s" % str(err))
dev_tags = sdb_tags["devices"]
length = len(dev_tags.keys())
int_dict = {}
for key in dev_tags:
#change the hex number into a integer
index = None
id_val = dev_tags[key]["ID"]
if isinstance(id_val, str) or isinstance(id_val, unicode):
index = int(id_val[2:], 16)
else:
index = id_val
dev_tags[key]["name"] = key
#print "index: %d" % index
int_dict[index] = dev_tags[key]
ordered_keys = int_dict.keys()
dev_list = []
for key in ordered_keys:
dev_list.append(int_dict[key])
return dev_list
def get_device_name_from_id(device_id):
"""return device name for the ID
Args:
ID (int): device id number
Return:
name (string): name of the device
Raises:
Nothing
"""
#print "Index: 0x%04X" % device_id
dev_tags = {}
try:
f = open(LOCAL_DEVICE_LIST, "r")
sdb_tags = json.load(f)
    except (TypeError, ValueError) as err:
        print "JSON Error: %s" % str(err)
        raise SDBError("DRT Error: %s" % str(err))
dev_tags = sdb_tags["devices"]
did = 0
for device in dev_tags:
#print "Looking at: %s" % device
did = None
if (type(dev_tags[device]["ID"]) == str) or (type(dev_tags[device]["ID"]) == unicode):
did = int(dev_tags[device]["ID"], 16)
else:
did = dev_tags[device]["ID"]
if did == device_id:
return device
return "Unknown Device"
def get_device_id_from_name(name):
    """return the index of the device specified by name
The name can be found in the devices.json file
Example: if name == GPIO, then 2 will be returned
Args:
name (string): name of the core to identify
Return:
device identification number
Raises:
Nothing
"""
dev_tags = {}
try:
f = open(LOCAL_DEVICE_LIST, "r")
sdb_tags = json.load(f)
    except (TypeError, ValueError) as err:
        print "JSON Error: %s" % str(err)
        raise SDBError("DRT Error: %s" % str(err))
dev_tags = sdb_tags["devices"]
for key in dev_tags:
if name.lower().strip() == key.lower().strip():
name = key
if name not in dev_tags.keys():
    raise SDBError("Name: %s is not a known device type" % name)
return int(dev_tags[name]["ID"], 0)
def get_device_type(index):
"""return the name of the device referenced by index
Args:
index (int): Integer of device index
Return:
(string) Name of the device
Raises:
Nothing
"""
dev_list = get_device_list()
return dev_list[index]["name"]
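
# --- Illustrative usage (not part of the original module) ----------------------
# A typical round trip through the helpers above; the actual names and IDs are
# whatever the local devices.json defines.
if __name__ == "__main__":
    devices = get_device_list()
    print "known devices: %d" % len(devices)
    if devices:
        first = devices[0]["name"]
        dev_id = get_device_id_from_name(first)
        print "%s -> 0x%04X -> %s" % (first, dev_id, get_device_name_from_id(dev_id))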
| '''
Copyright (c) 2015 <NAME> (<EMAIL>)
This file is part of Nysa.
(http://wiki.cospandesign.com/index.php?title=Nysa.org)
Nysa is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
Nysa is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Nysa; If not, see <http://www.gnu.org/licenses/>.
'''
'''
Functions to manage devices
'''
import os
import sys
import json
from sdb_component import SDBError
from collections import OrderedDict as odict
__author__ = "<EMAIL> (<NAME>)"
LOCAL_DEVICE_LIST = os.path.join(os.path.dirname(__file__), os.pardir, "data", "local_devices", "devices.json")
LOCAL_DEVICE_LIST = os.path.abspath(LOCAL_DEVICE_LIST)
def get_device_list():
"""Return a list of device names where the index corresponds to the device
identification number
Args:
Nothing
Returns:
(list): List of devices
(index corresponds to devices identification number)
Raises:
Nothing
"""
dev_tags = {}
dev_list = []
index = 0
length = 0
try:
f = open(LOCAL_DEVICE_LIST, "r")
sdb_tags = json.load(f, object_pairs_hook = odict)
    except (TypeError, ValueError) as err:
        # json.load raises ValueError on malformed JSON, so catch it too
        print "JSON Error: %s" % str(err)
        raise SDBError("DRT Error: %s" % str(err))
dev_tags = sdb_tags["devices"]
length = len(dev_tags.keys())
int_dict = {}
for key in dev_tags:
#change the hex number into a integer
index = None
id_val = dev_tags[key]["ID"]
if isinstance(id_val, str) or isinstance(id_val, unicode):
index = int(id_val[2:], 16)
else:
index = id_val
dev_tags[key]["name"] = key
#print "index: %d" % index
int_dict[index] = dev_tags[key]
ordered_keys = int_dict.keys()
dev_list = []
for key in ordered_keys:
dev_list.append(int_dict[key])
return dev_list
def get_device_name_from_id(device_id):
"""return device name for the ID
Args:
ID (int): device id number
Return:
name (string): name of the device
Raises:
Nothing
"""
#print "Index: 0x%04X" % device_id
dev_tags = {}
try:
f = open(LOCAL_DEVICE_LIST, "r")
sdb_tags = json.load(f)
    except (TypeError, ValueError) as err:
        print "JSON Error: %s" % str(err)
        raise SDBError("DRT Error: %s" % str(err))
dev_tags = sdb_tags["devices"]
did = 0
for device in dev_tags:
#print "Looking at: %s" % device
did = None
if (type(dev_tags[device]["ID"]) == str) or (type(dev_tags[device]["ID"]) == unicode):
did = int(dev_tags[device]["ID"], 16)
else:
did = dev_tags[device]["ID"]
if did == device_id:
return device
return "Unknown Device"
def get_device_id_from_name(name):
    """return the index of the device specified by name
The name can be found in the devices.json file
Example: if name == GPIO, then 2 will be returned
Args:
name (string): name of the core to identify
Return:
device identification number
Raises:
Nothing
"""
dev_tags = {}
try:
f = open(LOCAL_DEVICE_LIST, "r")
sdb_tags = json.load(f)
    except (TypeError, ValueError) as err:
        print "JSON Error: %s" % str(err)
        raise SDBError("DRT Error: %s" % str(err))
dev_tags = sdb_tags["devices"]
for key in dev_tags:
if name.lower().strip() == key.lower().strip():
name = key
if name not in dev_tags.keys():
    raise SDBError("Name: %s is not a known device type" % name)
return int(dev_tags[name]["ID"], 0)
def get_device_type(index):
"""return the name of the device referenced by index
Args:
index (int): Integer of device index
Return:
(string) Name of the device
Raises:
Nothing
"""
dev_list = get_device_list()
return dev_list[index]["name"] | en | 0.770877 | Copyright (c) 2015 <NAME> (<EMAIL>) This file is part of Nysa. (http://wiki.cospandesign.com/index.php?title=Nysa.org) Nysa is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or any later version. Nysa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Nysa; If not, see <http://www.gnu.org/licenses/>. Functions to manage devices Return a list of device names where the index corresponds to the device identification number Args: Nothing Returns: (list): List of devices (index corresponds to devices identification number) Raises: Nothing #change the hex number into a integer #print "index: %d" % index return device name for the ID Args: ID (int): device id number Return: name (string): name of the device Raises: Nothing #print "Index: 0x%04X" % device_id #print "Looking at: %s" % device return the index of the device speicified by name The name can be found in the devices.json file Example: if name == GPIO, then 2 will be returned Args: name (string): name of the core to identify Return: device identification number Raises: Nothing return the name of the device referenced by index Args: index (int): Integer of device index Return: (string) Name of the device Raises: Nothing | 2.285007 | 2 |
analyzer/codechecker_analyzer/analyzers/flag.py | ryankurte/codechecker | 1,601 | 6613826 | <reponame>ryankurte/codechecker<gh_stars>1000+
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
def has_flag(flag, cmd):
"""Return true if a cmd contains a flag or false if not."""
return bool(next((x for x in cmd if x.startswith(flag)), False))
def prepend_all(flag, params):
"""
Returns a list where all elements of "params" is prepended with the given
flag. For example in case "flag" is -f and "params" is ['a', 'b', 'c'] the
result is ['-f', 'a', '-f', 'b', '-f', 'c'].
"""
result = []
for param in params:
result.append(flag)
result.append(param)
return result
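
# --- Illustrative usage (not part of the original module) ----------------------
# has_flag() does prefix matching, so "--analyze" also matches longer flags
# that start with it; prepend_all() interleaves the flag with each parameter.
if __name__ == "__main__":
    cmd = ["clang", "--analyze", "-I", "include"]
    assert has_flag("--analyze", cmd)
    assert not has_flag("-o", cmd)
    assert prepend_all("-f", ["a", "b"]) == ["-f", "a", "-f", "b"]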
| # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
def has_flag(flag, cmd):
"""Return true if a cmd contains a flag or false if not."""
return bool(next((x for x in cmd if x.startswith(flag)), False))
def prepend_all(flag, params):
"""
Returns a list where all elements of "params" is prepended with the given
flag. For example in case "flag" is -f and "params" is ['a', 'b', 'c'] the
result is ['-f', 'a', '-f', 'b', '-f', 'c'].
"""
result = []
for param in params:
result.append(flag)
result.append(param)
return result | en | 0.451986 | # ------------------------------------------------------------------------- # # Part of the CodeChecker project, under the Apache License v2.0 with # LLVM Exceptions. See LICENSE for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- Return true if a cmd contains a flag or false if not. Returns a list where all elements of "params" is prepended with the given flag. For example in case "flag" is -f and "params" is ['a', 'b', 'c'] the result is ['-f', 'a', '-f', 'b', '-f', 'c']. | 2.8642 | 3 |
jira_worklog/utils/printing.py | apragacz/git-jira-worklog | 1 | 6613827 | <filename>jira_worklog/utils/printing.py<gh_stars>1-10
from __future__ import absolute_import, unicode_literals, print_function
import sys
class bcolors:
    # plain str literals so the escape codes print correctly on Python 3 as well
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def cprint(*objects, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
color = kwargs.pop('color', None)
if kwargs:
raise TypeError(
'cprint() got an unexpected keyword argument \'{}\''.format(
next(iter(kwargs))
))
if color is None:
print(*objects, sep=sep, end=end, file=file)
else:
print(color, end='', file=file)
print(*objects, sep=sep, end=end, file=file)
print(bcolors.ENDC, end='', file=file)
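
# --- Illustrative usage (not part of the original module) ----------------------
# cprint() behaves like print() but optionally wraps the output in an ANSI
# color sequence; any other unknown keyword argument raises TypeError.
if __name__ == "__main__":
    cprint("plain text")
    cprint("success", color=bcolors.OKGREEN)
    cprint("warning", "with", "parts", sep=" | ", color=bcolors.WARNING)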
| <filename>jira_worklog/utils/printing.py<gh_stars>1-10
from __future__ import absolute_import, unicode_literals, print_function
import sys
class bcolors:
    # plain str literals so the escape codes print correctly on Python 3 as well
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def cprint(*objects, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
color = kwargs.pop('color', None)
if kwargs:
raise TypeError(
'cprint() got an unexpected keyword argument \'{}\''.format(
next(iter(kwargs))
))
if color is None:
print(*objects, sep=sep, end=end, file=file)
else:
print(color, end='', file=file)
print(*objects, sep=sep, end=end, file=file)
print(bcolors.ENDC, end='', file=file)
| none | 1 | 2.509488 | 3 | |
bin/emoji_process.py | wks-sumo-logic/emoji-tools | 0 | 6613828 | <filename>bin/emoji_process.py
#!/usr/bin/env python3
"""
Downloads and processes the emoji list
"""
import os
import sys
import urllib.parse
import string
import requests
import bs4
### targeturl = sys.argv[1]
targeturl = 'https://unicode.org/emoji/charts/full-emoji-list.html'
htmlfile = os.path.basename(urllib.parse.urlsplit(targeturl).path)
targetfile = os.path.join( '/var/tmp', htmlfile )
def download_html_file(emojiurl, emojifile):
"""
Download the html file
"""
url = requests.get(emojiurl)
htmltext = url.text
with open(emojifile, 'w') as outputfile:
outputfile.write(htmltext)
def expandcode(codestring: str):
"""
Process the target name and code string item
"""
return chr(int(codestring.lstrip("U+").zfill(8), 16))
def process_emoji(ename, codelist):
"""
Process the target name and code list
"""
convertlist = list()
for codeitem in codelist.split():
### converted = convertcode(codeitem)
converted = expandcode(codeitem)
convertlist.append(converted)
separator = ''
codestring = separator.join(convertlist)
print('\"{}\",\"{}\"'.format(ename, codestring))
def convertcode(ecode):
"""
Process codelist
"""
ecode = ecode.replace('U+','')
lead = '\\\\' + 'u' + ecode
if len(ecode) != 4:
offset = (len(bin(int(ecode,16))) - 10 )
lead = str(hex(int(str((bin(int(ecode,16)))[2:offset]),2) + 55232))
lead = lead.replace('0x', "\\\\u")
tail = str(hex( (int(ecode, 16) & 1023 ) + 56320 ))
tail = tail.replace('0x', "\\\\u")
conversion = lead + tail
else:
conversion = lead
return conversion
def process_html_file(emojifile):
"""
Parse the html file and extract the name and code point
"""
print('\"{}\",\"{}\"'.format("emojiname", "emojicode"))
with open(emojifile) as emoji_html:
soup = bs4.BeautifulSoup(emoji_html, "html.parser")
for row in soup.find_all('tr'):
name = row.find('td', attrs={'class': 'name'})
if name is not None:
emojiname = name.text
emojiname = emojiname.translate(emojiname.maketrans('', '', string.punctuation))
emojiname = emojiname.replace(' ', '_')
emojiname = emojiname.replace('__', '_')
emojiname = emojiname.lower()
code = row.find('td', attrs={'class': 'code'})
            if code is not None:
                emojicode = code.text
            if name is not None and code is not None:
                process_emoji(emojiname, emojicode)
def main():
"""
    Driver for downloading, processing, and outputting emoji
"""
download_html_file(targeturl,targetfile)
process_html_file(targetfile)
if __name__ == '__main__':
main()
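
# --- Illustrative check (not part of the original script) ----------------------
# expandcode() zero-pads a "U+XXXX" code point and returns the character
# itself, while the unused convertcode() emits an escaped UTF-16 surrogate
# pair instead (U+1F600 splits into the lead/tail pair D83D/DE00).
assert expandcode("U+1F600") == "\U0001F600"
assert expandcode("U+00A9") == "\u00A9"  # single BMP code point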
| <filename>bin/emoji_process.py
#!/usr/bin/env python3
"""
Downloads and processes the emoji list
"""
import os
import sys
import urllib.parse
import string
import requests
import bs4
### targeturl = sys.argv[1]
targeturl = 'https://unicode.org/emoji/charts/full-emoji-list.html'
htmlfile = os.path.basename(urllib.parse.urlsplit(targeturl).path)
targetfile = os.path.join( '/var/tmp', htmlfile )
def download_html_file(emojiurl, emojifile):
"""
Download the html file
"""
url = requests.get(emojiurl)
htmltext = url.text
with open(emojifile, 'w') as outputfile:
outputfile.write(htmltext)
def expandcode(codestring: str):
"""
Process the target name and code string item
"""
return chr(int(codestring.lstrip("U+").zfill(8), 16))
def process_emoji(ename, codelist):
"""
Process the target name and code list
"""
convertlist = list()
for codeitem in codelist.split():
### converted = convertcode(codeitem)
converted = expandcode(codeitem)
convertlist.append(converted)
separator = ''
codestring = separator.join(convertlist)
print('\"{}\",\"{}\"'.format(ename, codestring))
def convertcode(ecode):
"""
Process codelist
"""
ecode = ecode.replace('U+','')
lead = '\\\\' + 'u' + ecode
if len(ecode) != 4:
offset = (len(bin(int(ecode,16))) - 10 )
lead = str(hex(int(str((bin(int(ecode,16)))[2:offset]),2) + 55232))
lead = lead.replace('0x', "\\\\u")
tail = str(hex( (int(ecode, 16) & 1023 ) + 56320 ))
tail = tail.replace('0x', "\\\\u")
conversion = lead + tail
else:
conversion = lead
return conversion
def process_html_file(emojifile):
"""
Parse the html file and extract the name and code point
"""
print('\"{}\",\"{}\"'.format("emojiname", "emojicode"))
with open(emojifile) as emoji_html:
soup = bs4.BeautifulSoup(emoji_html, "html.parser")
for row in soup.find_all('tr'):
name = row.find('td', attrs={'class': 'name'})
if name is not None:
emojiname = name.text
emojiname = emojiname.translate(emojiname.maketrans('', '', string.punctuation))
emojiname = emojiname.replace(' ', '_')
emojiname = emojiname.replace('__', '_')
emojiname = emojiname.lower()
code = row.find('td', attrs={'class': 'code'})
            if code is not None:
                emojicode = code.text
            if name is not None and code is not None:
                process_emoji(emojiname, emojicode)
def main():
"""
    Driver for downloading, processing, and outputting emoji
"""
download_html_file(targeturl,targetfile)
process_html_file(targetfile)
if __name__ == '__main__':
main()
| en | 0.588474 | #!/usr/bin/env python3 Downloads and processes the emoji list ### targeturl = sys.argv[1] Download the html file Process the target name and code string item Process the target name and code list ### converted = convertcode(codeitem) Process codelist Parse the html file and extract the name and code point Driver for downloading, processing, and outputing emoticons | 3.148113 | 3 |
trulia/spiders/trulia_spider.py | bhavish14/Trulia_scrapy | 0 | 6613829 | import scrapy
from scrapy.http import Request
from trulia.items import TruliaItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
class trulia_spider(scrapy.Spider):
name = "trulia"
def __init__(self, region = '', city = '', *args, **kwargs):
super(trulia_spider, self).__init__(*args, **kwargs)
base_url = "https://www.trulia.com/"
city = city.replace(' ', '_')
url = base_url + '/' + region + '/' + city
self.start_urls = [url]
def start_requests(self):
allowed_domains = ["https://www.trulia.com/"]
for item in self.start_urls:
yield scrapy.Request(url=item, callback=self.parse)
def parse(self, response):
outer_wrapper = response.xpath('//li[@class="smlCol12 lrgCol8"]')
for item in outer_wrapper:
Item = {}
Item['retail_price'] = item.xpath(
'.//span[@class="cardPrice h5 man pan typeEmphasize noWrap typeTruncate"]/text()').extract()
if item.xpath('.//li[@data-auto-test="beds"]/text()'):
Item['bedrooms'] = item.xpath('.//li[@data-auto-test="beds"]/text()').extract()
Item['area'] = item.xpath('.//li[@data-auto-test="sqft"]/text()').extract()
if item.xpath('.//li[@data-auto-test="baths"]/text()'):
                Item['bathrooms'] = item.xpath('.//li[@data-auto-test="baths"]/text()').extract()
Item['region'] = item.xpath('.//span[@itemprop="addressRegion"]/text()').extract()
Item['postal_code'] = item.xpath('.//span[@itemprop="postalCode"]/text()').extract()
Item['latitude'] = item.xpath('.//meta[@itemprop="latitude"]/@content').extract()
Item['longitude'] = item.xpath('.//meta[@itemprop="longitude"]/@content').extract()
Item['street_address'] = item.xpath('.//span[@itemprop="streetAddress"]/text()').extract()
yield Item
next_url = response.xpath('//a[@aria-label="Next page"]/@href').extract_first()
print(next_url)
yield Request(response.urljoin(next_url), callback=self.parse)
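
# --- Illustrative usage (not part of the original spider) ----------------------
# The spider takes `region` and `city` through its constructor, so it would
# typically be launched with Scrapy spider arguments, e.g.:
#
#   scrapy crawl trulia -a region=CA -a city="San Francisco" -o listings.json
#
# Note that `allowed_domains` above is assigned as a local variable inside
# start_requests(), so it never restricts the crawl; to take effect it would
# have to be a class attribute (e.g. allowed_domains = ["trulia.com"]).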
| import scrapy
from scrapy.http import Request
from trulia.items import TruliaItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
class trulia_spider(scrapy.Spider):
name = "trulia"
def __init__(self, region = '', city = '', *args, **kwargs):
super(trulia_spider, self).__init__(*args, **kwargs)
base_url = "https://www.trulia.com/"
city = city.replace(' ', '_')
url = base_url + '/' + region + '/' + city
self.start_urls = [url]
def start_requests(self):
allowed_domains = ["https://www.trulia.com/"]
for item in self.start_urls:
yield scrapy.Request(url=item, callback=self.parse)
def parse(self, response):
outer_wrapper = response.xpath('//li[@class="smlCol12 lrgCol8"]')
for item in outer_wrapper:
Item = {}
Item['retail_price'] = item.xpath(
'.//span[@class="cardPrice h5 man pan typeEmphasize noWrap typeTruncate"]/text()').extract()
if item.xpath('.//li[@data-auto-test="beds"]/text()'):
Item['bedrooms'] = item.xpath('.//li[@data-auto-test="beds"]/text()').extract()
Item['area'] = item.xpath('.//li[@data-auto-test="sqft"]/text()').extract()
if item.xpath('.//li[@data-auto-test="baths"]/text()'):
                Item['bathrooms'] = item.xpath('.//li[@data-auto-test="baths"]/text()').extract()
Item['region'] = item.xpath('.//span[@itemprop="addressRegion"]/text()').extract()
Item['postal_code'] = item.xpath('.//span[@itemprop="postalCode"]/text()').extract()
Item['latitude'] = item.xpath('.//meta[@itemprop="latitude"]/@content').extract()
Item['longitude'] = item.xpath('.//meta[@itemprop="longitude"]/@content').extract()
Item['street_address'] = item.xpath('.//span[@itemprop="streetAddress"]/text()').extract()
yield Item
next_url = response.xpath('//a[@aria-label="Next page"]/@href').extract_first()
print(next_url)
yield Request(response.urljoin(next_url), callback=self.parse)
| none | 1 | 2.666811 | 3 | |
data/models/cultures.py | SIXMON/peps | 5 | 6613830 | <gh_stars>1-10
CULTURES = [(
'Autre', [
('Pas de culture', 'Pas de culture'),
('Toutes les cultures', 'Toutes les cultures')
],
),(
'Cultures', [
("Avoine", "Avoine"),
("Betterave fourragère", "Betterave fourragère"),
("Betterave sucrière", "Betterave sucrière"),
("Blé dur", "Blé dur"),
("Blé tendre d'hiver", "Blé tendre d'hiver"),
("Blé tendre de printemps", "Blé tendre de printemps"),
("Chanvre", "Chanvre"),
("Chia", "Chia"),
("Colza", "Colza"),
("Haricot", "Haricot"),
("Lablab", "Lablab"),
("Lentilles", "Lentilles"),
("Lin", "Lin"),
        ("Lupin blanc", "Lupin blanc"),
("Luzerne", "Luzerne"),
("Epeautre", "Epeautre"),
("Fétuque", "Fétuque"),
("Féverole", "Féverole"),
        ("Maïs grain", "Maïs grain"),
("Maïs ensilage", "Maïs ensilage"),
("Millet", "Millet"),
("Moutarde", "Moutarde"),
("Oeillette ou pavot", "Oeillette ou pavot"),
("Orge d’hiver", "Orge d’hiver"),
("Orge de printemps", "Orge de printemps"),
("Ortie", "Ortie"),
("Pois chiche", "Pois chiche"),
("Pois d'hiver", "Pois d'hiver"),
("Pois de printemps", "Pois de printemps"),
("Pomme de terre", "Pomme de terre"),
("Quinoa", "Quinoa"),
("Riz", "Riz"),
("Sarrasin", "Sarrasin"),
("Seigle", "Seigle"),
("Soja", "Soja"),
("Sorgho", "Sorgho"),
("Tournesol", "Tournesol"),
("Triticale", "Triticale")
]
), (
'Fourrages', [
("Graminées fourragères", "Graminées fourragères"),
("Légumineuses fourragères", "Légumineuses fourragères"),
("Protéagineux fourragers", "Protéagineux fourragers"),
("Prairies", "Prairies")
]
)]
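
# --- Illustrative sketch (not part of the original module) ----------------------
# CULTURES follows Django's grouped-choices format: a list of
# (group label, [(value, label), ...]) pairs. It can therefore be passed
# directly to a field's `choices`; the model below is hypothetical.
#
# from django.db import models
#
# class Parcel(models.Model):
#     culture = models.CharField(max_length=64, choices=CULTURES)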
| CULTURES = [(
'Autre', [
('Pas de culture', 'Pas de culture'),
('Toutes les cultures', 'Toutes les cultures')
],
),(
'Cultures', [
("Avoine", "Avoine"),
("Betterave fourragère", "Betterave fourragère"),
("Betterave sucrière", "Betterave sucrière"),
("Blé dur", "Blé dur"),
("Blé tendre d'hiver", "Blé tendre d'hiver"),
("Blé tendre de printemps", "Blé tendre de printemps"),
("Chanvre", "Chanvre"),
("Chia", "Chia"),
("Colza", "Colza"),
("Haricot", "Haricot"),
("Lablab", "Lablab"),
("Lentilles", "Lentilles"),
("Lin", "Lin"),
        ("Lupin blanc", "Lupin blanc"),
("Luzerne", "Luzerne"),
("Epeautre", "Epeautre"),
("Fétuque", "Fétuque"),
("Féverole", "Féverole"),
        ("Maïs grain", "Maïs grain"),
("Maïs ensilage", "Maïs ensilage"),
("Millet", "Millet"),
("Moutarde", "Moutarde"),
("Oeillette ou pavot", "Oeillette ou pavot"),
("Orge d’hiver", "Orge d’hiver"),
("Orge de printemps", "Orge de printemps"),
("Ortie", "Ortie"),
("Pois chiche", "Pois chiche"),
("Pois d'hiver", "Pois d'hiver"),
("Pois de printemps", "Pois de printemps"),
("Pomme de terre", "Pomme de terre"),
("Quinoa", "Quinoa"),
("Riz", "Riz"),
("Sarrasin", "Sarrasin"),
("Seigle", "Seigle"),
("Soja", "Soja"),
("Sorgho", "Sorgho"),
("Tournesol", "Tournesol"),
("Triticale", "Triticale")
]
), (
'Fourrages', [
("Graminées fourragères", "Graminées fourragères"),
("Légumineuses fourragères", "Légumineuses fourragères"),
("Protéagineux fourragers", "Protéagineux fourragers"),
("Prairies", "Prairies")
]
)] | none | 1 | 1.844315 | 2 | |
speech_code_1.py | sourav9064/python-speech-detection | 0 | 6613831 | <filename>speech_code_1.py
import speech_recognition as sr
import sys, time
r = sr.Recognizer()
mic = sr.Microphone()
with mic as source:
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
try:
    text = r.recognize_google(audio)
    print(text)
except sr.UnknownValueError:
    print('Voice not detected')
except sr.RequestError as err:
    print('Speech service error: {}'.format(err))
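
# --- Illustrative variation (not part of the original script) ------------------
# speech_recognition can transcribe from an audio file instead of the default
# microphone; the filename below is hypothetical.
# with sr.AudioFile('sample.wav') as source:
#     audio = r.record(source)
#     print(r.recognize_google(audio))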
| <filename>speech_code_1.py
import speech_recognition as sr
import sys, time
r = sr.Recognizer()
mic = sr.Microphone()
with mic as source:
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
try:
    text = r.recognize_google(audio)
    print(text)
except sr.UnknownValueError:
    print('Voice not detected')
except sr.RequestError as err:
    print('Speech service error: {}'.format(err))
| none | 1 | 2.995747 | 3 | |
pype/plugins/nuke/publish/validate_output_resolution.py | kalisp/pype | 1 | 6613832 | import nuke
import pyblish.api
class RepairWriteResolutionDifference(pyblish.api.Action):
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
for instance in instances:
reformat = instance[0].dependencies()[0]
if reformat.Class() != "Reformat":
reformat = nuke.nodes.Reformat(inputs=[instance[0].input(0)])
xpos = instance[0].xpos()
ypos = instance[0].ypos() - 26
dependent_ypos = instance[0].dependencies()[0].ypos()
if (instance[0].ypos() - dependent_ypos) <= 51:
xpos += 110
reformat.setXYpos(xpos, ypos)
instance[0].setInput(0, reformat)
reformat["resize"].setValue("none")
class ValidateOutputResolution(pyblish.api.InstancePlugin):
"""Validates Output Resolution.
It is making sure the resolution of write's input is the same as
Format definition of script in Root node.
"""
order = pyblish.api.ValidatorOrder
optional = True
families = ["render", "render.local", "render.farm"]
label = "Write Resolution"
hosts = ["nuke"]
actions = [RepairWriteResolutionDifference]
def process(self, instance):
# Skip bounding box check if a crop node exists.
if instance[0].dependencies()[0].Class() == "Crop":
return
msg = "Bounding box is outside the format."
assert self.check_resolution(instance), msg
def check_resolution(self, instance):
node = instance[0]
root_width = instance.data["resolutionWidth"]
root_height = instance.data["resolutionHeight"]
write_width = node.format().width()
write_height = node.format().height()
        return (root_width == write_width) and (root_height == write_height)
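
# --- Illustrative usage (not part of the original plugin) ----------------------
# pyblish discovers validators through registration; a host integration would
# typically expose this one with pyblish.api.register_plugin:
#
#   import pyblish.api
#   pyblish.api.register_plugin(ValidateOutputResolution)
#
# A failed instance then offers the "Repair" action above, which inserts (or
# reuses) a Reformat node upstream of the Write node.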
| import nuke
import pyblish.api
class RepairWriteResolutionDifference(pyblish.api.Action):
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
for instance in instances:
reformat = instance[0].dependencies()[0]
if reformat.Class() != "Reformat":
reformat = nuke.nodes.Reformat(inputs=[instance[0].input(0)])
xpos = instance[0].xpos()
ypos = instance[0].ypos() - 26
dependent_ypos = instance[0].dependencies()[0].ypos()
if (instance[0].ypos() - dependent_ypos) <= 51:
xpos += 110
reformat.setXYpos(xpos, ypos)
instance[0].setInput(0, reformat)
reformat["resize"].setValue("none")
class ValidateOutputResolution(pyblish.api.InstancePlugin):
"""Validates Output Resolution.
It is making sure the resolution of write's input is the same as
Format definition of script in Root node.
"""
order = pyblish.api.ValidatorOrder
optional = True
families = ["render", "render.local", "render.farm"]
label = "Write Resolution"
hosts = ["nuke"]
actions = [RepairWriteResolutionDifference]
def process(self, instance):
# Skip bounding box check if a crop node exists.
if instance[0].dependencies()[0].Class() == "Crop":
return
msg = "Bounding box is outside the format."
assert self.check_resolution(instance), msg
def check_resolution(self, instance):
node = instance[0]
root_width = instance.data["resolutionWidth"]
root_height = instance.data["resolutionHeight"]
write_width = node.format().width()
write_height = node.format().height()
        return (root_width == write_width) and (root_height == write_height)
| en | 0.867224 | # Get the errored instances # Apply pyblish.logic to get the instances for the plug-in Validates Output Resolution. It is making sure the resolution of write's input is the same as Format definition of script in Root node. # Skip bounding box check if a crop node exists. | 2.363393 | 2 |
api/vagrant_registry/settings/base.py | polart/vagrant-registry | 8 | 6613833 | import logging
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
SECRET_KEY = 'SUPER_SECRET_KEY_MUST_NOT_BE_EMPTY'
ALLOWED_HOSTS = '*'
# Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
]
LOCAL_APPS = [
'apps.boxes.apps.BoxesConfig',
'apps.users.apps.UsersConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'apps.middleware.HeadRequestRemoveContentMiddleware',
]
ROOT_URLCONF = 'vagrant_registry.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vagrant_registry.wsgi.application'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
]
AUTH_USER_MODEL = 'users.User'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'vagrant_registry',
'USER': 'vagrant_registry',
'PASSWORD': '<PASSWORD>',
'HOST': 'postgres',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static-api/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static')
STATICFILES_DIRS = [
os.path.join(os.path.dirname(BASE_DIR), "assets"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media')
PROTECTED_MEDIA_URL = '/protected_media/'
PROTECTED_MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'protected_media')
REST_FRAMEWORK = {
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_AUTHENTICATION_CLASSES': (
'apps.users.authentication.ExpiringTokenAuthentication',
'apps.users.authentication.QueryStringBasedTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
'ALLOWED_VERSIONS': ('v1', ),
'DEFAULT_VERSION': 'v1',
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10,
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.OrderingFilter',
'rest_framework.filters.SearchFilter',
),
}
SENDFILE_BACKEND = 'sendfile.backends.development'
BOX_UPLOAD_EXPIRE_AFTER = 24 # hours
TOKEN_EXPIRE_AFTER = 24 # hours
LOGIN_URL = '/admin/login/'
LOGOUT_URL = '/admin/logout/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '[%(asctime)s] %(levelname)s %(message)s'
},
'server': {
'()': 'django.utils.log.ServerFormatter',
'format': '[%(asctime)s] %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'server_console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'server'
},
'stream': {
'level': 'INFO',
'filters': ['require_debug_false'],
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console', 'stream'],
'level': 'INFO',
'propagate': True,
},
'django.server': {
'handlers': ['server_console'],
'level': 'INFO',
'propagate': False,
},
'apps': {
'handlers': ['console', 'stream'],
'level': 'DEBUG',
'propagate': True,
},
}
}
if 'test' in sys.argv:
logging.disable(logging.CRITICAL)
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media_test')
PROTECTED_MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'protected_media_test')
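
# --- Illustrative hardening sketch (not part of the original settings) ---------
# Outside local development, the hard-coded SECRET_KEY and DEBUG=True above
# would normally be sourced from the environment; the variable names here are
# an assumption, not project convention.
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', 'false').lower() == 'true'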
| import logging
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
SECRET_KEY = 'SUPER_SECRET_KEY_MUST_NOT_BE_EMPTY'
ALLOWED_HOSTS = '*'
# Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
]
LOCAL_APPS = [
'apps.boxes.apps.BoxesConfig',
'apps.users.apps.UsersConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'apps.middleware.HeadRequestRemoveContentMiddleware',
]
ROOT_URLCONF = 'vagrant_registry.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vagrant_registry.wsgi.application'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
]
AUTH_USER_MODEL = 'users.User'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'vagrant_registry',
'USER': 'vagrant_registry',
'PASSWORD': '<PASSWORD>',
'HOST': 'postgres',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static-api/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static')
STATICFILES_DIRS = [
os.path.join(os.path.dirname(BASE_DIR), "assets"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media')
PROTECTED_MEDIA_URL = '/protected_media/'
PROTECTED_MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'protected_media')
REST_FRAMEWORK = {
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_AUTHENTICATION_CLASSES': (
'apps.users.authentication.ExpiringTokenAuthentication',
'apps.users.authentication.QueryStringBasedTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
'ALLOWED_VERSIONS': ('v1', ),
'DEFAULT_VERSION': 'v1',
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10,
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.OrderingFilter',
'rest_framework.filters.SearchFilter',
),
}
SENDFILE_BACKEND = 'sendfile.backends.development'
BOX_UPLOAD_EXPIRE_AFTER = 24 # hours
TOKEN_EXPIRE_AFTER = 24 # hours
LOGIN_URL = '/admin/login/'
LOGOUT_URL = '/admin/logout/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '[%(asctime)s] %(levelname)s %(message)s'
},
'server': {
'()': 'django.utils.log.ServerFormatter',
'format': '[%(asctime)s] %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'server_console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'server'
},
'stream': {
'level': 'INFO',
'filters': ['require_debug_false'],
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console', 'stream'],
'level': 'INFO',
'propagate': True,
},
'django.server': {
'handlers': ['server_console'],
'level': 'INFO',
'propagate': False,
},
'apps': {
'handlers': ['console', 'stream'],
'level': 'DEBUG',
'propagate': True,
},
}
}
if 'test' in sys.argv:
logging.disable(logging.CRITICAL)
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media_test')
PROTECTED_MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'protected_media_test')
| en | 0.564535 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: don't run with debug turned on in production! # Application definition # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ # hours # hours | 1.816737 | 2 |
saml_reader/cli.py | changodb/saml_reader | 2 | 6613834 | <gh_stars>1-10
"""
Command line interface for SAML Reader parser. These functions handle all
user interaction/display.
"""
import sys
import json
import argparse
from saml_reader.text_reader import TextReader
from saml_reader.validation.mongo import MongoFederationConfig, MongoVerifier, MongoComparisonValue
from saml_reader.saml.parser import DataTypeInvalid
from saml_reader import __version__
def cli(cl_args):
"""
Entrypoint for the command line interface. Handles parsing command line arguments.
Args:
cl_args (iterable): Command-line arguments. Possibilities:
- `<filepath>`: positional argument. Path to input file. If omitted,
data will be read in from stdin unless `--clip` is specified.
- `--stdin`: optional argument. Specifying will read data from stdin.
- `--clip`: optional argument. Specifying will read data from clipboard
- `--type <type>`: optional argument, default: 'xml'. Specifies the data type
to be read in. Must be one of: 'xml', 'base64', 'har'
- `--compare <file, optional>`: optional argument. Compare SAML data vs. data entered
by user. If no file is specified, application will prompt for values. If file
specified, must be JSON file which matches the attribute keys in
`mongo.VALIDATION_REGEX_BY_ATTRIB`
- `--summary`: optional argument. Will output a summary of relevant
data read from SAML response.
- `--summary-only`: optional argument. Only outputs summary info, does not perform
MongoDB Cloud tests
- `--version`: optional argument. Displays version information and exits.
- `--help`: optional argument. Displays help information and exits.
Returns:
None
"""
parser = argparse.ArgumentParser(prog="SAML Reader",
description='Read a SAML response and pull out '
'relevant values for diagnosing '
'federated authentication issues.')
# TODO: Look into having argparse verify if the path is valid
parser.add_argument('filepath', metavar="PATH", action='store',
default=None, nargs='?',
help='path for source file. If omitted, '
'input is assumed from stdin unless --clip is specified')
parser.add_argument('--stdin',
dest='stdin', action='store_true', required=False,
help='read data from stdin (this is default if not specified)')
parser.add_argument('--clip',
dest='clip', action='store_true', required=False,
help='read data from system clipboard')
parser.add_argument('--type',
dest='input_type', action='store', required=False,
choices=['xml', 'base64', 'har'], default='xml',
help='type of data being read in (default: xml)')
parser.add_argument('--compare',
dest='compare', action='store', required=False,
nargs='*',
help='enter values for comparison (no args = prompt, 1 arg = JSON file)')
parser.add_argument('--summary',
dest='summary', action='store_true', required=False,
help='displays full summary of the parsed SAML data')
parser.add_argument('--summary-only',
dest='summary_only', action='store_true', required=False,
help='do not run MongoDB-specific validation, only output summary')
parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}')
# TODO: Add XML pretty print option
parsed_args = parser.parse_args(cl_args)
if parsed_args.summary_only and parsed_args.compare is not None:
print("ERROR: Cannot specify --compare and --summary-only")
return
source = 'stdin'
filename = None
if parsed_args.filepath is None:
if parsed_args.clip:
source = 'clip'
else:
source = 'file'
filename = parsed_args.filepath
print(f"SAML READER")
print(f"----------------------")
print(f"Parsing SAML data...")
# Parse saml data before prompting for input values to not risk clipboard being erased
try:
saml_parser = TextReader(source, parsed_args.input_type, filename=filename)
except DataTypeInvalid:
if parsed_args.input_type == 'har':
print("We could not find the correct data in the HAR data specified.\n"
"Check to make sure that the input data is of the correct type.")
else:
print(f"The input data does not appear to be the specified input type '{parsed_args.input_type}'.\n"
f"Check to make sure that the input data is of the correct type.")
return
for msg in saml_parser.get_errors():
print(msg)
if not saml_parser.saml_is_valid():
return
print(f"Done")
federation_config = None
if parsed_args.compare is not None:
if len(parsed_args.compare) == 0:
federation_config = prompt_for_comparison_values()
else:
print("Parsing comparison values...")
federation_config = parse_comparison_values_from_json(parsed_args.compare[0])
print("Done")
print("------------")
verifier = MongoVerifier(saml_parser.get_saml(),
saml_parser.get_certificate(),
comparison_values=federation_config)
if not parsed_args.summary_only:
verifier.validate_configuration()
display_validation_results(verifier)
if parsed_args.summary or parsed_args.summary_only:
display_summary(verifier)
def display_validation_results(verifier):
"""
Display MongoDB Cloud-specific recommendations for identifiable issues
with the SAML data.
Args:
verifier (MongoVerifier): SAML and cert data
Returns:
None
"""
error_messages = verifier.get_error_messages()
if not error_messages:
print("No errors found! :)")
print("------------")
return
print("-----MONGODB CLOUD VERIFICATION-----")
for msg in error_messages:
print(f"\n{msg}\n------")
def display_summary(verifier):
"""
Display summary of parsed SAML data
Args:
verifier (MongoVerifier): SAML and cert data
Returns:
None
"""
print("\n-----SAML SUMMARY-----")
if verifier.has_certificate():
print(f"IDENTITY PROVIDER "
f"(from certificate):"
f"\n{verifier.get_identity_provider()}")
print("---")
print(f"ASSERTION CONSUMER SERVICE URL:"
f"\n{verifier.get_assertion_consumer_service_url() or '(this value is missing)'}")
print("---")
print(f"AUDIENCE URL:"
f"\n{verifier.get_audience_url() or '(this value is missing)'}")
print("---")
print(f"ISSUER URI:"
f"\n{verifier.get_issuer() or '(this value is missing)'}")
print("---")
print(f"ENCRYPTION ALGORITHM:"
f"\n{verifier.get_encryption_algorithm() or '(this value is missing)'}")
print("---")
print(f"NAME ID:"
f"\nValue: {verifier.get_name_id() or '(this value is missing)'}"
f"\nFormat: {verifier.get_name_id_format() or '(this value is missing)'}")
print("---")
# Checking for the required attributes for MongoDB Cloud
print(f"ATTRIBUTES:")
if not verifier.get_claim_attributes():
print("No claim attributes found")
else:
for name, value in verifier.get_claim_attributes().items():
print(f"Name: {name}")
if isinstance(value, list):
print("Values:")
for v in value:
print(f"- {v}")
else:
print(f"Value: {value}")
print("--")
def prompt_for_comparison_values():
"""
Prompt user to enter values for comparing with the SAML response data
Returns:
(MongoFederationConfig) object containing validated comparison values
"""
federation_config = MongoFederationConfig()
print("Please enter the following values for comparison with\n"
"values in the SAML response. Press Return to skip a value.")
comparison_values = [
MongoComparisonValue('firstName', "Customer First Name:", multi_value=False),
MongoComparisonValue('lastName', "Customer Last Name:", multi_value=False),
MongoComparisonValue('email', "Customer Email Address:", multi_value=False),
MongoComparisonValue('acs', "MongoDB Assertion Consumer Service URL:", multi_value=False),
MongoComparisonValue('audience', "MongoDB Audience URL:", multi_value=False),
MongoComparisonValue('domains', "Domain(s) associated with IdP:", multi_value=True),
MongoComparisonValue('issuer', "IdP Issuer URI:", multi_value=False),
        MongoComparisonValue('encryption', 'Encryption Algorithm ("SHA1" or "SHA256"):', multi_value=False),
MongoComparisonValue('role_mapping_expected', "Is customer expecting role mapping (y/N):",
multi_value=False, default="N")
]
for value in comparison_values:
federation_config.set_value(
value.get_name(),
value.prompt_for_user_input()
)
if federation_config.get_value('role_mapping_expected'):
member_of_values = MongoComparisonValue(
'memberOf',
"Expected role mapping group names (if unknown, leave blank):",
multi_value=True
).prompt_for_user_input()
federation_config.set_value(
'memberOf',
member_of_values
)
print("------------")
return federation_config
def parse_comparison_values_from_json(filename):
"""
Read comparison values from JSON file and validate
Args:
filename (basestring): path to JSON-formatted file with comparison values
See `saml_reader.mongo.VALIDATION_REGEX_BY_ATTRIB` for valid fields.
Returns:
(MongoFederationConfig) object containing validated comparison values
"""
with open(filename, 'r') as f:
comparison_values = json.load(f)
federation_config = MongoFederationConfig(**comparison_values)
return federation_config
def start_saml_reader():
# This is the CLI hook in setup.py
cli(sys.argv[1:])
if __name__ == '__main__':
start_saml_reader()
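
# --- Illustrative comparison file (not part of the original module) ------------
# parse_comparison_values_from_json() expects a flat JSON object whose keys
# match the attribute names prompted for above; every value below is made up,
# and whether multi-valued fields accept JSON lists here is an assumption.
#
#   {
#       "firstName": "Sam",
#       "lastName": "Example",
#       "email": "sam@example.com",
#       "acs": "https://auth.example.com/sso/saml2/abc123",
#       "audience": "https://www.example.com/saml2/service-provider/xyz",
#       "domains": ["example.com"],
#       "issuer": "urn:example:idp",
#       "encryption": "SHA256"
#   }
#
# Invoked (console-script name assumed) as:
#   saml-reader response.har --type har --compare values.json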
| """
Command line interface for SAML Reader parser. These functions handle all
user interaction/display.
"""
import sys
import json
import argparse
from saml_reader.text_reader import TextReader
from saml_reader.validation.mongo import MongoFederationConfig, MongoVerifier, MongoComparisonValue
from saml_reader.saml.parser import DataTypeInvalid
from saml_reader import __version__
def cli(cl_args):
"""
Entrypoint for the command line interface. Handles parsing command line arguments.
Args:
cl_args (iterable): Command-line arguments. Possibilities:
- `<filepath>`: positional argument. Path to input file. If omitted,
data will be read in from stdin unless `--clip` is specified.
- `--stdin`: optional argument. Specifying will read data from stdin.
- `--clip`: optional argument. Specifying will read data from clipboard
- `--type <type>`: optional argument, default: 'xml'. Specifies the data type
to be read in. Must be one of: 'xml', 'base64', 'har'
- `--compare <file, optional>`: optional argument. Compare SAML data vs. data entered
by user. If no file is specified, application will prompt for values. If file
specified, must be JSON file which matches the attribute keys in
`mongo.VALIDATION_REGEX_BY_ATTRIB`
- `--summary`: optional argument. Will output a summary of relevant
data read from SAML response.
- `--summary-only`: optional argument. Only outputs summary info, does not perform
MongoDB Cloud tests
- `--version`: optional argument. Displays version information and exits.
- `--help`: optional argument. Displays help information and exits.
Returns:
None
"""
parser = argparse.ArgumentParser(prog="SAML Reader",
description='Read a SAML response and pull out '
'relevant values for diagnosing '
'federated authentication issues.')
# TODO: Look into having argparse verify if the path is valid
parser.add_argument('filepath', metavar="PATH", action='store',
default=None, nargs='?',
help='path for source file. If omitted, '
'input is assumed from stdin unless --clip is specified')
parser.add_argument('--stdin',
dest='stdin', action='store_true', required=False,
help='read data from stdin (this is default if not specified)')
parser.add_argument('--clip',
dest='clip', action='store_true', required=False,
help='read data from system clipboard')
parser.add_argument('--type',
dest='input_type', action='store', required=False,
choices=['xml', 'base64', 'har'], default='xml',
help='type of data being read in (default: xml)')
parser.add_argument('--compare',
dest='compare', action='store', required=False,
nargs='*',
help='enter values for comparison (no args = prompt, 1 arg = JSON file)')
parser.add_argument('--summary',
dest='summary', action='store_true', required=False,
help='displays full summary of the parsed SAML data')
parser.add_argument('--summary-only',
dest='summary_only', action='store_true', required=False,
help='do not run MongoDB-specific validation, only output summary')
parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}')
# TODO: Add XML pretty print option
parsed_args = parser.parse_args(cl_args)
if parsed_args.summary_only and parsed_args.compare is not None:
print("ERROR: Cannot specify --compare and --summary-only")
return
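    # Decide the input source: stdin by default, clipboard with --clip, or a file when a path is given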
source = 'stdin'
filename = None
if parsed_args.filepath is None:
if parsed_args.clip:
source = 'clip'
else:
source = 'file'
filename = parsed_args.filepath
print(f"SAML READER")
print(f"----------------------")
print(f"Parsing SAML data...")
# Parse saml data before prompting for input values to not risk clipboard being erased
try:
saml_parser = TextReader(source, parsed_args.input_type, filename=filename)
except DataTypeInvalid:
if parsed_args.input_type == 'har':
print("We could not find the correct data in the HAR data specified.\n"
"Check to make sure that the input data is of the correct type.")
else:
print(f"The input data does not appear to be the specified input type '{parsed_args.input_type}'.\n"
f"Check to make sure that the input data is of the correct type.")
return
for msg in saml_parser.get_errors():
print(msg)
if not saml_parser.saml_is_valid():
return
print(f"Done")
federation_config = None
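    # --compare with no arguments prompts interactively; with one argument it loads expected values from a JSON file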
if parsed_args.compare is not None:
if len(parsed_args.compare) == 0:
federation_config = prompt_for_comparison_values()
else:
print("Parsing comparison values...")
federation_config = parse_comparison_values_from_json(parsed_args.compare[0])
print("Done")
print("------------")
verifier = MongoVerifier(saml_parser.get_saml(),
saml_parser.get_certificate(),
comparison_values=federation_config)
if not parsed_args.summary_only:
verifier.validate_configuration()
display_validation_results(verifier)
if parsed_args.summary or parsed_args.summary_only:
display_summary(verifier)
def display_validation_results(verifier):
"""
Display MongoDB Cloud-specific recommendations for identifiable issues
with the SAML data.
Args:
verifier (MongoVerifier): SAML and cert data
Returns:
None
"""
error_messages = verifier.get_error_messages()
if not error_messages:
print("No errors found! :)")
print("------------")
return
print("-----MONGODB CLOUD VERIFICATION-----")
for msg in error_messages:
print(f"\n{msg}\n------")
def display_summary(verifier):
"""
Display summary of parsed SAML data
Args:
verifier (MongoVerifier): SAML and cert data
Returns:
None
"""
print("\n-----SAML SUMMARY-----")
if verifier.has_certificate():
print(f"IDENTITY PROVIDER "
f"(from certificate):"
f"\n{verifier.get_identity_provider()}")
print("---")
print(f"ASSERTION CONSUMER SERVICE URL:"
f"\n{verifier.get_assertion_consumer_service_url() or '(this value is missing)'}")
print("---")
print(f"AUDIENCE URL:"
f"\n{verifier.get_audience_url() or '(this value is missing)'}")
print("---")
print(f"ISSUER URI:"
f"\n{verifier.get_issuer() or '(this value is missing)'}")
print("---")
print(f"ENCRYPTION ALGORITHM:"
f"\n{verifier.get_encryption_algorithm() or '(this value is missing)'}")
print("---")
print(f"NAME ID:"
f"\nValue: {verifier.get_name_id() or '(this value is missing)'}"
f"\nFormat: {verifier.get_name_id_format() or '(this value is missing)'}")
print("---")
# Checking for the required attributes for MongoDB Cloud
print(f"ATTRIBUTES:")
if not verifier.get_claim_attributes():
print("No claim attributes found")
else:
for name, value in verifier.get_claim_attributes().items():
print(f"Name: {name}")
if isinstance(value, list):
print("Values:")
for v in value:
print(f"- {v}")
else:
print(f"Value: {value}")
print("--")
def prompt_for_comparison_values():
"""
Prompt user to enter values for comparing with the SAML response data
Returns:
(MongoFederationConfig) object containing validated comparison values
"""
federation_config = MongoFederationConfig()
print("Please enter the following values for comparison with\n"
"values in the SAML response. Press Return to skip a value.")
comparison_values = [
MongoComparisonValue('firstName', "Customer First Name:", multi_value=False),
MongoComparisonValue('lastName', "Customer Last Name:", multi_value=False),
MongoComparisonValue('email', "Customer Email Address:", multi_value=False),
MongoComparisonValue('acs', "MongoDB Assertion Consumer Service URL:", multi_value=False),
MongoComparisonValue('audience', "MongoDB Audience URL:", multi_value=False),
MongoComparisonValue('domains', "Domain(s) associated with IdP:", multi_value=True),
MongoComparisonValue('issuer', "IdP Issuer URI:", multi_value=False),
        MongoComparisonValue('encryption', 'Encryption Algorithm ("SHA1" or "SHA256"):', multi_value=False),
MongoComparisonValue('role_mapping_expected', "Is customer expecting role mapping (y/N):",
multi_value=False, default="N")
]
for value in comparison_values:
federation_config.set_value(
value.get_name(),
value.prompt_for_user_input()
)
if federation_config.get_value('role_mapping_expected'):
member_of_values = MongoComparisonValue(
'memberOf',
"Expected role mapping group names (if unknown, leave blank):",
multi_value=True
).prompt_for_user_input()
federation_config.set_value(
'memberOf',
member_of_values
)
print("------------")
return federation_config
def parse_comparison_values_from_json(filename):
"""
Read comparison values from JSON file and validate
Args:
filename (basestring): path to JSON-formatted file with comparison values
See `saml_reader.mongo.VALIDATION_REGEX_BY_ATTRIB` for valid fields.
Returns:
(MongoFederationConfig) object containing validated comparison values
"""
with open(filename, 'r') as f:
comparison_values = json.load(f)
federation_config = MongoFederationConfig(**comparison_values)
return federation_config
def start_saml_reader():
# This is the CLI hook in setup.py
cli(sys.argv[1:])
if __name__ == '__main__':
start_saml_reader() | en | 0.502774 | Command line interface for SAML Reader parser. These functions handle all user interaction/display. Entrypoint for the command line interface. Handles parsing command line arguments. Args: cl_args (iterable): Command-line arguments. Possibilities: - `<filepath>`: positional argument. Path to input file. If omitted, data will be read in from stdin unless `--clip` is specified. - `--stdin`: optional argument. Specifying will read data from stdin. - `--clip`: optional argument. Specifying will read data from clipboard - `--type <type>`: optional argument, default: 'xml'. Specifies the data type to be read in. Must be one of: 'xml', 'base64', 'har' - `--compare <file, optional>`: optional argument. Compare SAML data vs. data entered by user. If no file is specified, application will prompt for values. If file specified, must be JSON file which matches the attribute keys in `mongo.VALIDATION_REGEX_BY_ATTRIB` - `--summary`: optional argument. Will output a summary of relevant data read from SAML response. - `--summary-only`: optional argument. Only outputs summary info, does not perform MongoDB Cloud tests - `--version`: optional argument. Displays version information and exits. - `--help`: optional argument. Displays help information and exits. Returns: None # TODO: Look into having argparse verify if the path is valid # TODO: Add XML pretty print option # Parse saml data before prompting for input values to not risk clipboard being erased Display MongoDB Cloud-specific recommendations for identifiable issues with the SAML data. Args: verifier (MongoVerifier): SAML and cert data Returns: None Display summary of parsed SAML data Args: verifier (MongoVerifier): SAML and cert data Returns: None # Checking for the required attributes for MongoDB Cloud Prompt user to enter values for comparing with the SAML response data Returns: (MongoFederationConfig) object containing validated comparison values Read comparison values from JSON file and validate Args: filename (basestring): path to JSON-formatted file with comparison values See `saml_reader.mongo.VALIDATION_REGEX_BY_ATTRIB` for valid fields. Returns: (MongoFederationConfig) object containing validated comparison values # This is the CLI hook in setup.py | 3.192089 | 3 |
routes/weibo.py | naturalwang/flask-blog | 0 | 6613835 | from models.weibo import Weibo
from models.weibo import Comment
from models.user import User
from routes import *
from utils import log
# for decorators
from functools import wraps
main = Blueprint('weibo', __name__)
Model = Weibo
def current_user():
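    # Return the logged-in User from the session, or None when not authenticated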
uid = session.get('user_id')
if uid is not None:
u = User.query.get(uid)
return u
# def admin_required(f):
# @wraps(f)
# def function(*args, **kwargs):
# # your code
# print('admin required')
# if request.args.get('uid') != '1':
# print('not admin')
# abort(404)
# return f(*args, **kwargs)
# return function
@main.route('/')
def index():
u = current_user()
if u is None:
return redirect(url_for('user.login_view'))
    # Fetch all weibo posts and return them
weibos = Weibo.query.order_by(Weibo.id.desc()).all()
for w in weibos:
w.comment = w.load_comments()
for c in w.comment:
c.avatar = c.get_avatar()
w.avatar = w.get_avatar()
return render_template('weibo_index.html', weibos=weibos, user=u)
@main.route('/add', methods=['POST'])
def add():
u = current_user()
if u is not None:
# log('weibo add', u.id, u.username, u.password)
form = request.form
w = Weibo(form)
w.username = u.username
w.user_id = u.id
if w.valid_add():
w.save()
# log("save", w.user_id)
return redirect(url_for('.index', username=u.username))
else:
abort(401)
@main.route('/comment', methods=['POST'])
def comment_add():
u = current_user()
if u is not None:
# log('comment_add', u.id, u.username)
form = request.form
c = Comment(form)
c.user_id = u.id
c.username = u.username
c.weibo_id = int(form.get('weibo_id', -1))
if c.valid_add():
c.save()
return redirect(url_for('.index', username=u.username))
else:
abort(401)
@main.route('/delete/<int:id>')
# @admin_required
def delete(id):
u = current_user()
w = Model.query.get(id)
    if u is not None and w is not None and u.id == w.user_id:  # guard against anonymous users and missing posts
w.delete()
return redirect(url_for('.index'))
else:
return redirect(url_for('.index'))
| from models.weibo import Weibo
from models.weibo import Comment
from models.user import User
from routes import *
from utils import log
# for decorators
from functools import wraps
main = Blueprint('weibo', __name__)
Model = Weibo
def current_user():
uid = session.get('user_id')
if uid is not None:
u = User.query.get(uid)
return u
# def admin_required(f):
# @wraps(f)
# def function(*args, **kwargs):
# # your code
# print('admin required')
# if request.args.get('uid') != '1':
# print('not admin')
# abort(404)
# return f(*args, **kwargs)
# return function
@main.route('/')
def index():
u = current_user()
if u is None:
return redirect(url_for('user.login_view'))
    # Fetch all weibo posts and return them
weibos = Weibo.query.order_by(Weibo.id.desc()).all()
for w in weibos:
w.comment = w.load_comments()
for c in w.comment:
c.avatar = c.get_avatar()
w.avatar = w.get_avatar()
return render_template('weibo_index.html', weibos=weibos, user=u)
@main.route('/add', methods=['POST'])
def add():
u = current_user()
if u is not None:
# log('weibo add', u.id, u.username, u.password)
form = request.form
w = Weibo(form)
w.username = u.username
w.user_id = u.id
if w.valid_add():
w.save()
# log("save", w.user_id)
return redirect(url_for('.index', username=u.username))
else:
abort(401)
@main.route('/comment', methods=['POST'])
def comment_add():
u = current_user()
if u is not None:
# log('comment_add', u.id, u.username)
form = request.form
c = Comment(form)
c.user_id = u.id
c.username = u.username
c.weibo_id = int(form.get('weibo_id', -1))
if c.valid_add():
c.save()
return redirect(url_for('.index', username=u.username))
else:
abort(401)
@main.route('/delete/<int:id>')
# @admin_required
def delete(id):
u = current_user()
w = Model.query.get(id)
    if u is not None and w is not None and u.id == w.user_id:  # guard against anonymous users and missing posts
w.delete()
return redirect(url_for('.index'))
else:
return redirect(url_for('.index'))
| en | 0.202534 | # for decorators # def admin_required(f): # @wraps(f) # def function(*args, **kwargs): # # your code # print('admin required') # if request.args.get('uid') != '1': # print('not admin') # abort(404) # return f(*args, **kwargs) # return function # 查找所有的 todo 并返回 # log('weibo add', u.id, u.username, u.password) # log("save", w.user_id) # log('comment_add', u.id, u.username) # @admin_required | 2.408196 | 2 |
setup.py | grktsh/python-cloud-tasks-deferred | 3 | 6613836 | from setuptools import setup
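# use_scm_version (from setuptools_scm) derives the package version from VCS tags and writes it to the file below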
setup(
use_scm_version={
'write_to': 'src/cloud_tasks_deferred/__version__.py',
'write_to_template': '__version__ = {version!r}\n',
}
)
| from setuptools import setup
setup(
use_scm_version={
'write_to': 'src/cloud_tasks_deferred/__version__.py',
'write_to_template': '__version__ = {version!r}\n',
}
)
| none | 1 | 1.118006 | 1 | |
pythonlearn/Socket-Http/Socket/udpServe.py | yc19890920/Learn | 1 | 6613837 | # -*- coding: utf-8 -*-
import socket
from time import ctime
# Address and Port
HOST = ''
PORT = 8000
ADDR = (HOST, PORT)
# BufferSize
BUFFSIZE = 1024
# build socket
udpSerSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# bind socket
udpSerSock.bind(ADDR)
try:
while True:
print 'waiting the message...'
data, addr = udpSerSock.recvfrom(BUFFSIZE)
print 'received the message: '+ data +' from: ', addr
udpSerSock.sendto('[%s] %s' % (ctime(), data), addr)
except (EOFError, KeyboardInterrupt):
udpSerSock.close() | # -*- coding: utf-8 -*-
import socket
from time import ctime
# Address and Port
HOST = ''
PORT = 8000
ADDR = (HOST, PORT)
# BufferSize
BUFFSIZE = 1024
# build socket
udpSerSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# bind socket
udpSerSock.bind(ADDR)
try:
while True:
print 'waiting the message...'
data, addr = udpSerSock.recvfrom(BUFFSIZE)
print 'received the message: '+ data +' from: ', addr
udpSerSock.sendto('[%s] %s' % (ctime(), data), addr)
except (EOFError, KeyboardInterrupt):
udpSerSock.close() | en | 0.723807 | # -*- coding: utf-8 -*- # Address and Port # BufferSize # build socket # bind socket | 2.829326 | 3 |
codewars/difficulty_level_6kyu/unique_in_order/test_solution_unique_in_order.py | aleattene/python-codewars-challenges | 1 | 6613838 | <filename>codewars/difficulty_level_6kyu/unique_in_order/test_solution_unique_in_order.py
""" To start the tests, type from CLI: python test_solution_sum_of_missing_numbers.py """
import unittest
from solution_unique_in_order import unique_in_order
class TestSolution(unittest.TestCase):
def test_string(self):
# ONE ELEMENT
self.assertEqual(unique_in_order("A"), ['A'])
# REDUCE DUPLICATES
self.assertEqual(unique_in_order("AA"), ['A'])
self.assertEqual(unique_in_order("AAAABBBCCDAABBB"), ['A', 'B', 'C', 'D', 'A', 'B'])
self.assertEqual(unique_in_order("AADD"), ['A', 'D'])
self.assertEqual(unique_in_order("AAD"), ['A', 'D'])
self.assertEqual(unique_in_order("ADD"), ['A', 'D'])
# LOWERCASE AS DIFFERENT FROM UPPERCASE
self.assertEqual(unique_in_order("ABBCcAD"), ['A', 'B', 'C', 'c', 'A', 'D'])
def test_lists(self):
# EMPTY LIST
self.assertEqual(unique_in_order([]), [])
# SOME LISTS
self.assertEqual(unique_in_order([1, 2, 3, 3]), [1, 2, 3])
self.assertEqual(unique_in_order([1, 1, 2, 2, 3, 3, 1, 1 ]), [1, 2, 3, 1])
self.assertEqual(unique_in_order(['A', 'B', 'C', 'c', 'A', 'D']), ['A', 'B', 'C', 'c', 'A', 'D'])
self.assertEqual(unique_in_order(['a', 'b', 'b']), ['a', 'b'])
if __name__ == '__main__':
""" The following instruction executes the tests
by discovering all classes present in this file
that inherit from unittest.TestCase.
"""
unittest.main()
| <filename>codewars/difficulty_level_6kyu/unique_in_order/test_solution_unique_in_order.py
""" To start the tests, type from CLI: python test_solution_sum_of_missing_numbers.py """
import unittest
from solution_unique_in_order import unique_in_order
class TestSolution(unittest.TestCase):
def test_string(self):
# ONE ELEMENT
self.assertEqual(unique_in_order("A"), ['A'])
# REDUCE DUPLICATES
self.assertEqual(unique_in_order("AA"), ['A'])
self.assertEqual(unique_in_order("AAAABBBCCDAABBB"), ['A', 'B', 'C', 'D', 'A', 'B'])
self.assertEqual(unique_in_order("AADD"), ['A', 'D'])
self.assertEqual(unique_in_order("AAD"), ['A', 'D'])
self.assertEqual(unique_in_order("ADD"), ['A', 'D'])
# LOWERCASE AS DIFFERENT FROM UPPERCASE
self.assertEqual(unique_in_order("ABBCcAD"), ['A', 'B', 'C', 'c', 'A', 'D'])
def test_lists(self):
# EMPTY LIST
self.assertEqual(unique_in_order([]), [])
# SOME LISTS
self.assertEqual(unique_in_order([1, 2, 3, 3]), [1, 2, 3])
self.assertEqual(unique_in_order([1, 1, 2, 2, 3, 3, 1, 1 ]), [1, 2, 3, 1])
self.assertEqual(unique_in_order(['A', 'B', 'C', 'c', 'A', 'D']), ['A', 'B', 'C', 'c', 'A', 'D'])
self.assertEqual(unique_in_order(['a', 'b', 'b']), ['a', 'b'])
if __name__ == '__main__':
""" The following instruction executes the tests
by discovering all classes present in this file
that inherit from unittest.TestCase.
"""
unittest.main()
| en | 0.777126 | To start the tests, type from CLI: python test_solution_sum_of_missing_numbers.py # ONE ELEMENT # REDUCE DUPLICATES # LOWERCASE AS DIFFERENT FROM UPPERCASE # EMPTY LIST # SOME LISTS The following instruction executes the tests by discovering all classes present in this file that inherit from unittest.TestCase. | 3.519449 | 4 |
trex_train.py | uzunb/Trex-CNN | 5 | 6613839 | import glob
import os
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from PIL import Image
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
import seaborn as sns
# for warnings
import warnings
warnings.filterwarnings("ignore")
imgs = glob.glob("./img/*.png")
# img size
width = 250
height = 100
X = []
Y = []
for img in imgs:
fileName = os.path.basename(img)
label = fileName.split('_')[0]
# "L" for grayscale. normalized with 255
im = np.array(Image.open(img).convert("L").resize((width, height))) / 255
X.append(im)
Y.append(label)
X = np.array(X)
X = X.reshape(X.shape[0], height, width, 1) # 1 is channel; numpy arrays from PIL are (height, width)
def onehotLabels(values):
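    # Map string labels to integers, then expand them to one-hot vectors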
labelEncoder = LabelEncoder()
integerEncoded = labelEncoder.fit_transform(values)
onehotEncoder = OneHotEncoder(sparse=False)
integerEncoded = integerEncoded.reshape(len(integerEncoded), 1)
onehot_encoded = onehotEncoder.fit_transform(integerEncoded)
return onehot_encoded
Y = onehotLabels(Y)
#train_test_split
trainX, testX, trainY, testY = train_test_split(X, Y, test_size = 0.25, random_state = 2)
# CNN Model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(height, width, 1)))
model.add(Conv2D(64, kernel_size=(3,3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(3, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="Adam", metrics= ["accuracy"])
model.fit(trainX, trainY, epochs=35, batch_size=64)
# Load Trained Weights
# if os.path.exists("./trex_weight.h5"):
# model.load_weights("trex_weight.h5")
# print("Weights loaded.")
score_train = model.evaluate(trainX, trainY)
print("Training Accuracy: %",score_train[1]*100)
score_test = model.evaluate(testX, testY)
print("Test Accuracy: %",score_test[1]*100)
open("model_new.json","w").write(model.to_json())
model.save_weights("trex_weight_new.h5")
| import glob
import os
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from PIL import Image
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
import seaborn as sns
# for warnings
import warnings
warnings.filterwarnings("ignore")
imgs = glob.glob("./img/*.png")
# img size
width = 250
height = 100
X = []
Y = []
for img in imgs:
fileName = os.path.basename(img)
label = fileName.split('_')[0]
# "L" for grayscale. normalized with 255
im = np.array(Image.open(img).convert("L").resize((width, height))) / 255
X.append(im)
Y.append(label)
X = np.array(X)
X = X.reshape(X.shape[0], height, width, 1) # 1 is channel; numpy arrays from PIL are (height, width)
def onehotLabels(values):
labelEncoder = LabelEncoder()
integerEncoded = labelEncoder.fit_transform(values)
onehotEncoder = OneHotEncoder(sparse=False)
integerEncoded = integerEncoded.reshape(len(integerEncoded), 1)
onehot_encoded = onehotEncoder.fit_transform(integerEncoded)
return onehot_encoded
Y = onehotLabels(Y)
#train_test_split
trainX, testX, trainY, testY = train_test_split(X, Y, test_size = 0.25, random_state = 2)
# CNN Model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(height, width, 1)))
model.add(Conv2D(64, kernel_size=(3,3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(3, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="Adam", metrics= ["accuracy"])
model.fit(trainX, trainY, epochs=35, batch_size=64)
# Load Trained Weights
# if os.path.exists("./trex_weight.h5"):
# model.load_weights("trex_weight.h5")
# print("Weights loaded.")
score_train = model.evaluate(trainX, trainY)
print("Training Accuracy: %",score_train[1]*100)
score_test = model.evaluate(testX, testY)
print("Test Accuracy: %",score_test[1]*100)
open("model_new.json","w").write(model.to_json())
model.save_weights("trex_weight_new.h5")
| en | 0.58232 | # for warnings # img size # "L" for grayscale. normalized with 255 # 1 is channel #train_test_split # CNN Model # Load Trained Weights # if os.path.exists("./trex_weight.h5"): # model.load_weights("trex_weight.h5") # print("Weights loaded.") | 3.067984 | 3 |
indra/sources/lincs_drug/__init__.py | djinnome/indra | 0 | 6613840 | """This module provides an API and processor for the HMS LINCS small molecule
target relationship database. This is a manually curated set of relationships
with the "nominal" target of each drug determined by a human expert. Note that
the determination of the "nominal" target is not always backed up by
experimentally measured affinities. The underlying data is available here:
http://lincs.hms.harvard.edu/db/datasets/20000/results
"""
from .api import *
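# A minimal usage sketch; process_from_web() is provided by .api in current
# INDRA releases, but treat the exact call as an assumption:
#     from indra.sources import lincs_drug
#     lp = lincs_drug.process_from_web()
#     stmts = lp.statements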
| """This module provides and API and processor for the HMS LINCS small molecule
target relationship database. This is a manually curated set of relationships
with the "nominal" target of each drug determined by a human expert. Note that
the determination of the "nominal" target is not always backed up by
experimentally measured affinities. The underlying data is available here:
http://lincs.hms.harvard.edu/db/datasets/20000/results
"""
from .api import *
| en | 0.883941 | This module provides and API and processor for the HMS LINCS small molecule target relationship database. This is a manually curated set of relationships with the "nominal" target of each drug determined by a human expert. Note that the determination of the "nominal" target is not always backed up by experimentally measured affinities. The underlying data is available here: http://lincs.hms.harvard.edu/db/datasets/20000/results | 1.160696 | 1 |
login.py | Gstar320/git_ssh | 0 | 6613841 | a=10
b=10
# merge conflict resolved: both branches set c=6; the incoming branch also added d=66
c=6
d=66
| a=10
b=10
# merge conflict resolved: both branches set c=6; the incoming branch also added d=66
c=6
d=66
| none | 1 | 1.396927 | 1 | |
services/abstract_score_service.py | Design-Patterns-Project-Group/swimming-mangement-in-python | 0 | 6613842 | <reponame>Design-Patterns-Project-Group/swimming-mangement-in-python
from abc import ABC, abstractmethod
class AbstractScoreService(ABC):
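    # Abstract interface: implementations look up score records by season and/or age group, by object or by name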
@abstractmethod
def getAllBySeason(self, season):
pass
@abstractmethod
def getAllBySeasonName(self, season_name):
pass
@abstractmethod
def getAllBySeasonWithAgeGroup(self, age_group, season):
pass
@abstractmethod
def getAllBySeasonWithAgeGroupNames(self, age_group_name, season_name):
pass
| from abc import ABC, abstractmethod
class AbstractScoreService(ABC):
@abstractmethod
def getAllBySeason(self, season):
pass
@abstractmethod
def getAllBySeasonName(self, season_name):
pass
@abstractmethod
def getAllBySeasonWithAgeGroup(self, age_group, season):
pass
@abstractmethod
def getAllBySeasonWithAgeGroupNames(self, age_group_name, season_name):
pass | none | 1 | 3.123749 | 3 | |
config.py | avihaie/bug-hunter | 0 | 6613843 | <gh_stars>0
import os
from rrmng.rrmngmnt import Host, RootUser
ROOT_USER = "root"
LOCALHOST_LOGS_PATH = os.path.expanduser("~/tmp/bug_hunter_logs")
SHORT_LOGS_DIR = "/short_logs"
LOCAL_ROOT_PASSWORD = "<PASSWORD>"
# Slave/local host
SLAVE_HOST = Host("127.0.0.1")
SLAVE_HOST.users.append(RootUser(LOCAL_ROOT_PASSWORD))
FULL_PERMISSIONS = "777"
| import os
from rrmng.rrmngmnt import Host, RootUser
ROOT_USER = "root"
LOCALHOST_LOGS_PATH = os.path.expanduser("~/tmp/bug_hunter_logs")
SHORT_LOGS_DIR = "/short_logs"
LOCAL_ROOT_PASSWORD = "<PASSWORD>"
# Slave/local host
SLAVE_HOST = Host("127.0.0.1")
SLAVE_HOST.users.append(RootUser(LOCAL_ROOT_PASSWORD))
FULL_PERMISSIONS = "777" | en | 0.895784 | # Slave/local host | 1.603828 | 2 |
0ak/app/__init__.py | akkayin/0aK | 2 | 6613844 | import os
import click
import sys
import logging
from flask import Flask, render_template, request  # request is used by RequestFormatter in register_logging
from app.blueprints.blog import blog_bp
from app.blueprints.auth import auth_bp
from app.blueprints.admin import admin_bp
from app.extensions import db, moment, bootstrap, login_manager, csrf, ckeditor, pagedown, migrate, sitemap, search
from app.models import Admin, Post, Category, Tag
from flask_wtf.csrf import CSRFError
from logging.handlers import SMTPHandler, RotatingFileHandler
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def create_app(config_name = None):
app = Flask('app')
register_logging(app)
register_errors(app)
register_blueprints(app)
register_commands(app)
# SQLite URI compatible
WIN = sys.platform.startswith('win')
if WIN:
prefix = 'sqlite:///'
else:
prefix = 'sqlite:////'
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', prefix + os.path.join(app.root_path, 'data.db'))
# mysql
# app.config['SQLALCHEMY_DATABASE_URI']='mysql+pymysql://root:root@127.0.0.1:3306/0ak'
# With this setting, pending database changes are committed automatically at the end of each request
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']=True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = "secret_key"
register_extensions(app)
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'Post': Post}
return app
def register_blueprints(app):
app.register_blueprint(blog_bp)
app.register_blueprint(auth_bp, url_prefix='/auth')
app.register_blueprint(admin_bp, url_prefix='/admin')
def register_logging(app):
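    # Attach a rotating file handler; request_formatter is defined for request-aware logs, but file_handler uses the plain formatter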
class RequestFormatter(logging.Formatter):
def format(self, record):
record.url = request.url
record.remote_addr = request.remote_addr
return super(RequestFormatter, self).format(record)
request_formatter = RequestFormatter(
'[%(asctime)s] %(remote_addr)s requested %(url)s\n'
'%(levelname)s in %(module)s: %(message)s'
)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = RotatingFileHandler(os.path.join(basedir, 'logs/app.log'),
maxBytes=10 * 1024 * 1024, backupCount=10)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
if not app.debug:
app.logger.addHandler(file_handler)
def register_extensions(app):
db.init_app(app)
bootstrap.init_app(app)
ckeditor.init_app(app)
login_manager.init_app(app)
csrf.init_app(app)
pagedown.init_app(app)
moment.init_app(app)
migrate.init_app(app, db)
sitemap.init_app(app)
search.init_app(app)
def register_errors(app):
@app.errorhandler(404)
def page_not_found(e):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('errors/404.html'), 500
def register_commands(app):
@app.cli.command()
@click.option('--drop', is_flag=True, help='Create after drop.')
def initdb(drop):
"""Initialize the database."""
if drop:
click.confirm('This operation will delete the database, do you want to continue?', abort=True)
db.drop_all()
click.echo('Drop tables.')
db.create_all()
click.echo('Initialized database.')
@app.cli.command()
@click.option('--username', prompt=True, help='The username used to login.')
@click.option('--password', prompt=True, hide_input=True,
confirmation_prompt=True, help='The password used to login.')
def init(username, password):
"""Building Bluelog, just for you."""
click.echo('Initializing the database...')
db.create_all()
admin = Admin.query.first()
if admin is not None:
click.echo('The administrator already exists, updating...')
admin.username = username
admin.set_password(password)
else:
click.echo('Creating the temporary administrator account...')
admin = Admin(
username=username
)
admin.set_password(password)
db.session.add(admin)
category = Category.query.first()
if category is None:
click.echo('Creating the default category...')
category = Category(name='Default')
db.session.add(category)
db.session.commit()
click.echo('Done.')
@app.cli.command()
@click.option('--category', default=10, help='Quantity of categories, default is 10.')
@click.option('--post', default=50, help='Quantity of posts, default is 50.')
@click.option('--tag', default=10, help='Quantity of tags, default is 10.')
def forge(category, post, tag):
"""Generate fake data."""
from app.fakes import fake_categories, fake_posts, fake_tags
db.drop_all()
db.create_all()
click.echo('Generating %d categories...' % category)
fake_categories(category)
click.echo('Generating %d tags...' % tag)
fake_tags(tag)
click.echo('Generating %d posts...' % post)
fake_posts(post)
click.echo('Done.')
| import os
import click
import sys
import logging
from flask import Flask, render_template, request  # request is used by RequestFormatter in register_logging
from app.blueprints.blog import blog_bp
from app.blueprints.auth import auth_bp
from app.blueprints.admin import admin_bp
from app.extensions import db, moment, bootstrap, login_manager, csrf, ckeditor, pagedown, migrate, sitemap, search
from app.models import Admin, Post, Category, Tag
from flask_wtf.csrf import CSRFError
from logging.handlers import SMTPHandler, RotatingFileHandler
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def create_app(config_name = None):
app = Flask('app')
register_logging(app)
register_errors(app)
register_blueprints(app)
register_commands(app)
# SQLite URI compatible
WIN = sys.platform.startswith('win')
if WIN:
prefix = 'sqlite:///'
else:
prefix = 'sqlite:////'
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', prefix + os.path.join(app.root_path, 'data.db'))
# mysql
# app.config['SQLALCHEMY_DATABASE_URI']='mysql+pymysql://root:root@127.0.0.1:3306/0ak'
# With this setting, pending database changes are committed automatically at the end of each request
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']=True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = "secret_key"
register_extensions(app)
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'Post': Post}
return app
def register_blueprints(app):
app.register_blueprint(blog_bp)
app.register_blueprint(auth_bp, url_prefix='/auth')
app.register_blueprint(admin_bp, url_prefix='/admin')
def register_logging(app):
class RequestFormatter(logging.Formatter):
def format(self, record):
record.url = request.url
record.remote_addr = request.remote_addr
return super(RequestFormatter, self).format(record)
request_formatter = RequestFormatter(
'[%(asctime)s] %(remote_addr)s requested %(url)s\n'
'%(levelname)s in %(module)s: %(message)s'
)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = RotatingFileHandler(os.path.join(basedir, 'logs/app.log'),
maxBytes=10 * 1024 * 1024, backupCount=10)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
if not app.debug:
app.logger.addHandler(file_handler)
def register_extensions(app):
db.init_app(app)
bootstrap.init_app(app)
ckeditor.init_app(app)
login_manager.init_app(app)
csrf.init_app(app)
pagedown.init_app(app)
moment.init_app(app)
migrate.init_app(app, db)
sitemap.init_app(app)
search.init_app(app)
def register_errors(app):
@app.errorhandler(404)
def page_not_found(e):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('errors/404.html'), 500
def register_commands(app):
@app.cli.command()
@click.option('--drop', is_flag=True, help='Create after drop.')
def initdb(drop):
"""Initialize the database."""
if drop:
click.confirm('This operation will delete the database, do you want to continue?', abort=True)
db.drop_all()
click.echo('Drop tables.')
db.create_all()
click.echo('Initialized database.')
@app.cli.command()
@click.option('--username', prompt=True, help='The username used to login.')
@click.option('--password', prompt=True, hide_input=True,
confirmation_prompt=True, help='The password used to login.')
def init(username, password):
"""Building Bluelog, just for you."""
click.echo('Initializing the database...')
db.create_all()
admin = Admin.query.first()
if admin is not None:
click.echo('The administrator already exists, updating...')
admin.username = username
admin.set_password(password)
else:
click.echo('Creating the temporary administrator account...')
admin = Admin(
username=username
)
admin.set_password(password)
db.session.add(admin)
category = Category.query.first()
if category is None:
click.echo('Creating the default category...')
category = Category(name='Default')
db.session.add(category)
db.session.commit()
click.echo('Done.')
@app.cli.command()
@click.option('--category', default=10, help='Quantity of categories, default is 10.')
@click.option('--post', default=50, help='Quantity of posts, default is 50.')
@click.option('--tag', default=10, help='Quantity of tags, default is 10.')
def forge(category, post, tag):
"""Generate fake data."""
from app.fakes import fake_categories, fake_posts, fake_tags
db.drop_all()
db.create_all()
click.echo('Generating %d categories...' % category)
fake_categories(category)
click.echo('Generating %d tags...' % tag)
fake_tags(tag)
click.echo('Generating %d posts...' % post)
fake_posts(post)
click.echo('Done.')
| en | 0.213202 | # SQLite URI compatible # mysql # app.config['SQLALCHEMY_DATABASE_URI']='mysql+pymysql://root:root@127.0.0.1:3306/0ak' #设置这一项是每次请求结束后都会自动提交数据库中的变动 Initialize the database. Building Bluelog, just for you. Generate fake data. | 2.101404 | 2 |
train_PASSRnet.py | Yuval-H/iclr_17_compression | 0 | 6613845 | <filename>train_PASSRnet.py
import PIL.Image
import torch
import torch.nn as nn  # nn.MSELoss/nn.L1Loss are used below; imported explicitly rather than relying on 'from losses import *'
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets import StereoDataset_passrNet
import time
import torchvision
from losses import *
from models.PASSRnet import PASSRnet
############## Train parameters ##############
path_to_reconstructed_images = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/try-GPNN/reconstructed'
#stereo_dir_2012 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_stereo_flow_multiview'
#stereo_dir_2015 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_scene_flow_multiview'
stereo_dir_2012 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_stereo_flow_multiview'
stereo_dir_2015 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_scene_flow_multiview'
batch_size = 1
lr_start = 1e-4
epoch_patience = 6
n_epochs = 25000
val_every = 1
save_every = 2000
using_blank_loss = False
hammingLossOnBinaryZ = False
useStereoPlusDataSet = False
start_from_pretrained = '/home/access/dev/weights-passr/model_best_weights1.pth'
save_path = '/home/access/dev/weights-passr'
################ Data transforms ################
tsfm = transforms.Compose([transforms.ToTensor()])
#tsfm = transforms.Compose([transforms.CenterCrop((320, 640)), transforms.ToTensor()])
tsfm_val = transforms.Compose([transforms.CenterCrop((320, 320)), transforms.ToTensor()])
#tsfm_val = transforms.Compose([transforms.ToTensor()])
#transforms.Resize((160, 160), interpolation=3)
######### Set Seeds ###########
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
training_data = StereoDataset_passrNet(stereo_dir_2012, stereo_dir_2015, tsfm, randomCrop=True)
val_data = StereoDataset_passrNet(stereo_dir_2012, stereo_dir_2015, tsfm, randomCrop=True, isTrainingData=False)
train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_data, batch_size=1)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
# Load model:
model = PASSRnet(upscale_factor=1)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr_start)
#optimizer = torch.optim.SGD(model.parameters(), lr=lr_start, weight_decay=1e-8, momentum=0.9)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=epoch_patience, verbose=True)
epoch_start = 1
if start_from_pretrained != '':
checkpoint = torch.load(start_from_pretrained)
model.load_state_dict(checkpoint['model_state_dict'])
#optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
#scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
#epoch_start = checkpoint['epoch']
#loss = checkpoint['loss']
model.train()
# Epochs
best_loss = 10000
best_val_loss = 10000
#criterion = nn.L1Loss()
criterion_mse = nn.MSELoss()
criterion_L1 = nn.L1Loss()
for epoch in range(epoch_start, n_epochs + 1):
# monitor training loss
train_loss = 0.0
# Training
epoch_start_time = time.time()
for batch, data in enumerate(train_dataloader):
# Get stereo pair
LR_left, HR_right, HR_left = data
b, c, h, w = LR_left.shape
LR_left = LR_left.to(device)
HR_left = HR_left.to(device)
HR_right = HR_right.to(device)
optimizer.zero_grad()
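        # Forward pass returns the SR image plus parallax-attention maps and validity masks consumed by the auxiliary losses below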
SR_left, (M_right_to_left, M_left_to_right), (M_left_right_left, M_right_left_right), \
(V_left_to_right, V_right_to_left) = model(LR_left, HR_right, is_training=True)
###SR_left = model(LR_left, HR_right, is_training=True)
#msssim = pytorch_msssim.ms_ssim(images_cam1, img_recon, data_range=1.0)
#if not msssim == msssim:
# print('nan value')
#loss = criterion(SR_left, HR_left)
### loss_SR
loss_SR = criterion_mse(SR_left, HR_left)
### loss_smoothness
loss_h = criterion_L1(M_right_to_left[:, :-1, :, :], M_right_to_left[:, 1:, :, :]) + \
criterion_L1(M_left_to_right[:, :-1, :, :], M_left_to_right[:, 1:, :, :])
loss_w = criterion_L1(M_right_to_left[:, :, :-1, :-1], M_right_to_left[:, :, 1:, 1:]) + \
criterion_L1(M_left_to_right[:, :, :-1, :-1], M_left_to_right[:, :, 1:, 1:])
loss_smooth = loss_w + loss_h
### loss_cycle
Identity = (torch.eye(w, w).repeat(b, h, 1, 1)).to(device)
loss_cycle = criterion_L1(M_left_right_left * V_left_to_right.permute(0, 2, 1, 3),
Identity * V_left_to_right.permute(0, 2, 1, 3)) + \
criterion_L1(M_right_left_right * V_right_to_left.permute(0, 2, 1, 3),
Identity * V_right_to_left.permute(0, 2, 1, 3))
### loss_photometric
HR_right_warped = torch.bmm(M_right_to_left.contiguous().view(b * h, w, w),
HR_right.permute(0, 2, 3, 1).contiguous().view(b * h, w, c))
HR_right_warped = HR_right_warped.view(b, h, w, c).contiguous().permute(0, 3, 1, 2)
LR_left_warped = torch.bmm(M_left_to_right.contiguous().view(b * h, w, w),
LR_left.permute(0, 2, 3, 1).contiguous().view(b * h, w, c))
LR_left_warped = LR_left_warped.view(b, h, w, c).contiguous().permute(0, 3, 1, 2)
loss_photo = criterion_L1(LR_left * V_left_to_right, HR_right_warped * V_left_to_right) + \
criterion_L1(HR_right * V_right_to_left, LR_left_warped * V_right_to_left)
### losses
loss = loss_SR + 0.005 * (loss_photo + loss_smooth + loss_cycle)
loss.backward()
optimizer.step()
train_loss += loss.item() #* images_cam1.size(0)
train_loss = train_loss / len(train_dataloader)
# Note that step should be called after validate()
#scheduler.step(train_loss)
if train_loss < best_loss:
best_loss = train_loss
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'loss': train_loss,
}, save_path+'/model_best_weights.pth')
#save_model(model, 1, save_path) #save_model(model, epoch, save_path)
elif epoch % save_every == 0:
#save_model(model, epoch, save_path)
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'loss': train_loss,
}, save_path+'/model_weights_epoch_' + str(epoch) + '.pth')
#Validation
if epoch % val_every == 0:
# validate
model.eval()
val_loss = 0
for batch, data in enumerate(val_dataloader):
# Get stereo pair
LR_left, HR_right, HR_left = data
LR_left = LR_left.to(device)
HR_left = HR_left.to(device)
HR_right = HR_right.to(device)
# get model outputs
SR_left = model(LR_left, HR_right, is_training=False)
loss = criterion_L1(SR_left, HR_left)
val_loss += loss.item() # * images_cam1.size(0)
model.train()
val_loss = val_loss / len(val_dataloader)
scheduler.step(val_loss)
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'loss': train_loss,
}, save_path + '/model_bestVal_loss.pth')
print('Epoch: {} \tTraining Loss: {:.6f}\tVal Loss: {:.6f}\tEpoch Time: {:.6f}'
.format(epoch, train_loss, val_loss, time.time() - epoch_start_time))#, end="\r")
else:
print('Epoch: {} \tTraining Loss: {:.6f}\tEpoch Time: {:.6f}'.format(epoch, train_loss, time.time() - epoch_start_time))
torch.save(model.state_dict(), 'model_weights.pth')
print("Done!")
| <filename>train_PASSRnet.py
import PIL.Image
import torch
import torch.nn as nn  # nn.MSELoss/nn.L1Loss are used below; imported explicitly rather than relying on 'from losses import *'
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets import StereoDataset_passrNet
import time
import torchvision
from losses import *
from models.PASSRnet import PASSRnet
############## Train parameters ##############
path_to_reconstructed_images = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/try-GPNN/reconstructed'
#stereo_dir_2012 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_stereo_flow_multiview'
#stereo_dir_2015 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_scene_flow_multiview'
stereo_dir_2012 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_stereo_flow_multiview'
stereo_dir_2015 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_scene_flow_multiview'
batch_size = 1
lr_start = 1e-4
epoch_patience = 6
n_epochs = 25000
val_every = 1
save_every = 2000
using_blank_loss = False
hammingLossOnBinaryZ = False
useStereoPlusDataSet = False
start_from_pretrained = '/home/access/dev/weights-passr/model_best_weights1.pth'
save_path = '/home/access/dev/weights-passr'
################ Data transforms ################
tsfm = transforms.Compose([transforms.ToTensor()])
#tsfm = transforms.Compose([transforms.CenterCrop((320, 640)), transforms.ToTensor()])
tsfm_val = transforms.Compose([transforms.CenterCrop((320, 320)), transforms.ToTensor()])
#tsfm_val = transforms.Compose([transforms.ToTensor()])
#transforms.Resize((160, 160), interpolation=3)
######### Set Seeds ###########
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
training_data = StereoDataset_passrNet(stereo_dir_2012, stereo_dir_2015, tsfm, randomCrop=True)
val_data = StereoDataset_passrNet(stereo_dir_2012, stereo_dir_2015, tsfm, randomCrop=True, isTrainingData=False)
train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_data, batch_size=1)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
# Load model:
model = PASSRnet(upscale_factor=1)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr_start)
#optimizer = torch.optim.SGD(model.parameters(), lr=lr_start, weight_decay=1e-8, momentum=0.9)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=epoch_patience, verbose=True)
epoch_start = 1
if start_from_pretrained != '':
checkpoint = torch.load(start_from_pretrained)
model.load_state_dict(checkpoint['model_state_dict'])
#optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
#scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
#epoch_start = checkpoint['epoch']
#loss = checkpoint['loss']
model.train()
# Epochs
best_loss = 10000
best_val_loss = 10000
#criterion = nn.L1Loss()
criterion_mse = nn.MSELoss()
criterion_L1 = nn.L1Loss()
for epoch in range(epoch_start, n_epochs + 1):
# monitor training loss
train_loss = 0.0
# Training
epoch_start_time = time.time()
for batch, data in enumerate(train_dataloader):
# Get stereo pair
LR_left, HR_right, HR_left = data
b, c, h, w = LR_left.shape
LR_left = LR_left.to(device)
HR_left = HR_left.to(device)
HR_right = HR_right.to(device)
optimizer.zero_grad()
SR_left, (M_right_to_left, M_left_to_right), (M_left_right_left, M_right_left_right), \
(V_left_to_right, V_right_to_left) = model(LR_left, HR_right, is_training=True)
###SR_left = model(LR_left, HR_right, is_training=True)
#msssim = pytorch_msssim.ms_ssim(images_cam1, img_recon, data_range=1.0)
#if not msssim == msssim:
# print('nan value')
#loss = criterion(SR_left, HR_left)
### loss_SR
loss_SR = criterion_mse(SR_left, HR_left)
### loss_smoothness
loss_h = criterion_L1(M_right_to_left[:, :-1, :, :], M_right_to_left[:, 1:, :, :]) + \
criterion_L1(M_left_to_right[:, :-1, :, :], M_left_to_right[:, 1:, :, :])
loss_w = criterion_L1(M_right_to_left[:, :, :-1, :-1], M_right_to_left[:, :, 1:, 1:]) + \
criterion_L1(M_left_to_right[:, :, :-1, :-1], M_left_to_right[:, :, 1:, 1:])
loss_smooth = loss_w + loss_h
### loss_cycle
Identity = (torch.eye(w, w).repeat(b, h, 1, 1)).to(device)
loss_cycle = criterion_L1(M_left_right_left * V_left_to_right.permute(0, 2, 1, 3),
Identity * V_left_to_right.permute(0, 2, 1, 3)) + \
criterion_L1(M_right_left_right * V_right_to_left.permute(0, 2, 1, 3),
Identity * V_right_to_left.permute(0, 2, 1, 3))
### loss_photometric
HR_right_warped = torch.bmm(M_right_to_left.contiguous().view(b * h, w, w),
HR_right.permute(0, 2, 3, 1).contiguous().view(b * h, w, c))
HR_right_warped = HR_right_warped.view(b, h, w, c).contiguous().permute(0, 3, 1, 2)
LR_left_warped = torch.bmm(M_left_to_right.contiguous().view(b * h, w, w),
LR_left.permute(0, 2, 3, 1).contiguous().view(b * h, w, c))
LR_left_warped = LR_left_warped.view(b, h, w, c).contiguous().permute(0, 3, 1, 2)
loss_photo = criterion_L1(LR_left * V_left_to_right, HR_right_warped * V_left_to_right) + \
criterion_L1(HR_right * V_right_to_left, LR_left_warped * V_right_to_left)
### losses
loss = loss_SR + 0.005 * (loss_photo + loss_smooth + loss_cycle)
loss.backward()
optimizer.step()
train_loss += loss.item() #* images_cam1.size(0)
train_loss = train_loss / len(train_dataloader)
# Note that step should be called after validate()
#scheduler.step(train_loss)
if train_loss < best_loss:
best_loss = train_loss
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'loss': train_loss,
}, save_path+'/model_best_weights.pth')
#save_model(model, 1, save_path) #save_model(model, epoch, save_path)
elif epoch % save_every == 0:
#save_model(model, epoch, save_path)
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'loss': train_loss,
}, save_path+'/model_weights_epoch_' + str(epoch) + '.pth')
#Validation
if epoch % val_every == 0:
# validate
model.eval()
val_loss = 0
for batch, data in enumerate(val_dataloader):
# Get stereo pair
LR_left, HR_right, HR_left = data
LR_left = LR_left.to(device)
HR_left = HR_left.to(device)
HR_right = HR_right.to(device)
# get model outputs
SR_left = model(LR_left, HR_right, is_training=False)
loss = criterion_L1(SR_left, HR_left)
val_loss += loss.item() # * images_cam1.size(0)
model.train()
val_loss = val_loss / len(val_dataloader)
scheduler.step(val_loss)
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'loss': train_loss,
}, save_path + '/model_bestVal_loss.pth')
print('Epoch: {} \tTraining Loss: {:.6f}\tVal Loss: {:.6f}\tEpoch Time: {:.6f}'
.format(epoch, train_loss, val_loss, time.time() - epoch_start_time))#, end="\r")
else:
print('Epoch: {} \tTraining Loss: {:.6f}\tEpoch Time: {:.6f}'.format(epoch, train_loss, time.time() - epoch_start_time))
torch.save(model.state_dict(), 'model_weights.pth')
print("Done!")
| en | 0.414913 | ############## Train parameters ############## #stereo_dir_2012 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_stereo_flow_multiview' #stereo_dir_2015 = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_scene_flow_multiview' ################ Data transforms ################ #tsfm = transforms.Compose([transforms.CenterCrop((320, 640)), transforms.ToTensor()]) #tsfm_val = transforms.Compose([transforms.ToTensor()]) #transforms.Resize((160, 160), interpolation=3) ######### Set Seeds ########### # Load model: #optimizer = torch.optim.SGD(model.parameters(), lr=lr_start, weight_decay=1e-8, momentum=0.9) #optimizer.load_state_dict(checkpoint['optimizer_state_dict']) #scheduler.load_state_dict(checkpoint['scheduler_state_dict']) #epoch_start = checkpoint['epoch'] #loss = checkpoint['loss'] # Epochs #criterion = nn.L1Loss() # monitor training loss # Training # Get stereo pair ###SR_left = model(LR_left, HR_right, is_training=True) #msssim = pytorch_msssim.ms_ssim(images_cam1, img_recon, data_range=1.0) #if not msssim == msssim: # print('nan value') #loss = criterion(SR_left, HR_left) ### loss_SR ### loss_smoothness ### loss_cycle ### loss_photometric ### losses #* images_cam1.size(0) # Note that step should be called after validate() #scheduler.step(train_loss) #save_model(model, 1, save_path) #save_model(model, epoch, save_path) #save_model(model, epoch, save_path) #Validation # validate # Get stereo pair # get model outputs # * images_cam1.size(0) #, end="\r") | 2.081597 | 2 |
torch_solver/solver/.ipynb_checkpoints/bert_train_batch-checkpoint.py | guoxuxu/LOANT | 8 | 6613846 | <filename>torch_solver/solver/.ipynb_checkpoints/bert_train_batch-checkpoint.py<gh_stars>1-10
from .train_iter import train_batch_sin
from .metric import print_CM
from .latent_optimization import optimize_z
import torch.nn as nn
def train_batch(scaler, model, optimizer, batch, options, log:bool, **kwargs):
# batch: sample id, vocab id, mask, label
cuda = options['cuda']
batch = [B.cuda(cuda) for B in batch[1:]]
batch_forward_time = 0
train_log_line = ''
cls_repr, outs, task_loss, time_elapsed = train_batch_sin(model, batch)
batch_forward_time += time_elapsed
if options['optimize_cls_repr'] is True:
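        # Refine the [CLS] representation with a gradient step of size epsilon, then re-classify from the refined vector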
epsilon = optimizer.param_groups[0]['lr']
epsilon = epsilon * options['epsilon']
cls_repr = optimize_z(scaler, optimizer, epsilon, loss=task_loss, ad_loss=None, z=cls_repr, multi_obj=False)
outs = model.second_forward(cls_repr)
task_loss = nn.CrossEntropyLoss()(outs, batch[2])
if log is True:
train_log_line = ','.join([str(int(task_loss.item() * 10000) / 10000), print_CM(y_true=batch[2].tolist(), y_pred=outs.max(1)[1].tolist())]) + ';'
return task_loss, batch_forward_time, train_log_line | <filename>torch_solver/solver/.ipynb_checkpoints/bert_train_batch-checkpoint.py<gh_stars>1-10
from .train_iter import train_batch_sin
from .metric import print_CM
from .latent_optimization import optimize_z
import torch.nn as nn
def train_batch(scaler, model, optimizer, batch, options, log:bool, **kwargs):
# batch: sample id, vocab id, mask, label
cuda = options['cuda']
batch = [B.cuda(cuda) for B in batch[1:]]
batch_forward_time = 0
train_log_line = ''
cls_repr, outs, task_loss, time_elapsed = train_batch_sin(model, batch)
batch_forward_time += time_elapsed
if options['optimize_cls_repr'] is True:
epsilon = optimizer.param_groups[0]['lr']
epsilon = epsilon * options['epsilon']
cls_repr = optimize_z(scaler, optimizer, epsilon, loss=task_loss, ad_loss=None, z=cls_repr, multi_obj=False)
outs = model.second_forward(cls_repr)
task_loss = nn.CrossEntropyLoss()(outs, batch[2])
if log is True:
train_log_line = ','.join([str(int(task_loss.item() * 10000) / 10000), print_CM(y_true=batch[2].tolist(), y_pred=outs.max(1)[1].tolist())]) + ';'
return task_loss, batch_forward_time, train_log_line | en | 0.471954 | # batch: sample id, vocab id, mask, label | 2.172554 | 2 |
nautilus/conf/api.py | stannum-l/nautilus | 0 | 6613847 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
driver = cfg.StrOpt('driver',
default='sql',
help="Persistent data store driver.")
timeout = cfg.IntOpt('timeout',
default=300,
help="Time out value for blah.")
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
driver,
timeout,
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS}
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
driver = cfg.StrOpt('driver',
default='sql',
help="Persistent data store driver.")
timeout = cfg.IntOpt('timeout',
default=300,
help="Time out value for blah.")
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
driver,
timeout,
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS}
| en | 0.859654 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.803579 | 2 |
venv/lib/python3.8/site-packages/clikit/api/config/command_config.py | GiulianaPola/select_repeats | 2 | 6613848 | <filename>venv/lib/python3.8/site-packages/clikit/api/config/command_config.py
/home/runner/.cache/pip/pool/f6/13/ba/9655155f4c76cf5dc4900241d3a5c9aee641cdb88403d089a3c2bb91a0 | <filename>venv/lib/python3.8/site-packages/clikit/api/config/command_config.py
/home/runner/.cache/pip/pool/f6/13/ba/9655155f4c76cf5dc4900241d3a5c9aee641cdb88403d089a3c2bb91a0 | none | 1 | 1.017436 | 1 | |
net.py | hanhun7/bishe | 19 | 6613849 | <reponame>hanhun7/bishe
import os
import random
import time
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
# duplicate imports removed; F is bound once via torch.nn.functional
from depth import DepthBranch
from mobilenet import MobileNetV2Encoder
def upsample(x, size):
return F.interpolate(x, size, mode='bilinear', align_corners=True)
class DFMNet(nn.Module):
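    # Two-stream RGB-D saliency model: the depth branch produces a coarse saliency map plus features that are fused into the RGB branch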
def __init__(self, **kwargs):
super(DFMNet, self).__init__()
self.rgb = RGBBranch()
self.depth = DepthBranch()
def forward(self, r, d):
size = r.shape[2:]
outputs = []
sal_d,feat = self.depth(d)
sal_final= self.rgb(r,feat)
sal_final = upsample(sal_final, size)
sal_d = upsample(sal_d, size)
outputs.append(sal_final)
outputs.append(sal_d)
return outputs
class _ConvBNReLU(nn.Module):
"""Conv-BN-ReLU"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,dilation=1, **kwargs):
super(_ConvBNReLU, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,dilation=dilation ,bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _ConvBNSig(nn.Module):
"""Conv-BN-Sigmoid"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,dilation=1, **kwargs):
super(_ConvBNSig, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,dilation=dilation ,bias=False),
nn.BatchNorm2d(out_channels),
nn.Sigmoid()
)
def forward(self, x):
return self.conv(x)
class _DSConv(nn.Module):
"""Depthwise Separable Convolutions"""
def __init__(self, dw_channels, out_channels, stride=1, **kwargs):
super(_DSConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, dw_channels, 3, stride, 1, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
def _make_layer( block, inplanes, planes, blocks, t=6, stride=1):
layers = []
layers.append(block(inplanes, planes, t, stride))
for i in range(1, blocks):
layers.append(block(planes, planes, t, 1))
return nn.Sequential(*layers)
class _DWConv(nn.Module):
def __init__(self, dw_channels, out_channels, stride=1, **kwargs):
super(_DWConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, out_channels, 3, stride, 1, groups=dw_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class LinearBottleneck(nn.Module):
"""LinearBottleneck used in MobileNetV2"""
def __init__(self, in_channels, out_channels, t=6, stride=2, **kwargs):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and in_channels == out_channels
self.block = nn.Sequential(
# pw
_ConvBNReLU(in_channels, in_channels * t, 1),
# dw
_DWConv(in_channels * t, in_channels * t, stride),
# pw-linear
nn.Conv2d(in_channels * t, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
out = self.block(x)
if self.use_shortcut:
out = x + out
return out
class PyramidPooling(nn.Module):
"""Pyramid pooling module"""
def __init__(self, in_channels, out_channels, **kwargs):
super(PyramidPooling, self).__init__()
inter_channels = int(in_channels / 4)
self.conv1 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv2 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv3 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv4 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.out = _ConvBNReLU(in_channels * 2, out_channels, 1)
def pool(self, x, size):
avgpool = nn.AdaptiveAvgPool2d(size)
return avgpool(x)
def forward(self, x):
size = x.size()[2:]
feat1 = upsample(self.conv1(self.pool(x, 1)), size)
feat2 = upsample(self.conv2(self.pool(x, 2)), size)
feat3 = upsample(self.conv3(self.pool(x, 3)), size)
feat4 = upsample(self.conv4(self.pool(x, 6)), size)
x = torch.cat([x, feat1, feat2, feat3, feat4], dim=1)
x = self.out(x)
return x
class RGBBranch(nn.Module):
"""RGBBranch for low-level RGB feature extract"""
def __init__(self, c1=16, c2=24, c3=32, c4=96,c5=320,k=32 ,**kwargs):
super(RGBBranch, self).__init__()
self.base = MobileNetV2Encoder(3)
initialize_weights(self.base)
self.conv_cp1 = _DSConv(c1,k)
self.conv_cp2 = _DSConv(c2, k)
self.conv_cp3 = _DSConv(c3, k)
self.conv_cp4 = _DSConv(c4, k)
self.conv_cp5 = _DSConv(c5, k)
self.conv_s_f = nn.Sequential(_DSConv(2 * k, k),
_DSConv( k, k),
nn.Conv2d(k, 1, 1), )
# self.focus = focus()
self.ca1 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.ca2 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.ca3 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.ca4 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.ca5 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.conv_r1_tran = _ConvBNReLU(16, 16, 1, 1)
self.conv_d1_tran = _ConvBNReLU(16, 16, 1, 1)
self.mlp = nn.Sequential(_ConvBNReLU(48, 24, 1, 1),_ConvBNSig(24,5,1,1))
self.conv_r1_tran2 = _ConvBNReLU(16, 16, 1, 1)
self.conv_d1_tran2 = _ConvBNReLU(16, 16, 1, 1)
self.conv_sgate1 = _ConvBNReLU(16, 16, 3, 1,2,2)
self.conv_sgate2 = _ConvBNReLU(16, 16, 3, 1,2,2)
self.conv_sgate3 = _ConvBNSig(16,5,3,1,1)
self.ppm = PyramidPooling(320, 32)
self.conv_guide = _ConvBNReLU(320, 16, 1, 1)
    def forward(self, x, feat):
d1, d2, d3, d4, d5 = feat
d5_guide = upsample(self.conv_guide(d5),d1.shape[2:])
r1 = self.base.layer1(x)
r1t = self.conv_r1_tran(r1)
d1t = self.conv_d1_tran(d1)
r1t2 = self.conv_r1_tran2(r1)
d1t2 = self.conv_d1_tran2(d1)
        # DQW (depth-quality weights): IoU-like agreement between globally
        # pooled RGB and depth features at three scales gives channel gates
iou = F.adaptive_avg_pool2d(r1t * d1t, 1) / \
(F.adaptive_avg_pool2d(r1t + d1t, 1))
e_rp = F.max_pool2d(r1t, 2, 2)
e_dp = F.max_pool2d(d1t, 2, 2)
e_rp2 = F.max_pool2d(e_rp, 2, 2)
e_dp2 = F.max_pool2d(e_dp, 2, 2)
iou_p1 = F.adaptive_avg_pool2d(e_rp * e_dp, 1) / \
(F.adaptive_avg_pool2d(e_rp + e_dp, 1))
iou_p2 = F.adaptive_avg_pool2d(e_rp2 * e_dp2, 1) / \
(F.adaptive_avg_pool2d(e_rp2 + e_dp2, 1))
gate = self.mlp(torch.cat((iou, iou_p1, iou_p2), dim=1))
        # DHA (depth-holistic attention): spatial gates refined iteratively
        # under guidance from the deepest depth feature
mc = r1t2 * d1t2
sgate = self.conv_sgate1(upsample(mc + d5_guide, d2.shape[2:]))
d5_guide1 = mc + upsample(sgate, d1.shape[2:])
        sgate = self.conv_sgate2(upsample(mc + d5_guide1, d2.shape[2:]))  # second refinement step; conv_sgate2 was defined but never used
d5_guide2 = mc + upsample(sgate, d1.shape[2:])
sgate = self.conv_sgate3(d5_guide1 + d5_guide2 + mc)
dqw1 = gate[:,0:1,...]
dha1 = upsample(sgate[:, 0:1, ...], d1.shape[2:])
dqw2 = gate[:, 1:2, ...]
dha2 = upsample(sgate[:, 1:2, ...], d2.shape[2:])
dqw3 = gate[:, 2:3, ...]
dha3 = upsample(sgate[:, 2:3, ...], d3.shape[2:])
dqw4 = gate[:, 3:4, ...]
dha4 = upsample(sgate[:, 3:4, ...], d4.shape[2:])
dqw5 = gate[:, 4:5, ...]
dha5 = upsample(sgate[:, 4:5, ...], d5.shape[2:])
r1 = r1 + d1 * dqw1 * dha1
r2 = self.base.layer2(r1) + d2 * dqw2 * dha2
r3 = self.base.layer3(r2) + d3 * dqw3 * dha3
r4 = self.base.layer4(r3) + d4 * dqw4 * dha4
r5 = self.base.layer5(r4) + d5 * dqw5 * dha5
r6 = self.ppm(r5)
# Two stage decoder
## pre-fusion
r5 = self.conv_cp5(r5)
r4 = self.conv_cp4(r4)
r3 = self.conv_cp3(r3)
r2 = self.conv_cp2(r2)
r1 = self.conv_cp1(r1)
r5 = self.ca5(F.adaptive_avg_pool2d(r5, 1)) * r5
r4 = self.ca4(F.adaptive_avg_pool2d(r4, 1)) * r4
r3 = self.ca3(F.adaptive_avg_pool2d(r3, 1)) * r3
r2 = self.ca2(F.adaptive_avg_pool2d(r2, 1)) * r2
r1 = self.ca1(F.adaptive_avg_pool2d(r1, 1)) * r1
r3 = upsample(r3, r1.shape[2:])
r2 = upsample(r2, r1.shape[2:])
rh = r4 + r5 + r6
rl = r1 + r2 + r3
## full-fusion
rh = upsample(rh, rl.shape[2:])
        sal = self.conv_s_f(torch.cat((rh, rl), dim=1))
return sal
def initialize_weights(model):
m = torch.hub.load('pytorch/vision:v0.6.0', 'mobilenet_v2', pretrained=True)
pretrained_dict = m.state_dict()
all_params = {}
for k, v in model.state_dict().items():
if k in pretrained_dict.keys():
v = pretrained_dict[k]
all_params[k] = v
    model.load_state_dict(all_params, strict=False)
if __name__ == '__main__':
img = torch.randn(1, 3, 256, 256).cuda()
depth = torch.randn(1, 1, 256, 256).cuda()
model = DFMNet().cuda()
    model.eval()
    with torch.no_grad():
        torch.cuda.synchronize()  # flush pending CUDA work before timing
        time1 = time.time()
        outputs = model(img, depth)
        torch.cuda.synchronize()  # wait for the forward pass to finish
        time2 = time.time()
    print('FPS: %.2f' % (1.0 / (time2 - time1)))
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(num_params)
| import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from depth import DepthBranch
from mobilenet import MobileNetV2Encoder
def upsample(x, size):
return F.interpolate(x, size, mode='bilinear', align_corners=True)
class DFMNet(nn.Module):
def __init__(self, **kwargs):
super(DFMNet, self).__init__()
self.rgb = RGBBranch()
self.depth = DepthBranch()
def forward(self, r, d):
size = r.shape[2:]
outputs = []
        sal_d, feat = self.depth(d)
        sal_final = self.rgb(r, feat)
sal_final = upsample(sal_final, size)
sal_d = upsample(sal_d, size)
outputs.append(sal_final)
outputs.append(sal_d)
return outputs
class _ConvBNReLU(nn.Module):
"""Conv-BN-ReLU"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,dilation=1, **kwargs):
super(_ConvBNReLU, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,dilation=dilation ,bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _ConvBNSig(nn.Module):
"""Conv-BN-Sigmoid"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,dilation=1, **kwargs):
super(_ConvBNSig, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,dilation=dilation ,bias=False),
nn.BatchNorm2d(out_channels),
nn.Sigmoid()
)
def forward(self, x):
return self.conv(x)
class _DSConv(nn.Module):
"""Depthwise Separable Convolutions"""
def __init__(self, dw_channels, out_channels, stride=1, **kwargs):
super(_DSConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, dw_channels, 3, stride, 1, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
def _make_layer( block, inplanes, planes, blocks, t=6, stride=1):
layers = []
layers.append(block(inplanes, planes, t, stride))
for i in range(1, blocks):
layers.append(block(planes, planes, t, 1))
return nn.Sequential(*layers)
class _DWConv(nn.Module):
def __init__(self, dw_channels, out_channels, stride=1, **kwargs):
super(_DWConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, out_channels, 3, stride, 1, groups=dw_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class LinearBottleneck(nn.Module):
"""LinearBottleneck used in MobileNetV2"""
def __init__(self, in_channels, out_channels, t=6, stride=2, **kwargs):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and in_channels == out_channels
self.block = nn.Sequential(
# pw
_ConvBNReLU(in_channels, in_channels * t, 1),
# dw
_DWConv(in_channels * t, in_channels * t, stride),
# pw-linear
nn.Conv2d(in_channels * t, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
out = self.block(x)
if self.use_shortcut:
out = x + out
return out
class PyramidPooling(nn.Module):
"""Pyramid pooling module"""
def __init__(self, in_channels, out_channels, **kwargs):
super(PyramidPooling, self).__init__()
inter_channels = int(in_channels / 4)
self.conv1 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv2 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv3 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv4 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.out = _ConvBNReLU(in_channels * 2, out_channels, 1)
def pool(self, x, size):
avgpool = nn.AdaptiveAvgPool2d(size)
return avgpool(x)
def forward(self, x):
size = x.size()[2:]
feat1 = upsample(self.conv1(self.pool(x, 1)), size)
feat2 = upsample(self.conv2(self.pool(x, 2)), size)
feat3 = upsample(self.conv3(self.pool(x, 3)), size)
feat4 = upsample(self.conv4(self.pool(x, 6)), size)
x = torch.cat([x, feat1, feat2, feat3, feat4], dim=1)
x = self.out(x)
return x
class RGBBranch(nn.Module):
"""RGBBranch for low-level RGB feature extract"""
def __init__(self, c1=16, c2=24, c3=32, c4=96,c5=320,k=32 ,**kwargs):
super(RGBBranch, self).__init__()
self.base = MobileNetV2Encoder(3)
initialize_weights(self.base)
self.conv_cp1 = _DSConv(c1,k)
self.conv_cp2 = _DSConv(c2, k)
self.conv_cp3 = _DSConv(c3, k)
self.conv_cp4 = _DSConv(c4, k)
self.conv_cp5 = _DSConv(c5, k)
self.conv_s_f = nn.Sequential(_DSConv(2 * k, k),
_DSConv( k, k),
nn.Conv2d(k, 1, 1), )
# self.focus = focus()
self.ca1 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.ca2 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.ca3 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.ca4 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.ca5 = nn.Sequential(_ConvBNReLU(k, k, 1, 1), nn.Conv2d(k, k, 1, 1), nn.Sigmoid())
self.conv_r1_tran = _ConvBNReLU(16, 16, 1, 1)
self.conv_d1_tran = _ConvBNReLU(16, 16, 1, 1)
self.mlp = nn.Sequential(_ConvBNReLU(48, 24, 1, 1),_ConvBNSig(24,5,1,1))
self.conv_r1_tran2 = _ConvBNReLU(16, 16, 1, 1)
self.conv_d1_tran2 = _ConvBNReLU(16, 16, 1, 1)
self.conv_sgate1 = _ConvBNReLU(16, 16, 3, 1,2,2)
self.conv_sgate2 = _ConvBNReLU(16, 16, 3, 1,2,2)
self.conv_sgate3 = _ConvBNSig(16,5,3,1,1)
self.ppm = PyramidPooling(320, 32)
self.conv_guide = _ConvBNReLU(320, 16, 1, 1)
    def forward(self, x, feat):
d1, d2, d3, d4, d5 = feat
d5_guide = upsample(self.conv_guide(d5),d1.shape[2:])
r1 = self.base.layer1(x)
r1t = self.conv_r1_tran(r1)
d1t = self.conv_d1_tran(d1)
r1t2 = self.conv_r1_tran2(r1)
d1t2 = self.conv_d1_tran2(d1)
        # DQW (depth-quality weights): IoU-like agreement between globally
        # pooled RGB and depth features at three scales gives channel gates
iou = F.adaptive_avg_pool2d(r1t * d1t, 1) / \
(F.adaptive_avg_pool2d(r1t + d1t, 1))
e_rp = F.max_pool2d(r1t, 2, 2)
e_dp = F.max_pool2d(d1t, 2, 2)
e_rp2 = F.max_pool2d(e_rp, 2, 2)
e_dp2 = F.max_pool2d(e_dp, 2, 2)
iou_p1 = F.adaptive_avg_pool2d(e_rp * e_dp, 1) / \
(F.adaptive_avg_pool2d(e_rp + e_dp, 1))
iou_p2 = F.adaptive_avg_pool2d(e_rp2 * e_dp2, 1) / \
(F.adaptive_avg_pool2d(e_rp2 + e_dp2, 1))
gate = self.mlp(torch.cat((iou, iou_p1, iou_p2), dim=1))
        # DHA (depth-holistic attention): spatial gates refined iteratively
        # under guidance from the deepest depth feature
mc = r1t2 * d1t2
sgate = self.conv_sgate1(upsample(mc + d5_guide, d2.shape[2:]))
d5_guide1 = mc + upsample(sgate, d1.shape[2:])
        sgate = self.conv_sgate2(upsample(mc + d5_guide1, d2.shape[2:]))  # second refinement step; conv_sgate2 was defined but never used
d5_guide2 = mc + upsample(sgate, d1.shape[2:])
sgate = self.conv_sgate3(d5_guide1 + d5_guide2 + mc)
dqw1 = gate[:,0:1,...]
dha1 = upsample(sgate[:, 0:1, ...], d1.shape[2:])
dqw2 = gate[:, 1:2, ...]
dha2 = upsample(sgate[:, 1:2, ...], d2.shape[2:])
dqw3 = gate[:, 2:3, ...]
dha3 = upsample(sgate[:, 2:3, ...], d3.shape[2:])
dqw4 = gate[:, 3:4, ...]
dha4 = upsample(sgate[:, 3:4, ...], d4.shape[2:])
dqw5 = gate[:, 4:5, ...]
dha5 = upsample(sgate[:, 4:5, ...], d5.shape[2:])
r1 = r1 + d1 * dqw1 * dha1
r2 = self.base.layer2(r1) + d2 * dqw2 * dha2
r3 = self.base.layer3(r2) + d3 * dqw3 * dha3
r4 = self.base.layer4(r3) + d4 * dqw4 * dha4
r5 = self.base.layer5(r4) + d5 * dqw5 * dha5
r6 = self.ppm(r5)
# Two stage decoder
## pre-fusion
r5 = self.conv_cp5(r5)
r4 = self.conv_cp4(r4)
r3 = self.conv_cp3(r3)
r2 = self.conv_cp2(r2)
r1 = self.conv_cp1(r1)
r5 = self.ca5(F.adaptive_avg_pool2d(r5, 1)) * r5
r4 = self.ca4(F.adaptive_avg_pool2d(r4, 1)) * r4
r3 = self.ca3(F.adaptive_avg_pool2d(r3, 1)) * r3
r2 = self.ca2(F.adaptive_avg_pool2d(r2, 1)) * r2
r1 = self.ca1(F.adaptive_avg_pool2d(r1, 1)) * r1
r3 = upsample(r3, r1.shape[2:])
r2 = upsample(r2, r1.shape[2:])
rh = r4 + r5 + r6
rl = r1 + r2 + r3
## full-fusion
rh = upsample(rh, rl.shape[2:])
        sal = self.conv_s_f(torch.cat((rh, rl), dim=1))
return sal
def initialize_weights(model):
m = torch.hub.load('pytorch/vision:v0.6.0', 'mobilenet_v2', pretrained=True)
pretrained_dict = m.state_dict()
all_params = {}
for k, v in model.state_dict().items():
if k in pretrained_dict.keys():
v = pretrained_dict[k]
all_params[k] = v
    model.load_state_dict(all_params, strict=False)
if __name__ == '__main__':
img = torch.randn(1, 3, 256, 256).cuda()
depth = torch.randn(1, 1, 256, 256).cuda()
model = DFMNet().cuda()
    model.eval()
    with torch.no_grad():
        torch.cuda.synchronize()  # flush pending CUDA work before timing
        time1 = time.time()
        outputs = model(img, depth)
        torch.cuda.synchronize()  # wait for the forward pass to finish
        time2 = time.time()
    print('FPS: %.2f' % (1.0 / (time2 - time1)))
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(num_params) | en | 0.581059 | Conv-BN-ReLU Conv-BN-Sigmoid Depthwise Separable Convolutions LinearBottleneck used in MobileNetV2 # pw # dw # pw-linear Pyramid pooling module RGBBranch for low-level RGB feature extract # self.focus = focus() # QDW # DHA # Two stage decoder ## pre-fusion ## full-fusion | 2.126423 | 2 |
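The DQW gate above reduces to a simple IoU-style agreement score between RGB and depth features. A standalone sketch of just that computation, with an epsilon added for numerical safety (the model above omits it):

import torch
import torch.nn.functional as F

def quality_gate(r, d, eps=1e-6):
    """IoU-like per-channel agreement between two (B, C, H, W) feature maps."""
    inter = F.adaptive_avg_pool2d(r * d, 1)        # pooled 'intersection'
    union = F.adaptive_avg_pool2d(r + d, 1) + eps  # pooled 'union'; eps avoids /0
    return inter / union                           # near 1 where modalities agree

r = torch.rand(2, 16, 64, 64)
d = torch.rand(2, 16, 64, 64)
print(quality_gate(r, d).shape)  # torch.Size([2, 16, 1, 1])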
main/gtk4/template.py | RoastVeg/cports | 0 | 6613850 | <filename>main/gtk4/template.py
pkgname = "gtk4"
pkgver = "4.6.2"
pkgrel = 0
build_style = "meson"
configure_args = [
"-Dman-pages=true", "-Dbuild-tests=false", "-Dgtk_doc=false",
"-Dbroadway-backend=true", "-Dx11-backend=true", "-Dwayland-backend=true",
"-Dintrospection=enabled", "-Dcolord=enabled", "-Dvulkan=enabled",
"-Dcloudproviders=disabled",
]
hostmakedepends = [
"meson", "pkgconf", "gobject-introspection", "perl", "glib-devel",
"gettext-tiny-devel", "wayland-progs", "wayland-protocols", "xsltproc",
"docbook-xsl-nons", "python-docutils", "sassc", "gtk-update-icon-cache",
]
makedepends = [
"at-spi2-atk-devel", "gdk-pixbuf-devel", "libepoxy-devel", "pango-devel",
"colord-devel", "libxkbcommon-devel", "wayland-devel", "wayland-protocols",
"mesa-devel", "libxcursor-devel", "libxdamage-devel", "libxext-devel",
"libxinerama-devel", "libxrandr-devel", "libxcomposite-devel",
"libxi-devel", "vulkan-loader", "vulkan-headers", "cups-devel",
"graphene-devel", "gst-plugins-bad-devel", "ffmpeg-devel", "iso-codes",
]
depends = [
"gtk-update-icon-cache", "adwaita-icon-theme",
"virtual:gdk-pixbuf-loader-svg!librsvg"
]
pkgdesc = "Gimp Toolkit version 4"
maintainer = "q66 <<EMAIL>>"
license = "LGPL-2.1-or-later"
url = "https://gtk.org"
source = f"$(GNOME_SITE)/gtk/{pkgver[:-2]}/gtk-{pkgver}.tar.xz"
sha256 = "ff263af609a50eb76056653592d929459aef4819a444c436f6d52c6f63c1faec"
def post_install(self):
# we don't really need it (provided by gtk3)
self.rm(self.destdir / "usr/bin/gtk4-update-icon-cache")
self.rm(self.destdir / "usr/share/man/man1/gtk4-update-icon-cache.1")
@subpackage("gtk4-devel")
def _devel(self):
self.depends += ["vulkan-headers"]
return self.default_devel()
@subpackage("gtk4-demo")
def _demo(self):
self.pkgdesc = f"{pkgdesc} (demo applications)"
return [
"usr/bin/gtk4-demo",
"usr/bin/gtk4-widget-factory",
"usr/bin/gtk4-demo-application",
"usr/share/man/man1/gtk4-demo.1",
"usr/share/man/man1/gtk4-widget-factory.1",
"usr/share/man/man1/gtk4-demo-application.1",
"usr/share/gtk-4.0/gtk4builder.rng",
"usr/share/glib-2.0/schemas/org.gtk.Demo4.gschema.xml",
"usr/share/applications/org.gtk.Demo4.desktop",
"usr/share/applications/org.gtk.PrintEditor4.desktop",
"usr/share/applications/org.gtk.WidgetFactory4.desktop",
"usr/share/icons/hicolor/scalable/apps/org.gtk.Demo4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.Demo4-symbolic.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.PrintEditor4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.PrintEditor4-symbolic.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.PrintEditor4.Devel.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.WidgetFactory4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.WidgetFactory4-symbolic.svg",
]
@subpackage("gtk4-cups")
def _cups(self):
self.pkgdesc = f"{pkgdesc} (CUPS print backend)"
self.install_if = [f"{pkgname}={pkgver}-r{pkgrel}", "cups"]
return ["usr/lib/gtk-4.0/4.0.0/printbackends/libprintbackend-cups.so"]
| <filename>main/gtk4/template.py
pkgname = "gtk4"
pkgver = "4.6.2"
pkgrel = 0
build_style = "meson"
configure_args = [
"-Dman-pages=true", "-Dbuild-tests=false", "-Dgtk_doc=false",
"-Dbroadway-backend=true", "-Dx11-backend=true", "-Dwayland-backend=true",
"-Dintrospection=enabled", "-Dcolord=enabled", "-Dvulkan=enabled",
"-Dcloudproviders=disabled",
]
hostmakedepends = [
"meson", "pkgconf", "gobject-introspection", "perl", "glib-devel",
"gettext-tiny-devel", "wayland-progs", "wayland-protocols", "xsltproc",
"docbook-xsl-nons", "python-docutils", "sassc", "gtk-update-icon-cache",
]
makedepends = [
"at-spi2-atk-devel", "gdk-pixbuf-devel", "libepoxy-devel", "pango-devel",
"colord-devel", "libxkbcommon-devel", "wayland-devel", "wayland-protocols",
"mesa-devel", "libxcursor-devel", "libxdamage-devel", "libxext-devel",
"libxinerama-devel", "libxrandr-devel", "libxcomposite-devel",
"libxi-devel", "vulkan-loader", "vulkan-headers", "cups-devel",
"graphene-devel", "gst-plugins-bad-devel", "ffmpeg-devel", "iso-codes",
]
depends = [
"gtk-update-icon-cache", "adwaita-icon-theme",
"virtual:gdk-pixbuf-loader-svg!librsvg"
]
pkgdesc = "Gimp Toolkit version 4"
maintainer = "q66 <<EMAIL>>"
license = "LGPL-2.1-or-later"
url = "https://gtk.org"
source = f"$(GNOME_SITE)/gtk/{pkgver[:-2]}/gtk-{pkgver}.tar.xz"
sha256 = "ff263af609a50eb76056653592d929459aef4819a444c436f6d52c6f63c1faec"
def post_install(self):
# we don't really need it (provided by gtk3)
self.rm(self.destdir / "usr/bin/gtk4-update-icon-cache")
self.rm(self.destdir / "usr/share/man/man1/gtk4-update-icon-cache.1")
@subpackage("gtk4-devel")
def _devel(self):
self.depends += ["vulkan-headers"]
return self.default_devel()
@subpackage("gtk4-demo")
def _demo(self):
self.pkgdesc = f"{pkgdesc} (demo applications)"
return [
"usr/bin/gtk4-demo",
"usr/bin/gtk4-widget-factory",
"usr/bin/gtk4-demo-application",
"usr/share/man/man1/gtk4-demo.1",
"usr/share/man/man1/gtk4-widget-factory.1",
"usr/share/man/man1/gtk4-demo-application.1",
"usr/share/gtk-4.0/gtk4builder.rng",
"usr/share/glib-2.0/schemas/org.gtk.Demo4.gschema.xml",
"usr/share/applications/org.gtk.Demo4.desktop",
"usr/share/applications/org.gtk.PrintEditor4.desktop",
"usr/share/applications/org.gtk.WidgetFactory4.desktop",
"usr/share/icons/hicolor/scalable/apps/org.gtk.Demo4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.Demo4-symbolic.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.PrintEditor4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.PrintEditor4-symbolic.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.PrintEditor4.Devel.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.WidgetFactory4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.WidgetFactory4-symbolic.svg",
]
@subpackage("gtk4-cups")
def _cups(self):
self.pkgdesc = f"{pkgdesc} (CUPS print backend)"
self.install_if = [f"{pkgname}={pkgver}-r{pkgrel}", "cups"]
return ["usr/lib/gtk-4.0/4.0.0/printbackends/libprintbackend-cups.so"]
| en | 0.996076 | # we don't really need it (provided by gtk3) | 1.199398 | 1 |
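The template above illustrates the common cports pattern of splitting optional pieces into subpackages. A hypothetical skeleton showing just that mechanism; the package name, file lists, and URLs are made up, and the @subpackage decorator and default_devel() helper are injected by cbuild at template-evaluation time, so this is not runnable standalone:

pkgname = "libfoo"
pkgver = "1.0.0"
pkgrel = 0
build_style = "meson"
hostmakedepends = ["meson", "pkgconf"]
pkgdesc = "Example library"
maintainer = "someone <someone@example.org>"
license = "MIT"
url = "https://example.org/libfoo"
source = f"https://example.org/dist/libfoo-{pkgver}.tar.xz"
sha256 = "0000000000000000000000000000000000000000000000000000000000000000"

@subpackage("libfoo-devel")
def _devel(self):
    # headers, unversioned .so symlinks and pkg-config files
    return self.default_devel()

@subpackage("libfoo-progs")
def _progs(self):
    self.pkgdesc = f"{pkgdesc} (command line tools)"
    self.install_if = [f"{pkgname}={pkgver}-r{pkgrel}"]
    return ["usr/bin"]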